fix: centralize provider thinking profiles

This commit is contained in:
Peter Steinberger
2026-04-21 09:04:37 +01:00
parent 1cc2fc82ca
commit f1805ab54d
57 changed files with 718 additions and 572 deletions

View File

@@ -32,6 +32,7 @@ Docs: https://docs.openclaw.ai
- Webchat/images: treat inline image attachments as media for empty-turn gating while still ignoring metadata-only blank turns. (#69474) Thanks @Jaswir.
- Discord/think: only show `adaptive` in `/think` autocomplete for provider/model pairs that actually support provider-managed adaptive thinking, so GPT/OpenAI models no longer advertise an Anthropic-only option.
- Thinking: only expose `max` for models that explicitly support provider max reasoning, and remap stored `max` settings to the largest supported thinking mode when users switch to another model.
- Thinking/UI: drive `/think` options and chat/Sessions pickers from provider-owned thinking profiles, so custom per-model level sets such as binary `on/off`, Gemini 3 Pro `off/low/high`, Anthropic `adaptive/max`, and OpenAI `xhigh` stay in one runtime contract.
- Gateway/usage: bound the cost usage cache with FIFO eviction so date/range lookups cannot grow unbounded. (#68842) Thanks @Feelw00.
- OpenAI/Responses: resolve `/think` levels against each GPT model's supported reasoning efforts so `/think off` no longer becomes high reasoning or sends unsupported `reasoning.effort: "none"` payloads.
- Lobster/TaskFlow: allow managed approval resumes to use `approvalId` without a resume token, and persist that id in approval wait state. (#69559) Thanks @kirkluokun.

View File

@@ -327,7 +327,7 @@ Surface different features that extend the above defaults.
{
"command": "/think",
"description": "Set the thinking level",
"usage_hint": "<off|minimal|low|medium|high|xhigh>"
"usage_hint": "<level>"
},
{
"command": "/verbose",
@@ -448,7 +448,7 @@ Surface different features that extend the above defaults.
{
"command": "/think",
"description": "Set the thinking level",
"usage_hint": "<off|minimal|low|medium|high|xhigh>",
"usage_hint": "<level>",
"url": "https://gateway-host.example.com/slack/events"
},
{

View File

@@ -26,7 +26,7 @@ Related:
- `-t, --to <dest>`: recipient used to derive the session key
- `--session-id <id>`: explicit session id
- `--agent <id>`: agent id; overrides routing bindings
- `--thinking <off|minimal|low|medium|high|xhigh>`: agent thinking level
- `--thinking <level>`: agent thinking level (`off`, `minimal`, `low`, `medium`, `high`, plus provider-supported custom levels such as `xhigh`, `adaptive`, or `max`)
- `--verbose <on|off>`: persist verbose level for the session
- `--channel <channel>`: delivery channel; omit to use the main session channel
- `--reply-to <target>`: delivery target override

View File

@@ -994,7 +994,7 @@ Options:
- `-t, --to <dest>` (for session key and optional delivery)
- `--session-id <id>`
- `--agent <id>` (agent id; overrides routing bindings)
- `--thinking <off|minimal|low|medium|high|xhigh>` (provider support varies; not model-gated at CLI level)
- `--thinking <level>` (validated against the selected model's provider profile)
- `--verbose <on|off>`
- `--channel <channel>` (delivery channel; omit to use the main session channel)
- `--reply-to <target>` (delivery target override, separate from session routing)

View File

@@ -42,9 +42,9 @@ For model selection rules, see [/concepts/models](/concepts/models).
`buildAuthDoctorHint`,
`matchesContextOverflowError`, `classifyFailoverReason`,
`isCacheTtlEligible`, `buildMissingAuthMessage`, `suppressBuiltInModel`,
`augmentModelCatalog`, `isBinaryThinking`, `supportsXHighThinking`,
`supportsAdaptiveThinking`, `supportsMaxThinking`,
`resolveDefaultThinkingLevel`, `applyConfigDefaults`, `isModernModelRef`,
`augmentModelCatalog`, `resolveThinkingProfile`, `isBinaryThinking`,
`supportsXHighThinking`, `resolveDefaultThinkingLevel`,
`applyConfigDefaults`, `isModernModelRef`,
`prepareRuntimeAuth`, `resolveUsageAuth`, `fetchUsageSnapshot`, and
`onModelSelected`.
- Note: provider runtime `capabilities` is shared runner metadata (provider
@@ -132,12 +132,11 @@ Typical split:
vendor-owned error for direct resolution failures
- `augmentModelCatalog`: provider appends synthetic/final catalog rows after
discovery and config merging
- `isBinaryThinking`: provider owns binary on/off thinking UX
- `supportsXHighThinking`: provider opts selected models into `xhigh`
- `supportsAdaptiveThinking`: provider opts selected models into `adaptive`
- `supportsMaxThinking`: provider opts selected models into `max`
- `resolveDefaultThinkingLevel`: provider owns default `/think` policy for a
model family
- `resolveThinkingProfile`: provider owns the exact `/think` level set,
  optional display labels, and default level for a selected model (see the
  sketch after this list)
- `isBinaryThinking`: compatibility hook for binary on/off thinking UX
- `supportsXHighThinking`: compatibility hook for selected `xhigh` models
- `resolveDefaultThinkingLevel`: compatibility hook for default `/think` policy
- `applyConfigDefaults`: provider applies provider-specific global defaults
during config materialization based on auth mode, env, or model family
- `isModernModelRef`: provider owns live/smoke preferred-model matching
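For orientation, a minimal sketch of the profile shape this hook returns. The types below are local stand-ins rather than the real `openclaw/plugin-sdk` imports, and the provider/model matcher is hypothetical; the actual plugins later in this diff show the shipped signatures.

```ts
// Sketch only: local stand-ins for the SDK types this hook works with.
type ThinkingLevelId =
  | "off" | "minimal" | "low" | "medium" | "high"
  | "xhigh" | "adaptive" | "max";

interface ProviderThinkingProfile {
  levels: Array<{ id: ThinkingLevelId; label?: string }>;
  defaultLevel?: ThinkingLevelId;
}

// Hypothetical matcher: only "pro" models of this imaginary provider get the
// reduced off/low/high ladder; everything else keeps the base five levels.
const PRO_MODEL_RE = /-pro\b/;

export const exampleProviderThinkingHooks = {
  resolveThinkingProfile: ({ modelId }: { modelId: string }): ProviderThinkingProfile =>
    PRO_MODEL_RE.test(modelId)
      ? { levels: [{ id: "off" }, { id: "low" }, { id: "high" }], defaultLevel: "low" }
      : {
          levels: [
            { id: "off" },
            { id: "minimal" },
            { id: "low" },
            { id: "medium" },
            { id: "high" },
          ],
        },
};
```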

View File

@@ -658,8 +658,7 @@ Provider plugins now have two layers:
`buildAuthDoctorHint`, `matchesContextOverflowError`,
`classifyFailoverReason`, `isCacheTtlEligible`,
`buildMissingAuthMessage`, `suppressBuiltInModel`, `augmentModelCatalog`,
`isBinaryThinking`, `supportsXHighThinking`, `supportsAdaptiveThinking`,
`supportsMaxThinking`,
`resolveThinkingProfile`, `isBinaryThinking`, `supportsXHighThinking`,
`resolveDefaultThinkingLevel`, `isModernModelRef`, `prepareRuntimeAuth`,
`resolveUsageAuth`, `fetchUsageSnapshot`, `createEmbeddingProvider`,
`buildReplayPolicy`,
@@ -723,20 +722,19 @@ The "When to use" column is the quick decision guide.
| 30 | `buildMissingAuthMessage` | Replacement for the generic missing-auth recovery message | Provider needs a provider-specific missing-auth recovery hint |
| 31 | `suppressBuiltInModel` | Stale upstream model suppression plus optional user-facing error hint | Provider needs to hide stale upstream rows or replace them with a vendor hint |
| 32 | `augmentModelCatalog` | Synthetic/final catalog rows appended after discovery | Provider needs synthetic forward-compat rows in `models list` and pickers |
| 33 | `isBinaryThinking` | On/off reasoning toggle for binary-thinking providers | Provider exposes only binary thinking on/off |
| 34 | `supportsXHighThinking` | `xhigh` reasoning support for selected models | Provider wants `xhigh` on only a subset of models |
| 35 | `supportsAdaptiveThinking` | `adaptive` thinking support for selected models | Provider wants `adaptive` shown only for models with provider-managed adaptive thinking |
| 36 | `supportsMaxThinking` | `max` reasoning support for selected models | Provider wants `max` shown only for models with provider max thinking |
| 37 | `resolveDefaultThinkingLevel` | Default `/think` level for a specific model family | Provider owns default `/think` policy for a model family |
| 38 | `isModernModelRef` | Modern-model matcher for live profile filters and smoke selection | Provider owns live/smoke preferred-model matching |
| 39 | `prepareRuntimeAuth` | Exchange a configured credential into the actual runtime token/key just before inference | Provider needs a token exchange or short-lived request credential |
| 40 | `resolveUsageAuth` | Resolve usage/billing credentials for `/usage` and related status surfaces | Provider needs custom usage/quota token parsing or a different usage credential |
| 41 | `fetchUsageSnapshot` | Fetch and normalize provider-specific usage/quota snapshots after auth is resolved | Provider needs a provider-specific usage endpoint or payload parser |
| 42 | `createEmbeddingProvider` | Build a provider-owned embedding adapter for memory/search | Memory embedding behavior belongs with the provider plugin |
| 43 | `buildReplayPolicy` | Return a replay policy controlling transcript handling for the provider | Provider needs custom transcript policy (for example, thinking-block stripping) |
| 44 | `sanitizeReplayHistory` | Rewrite replay history after generic transcript cleanup | Provider needs provider-specific replay rewrites beyond shared compaction helpers |
| 45 | `validateReplayTurns` | Final replay-turn validation or reshaping before the embedded runner | Provider transport needs stricter turn validation after generic sanitation |
| 46 | `onModelSelected` | Run provider-owned post-selection side effects | Provider needs telemetry or provider-owned state when a model becomes active |
| 33 | `resolveThinkingProfile` | Model-specific `/think` level set, display labels, and default | Provider exposes a custom thinking ladder or binary label for selected models |
| 34 | `isBinaryThinking` | On/off reasoning toggle compatibility hook | Provider exposes only binary thinking on/off |
| 35 | `supportsXHighThinking` | `xhigh` reasoning support compatibility hook | Provider wants `xhigh` on only a subset of models |
| 36 | `resolveDefaultThinkingLevel` | Default `/think` level compatibility hook | Provider owns default `/think` policy for a model family |
| 37 | `isModernModelRef` | Modern-model matcher for live profile filters and smoke selection | Provider owns live/smoke preferred-model matching |
| 38 | `prepareRuntimeAuth` | Exchange a configured credential into the actual runtime token/key just before inference | Provider needs a token exchange or short-lived request credential |
| 39 | `resolveUsageAuth` | Resolve usage/billing credentials for `/usage` and related status surfaces | Provider needs custom usage/quota token parsing or a different usage credential |
| 40 | `fetchUsageSnapshot` | Fetch and normalize provider-specific usage/quota snapshots after auth is resolved | Provider needs a provider-specific usage endpoint or payload parser |
| 41 | `createEmbeddingProvider` | Build a provider-owned embedding adapter for memory/search | Memory embedding behavior belongs with the provider plugin |
| 42 | `buildReplayPolicy` | Return a replay policy controlling transcript handling for the provider | Provider needs custom transcript policy (for example, thinking-block stripping) |
| 43 | `sanitizeReplayHistory` | Rewrite replay history after generic transcript cleanup | Provider needs provider-specific replay rewrites beyond shared compaction helpers |
| 44 | `validateReplayTurns` | Final replay-turn validation or reshaping before the embedded runner | Provider transport needs stricter turn validation after generic sanitation |
| 45 | `onModelSelected` | Run provider-owned post-selection side effects | Provider needs telemetry or provider-owned state when a model becomes active |
`normalizeModelId`, `normalizeTransport`, and `normalizeConfig` first check the
matched provider plugin, then fall through other hook-capable provider plugins
@@ -808,7 +806,7 @@ api.registerProvider({
- Anthropic uses `resolveDynamicModel`, `capabilities`, `buildAuthDoctorHint`,
`resolveUsageAuth`, `fetchUsageSnapshot`, `isCacheTtlEligible`,
`supportsAdaptiveThinking`, `supportsMaxThinking`, `resolveDefaultThinkingLevel`, `applyConfigDefaults`, `isModernModelRef`,
`resolveThinkingProfile`, `applyConfigDefaults`, `isModernModelRef`,
and `wrapStreamFn` because it owns Claude 4.6 forward-compat,
provider-family hints, auth repair guidance, usage endpoint integration,
prompt-cache eligibility, auth-aware config defaults, Claude
@@ -822,7 +820,7 @@ api.registerProvider({
provider's beta-header rules.
- OpenAI uses `resolveDynamicModel`, `normalizeResolvedModel`, and
`capabilities` plus `buildMissingAuthMessage`, `suppressBuiltInModel`,
`augmentModelCatalog`, `supportsXHighThinking`, and `isModernModelRef`
`augmentModelCatalog`, `resolveThinkingProfile`, and `isModernModelRef`
because it owns GPT-5.4 forward-compat, the direct OpenAI
`openai-completions` -> `openai-responses` normalization, Codex-aware auth
hints, Spark suppression, synthetic OpenAI list rows, and GPT-5 thinking /
@@ -864,7 +862,7 @@ api.registerProvider({
`anthropic-by-model` replay family so Claude-specific replay cleanup stays
scoped to Claude ids instead of every `anthropic-messages` transport.
- Amazon Bedrock uses `buildReplayPolicy`, `matchesContextOverflowError`,
`classifyFailoverReason`, and `resolveDefaultThinkingLevel` because it owns
`classifyFailoverReason`, and `resolveThinkingProfile` because it owns
Bedrock-specific throttle/not-ready/context-overflow error classification
for Anthropic-on-Bedrock traffic; its replay policy still shares the same
Claude-only `anthropic-by-model` guard.
@@ -879,7 +877,7 @@ api.registerProvider({
thinking-block dropping on the Anthropic side while overriding reasoning
output mode back to native, and the `minimax-fast-mode` stream family owns
fast-mode model rewrites on the shared stream path.
- Moonshot uses `catalog` plus `wrapStreamFn` because it still uses the shared
- Moonshot uses `catalog`, `resolveThinkingProfile`, and `wrapStreamFn` because it still uses the shared
OpenAI transport but needs provider-owned thinking payload normalization; the
`moonshot-thinking` stream family maps config plus `/think` state onto its
native binary thinking payload.
@@ -890,7 +888,7 @@ api.registerProvider({
injection on the shared proxy stream path while skipping `kilo/auto` and
other proxy model ids that do not support explicit reasoning payloads.
- Z.AI uses `resolveDynamicModel`, `prepareExtraParams`, `wrapStreamFn`,
`isCacheTtlEligible`, `isBinaryThinking`, `isModernModelRef`,
`isCacheTtlEligible`, `resolveThinkingProfile`, `isModernModelRef`,
`resolveUsageAuth`, and `fetchUsageSnapshot` because it owns GLM-5 fallback,
`tool_stream` defaults, binary thinking UX, modern-model matching, and both
usage auth + quota fetching; the `tool-stream-default-on` stream family keeps

View File

@@ -533,20 +533,19 @@ API key auth, and dynamic model resolution.
| 29 | `buildMissingAuthMessage` | Custom missing-auth hint |
| 30 | `suppressBuiltInModel` | Hide stale upstream rows |
| 31 | `augmentModelCatalog` | Synthetic forward-compat rows |
| 32 | `isBinaryThinking` | Binary thinking on/off |
| 33 | `supportsXHighThinking` | `xhigh` reasoning support |
| 34 | `supportsAdaptiveThinking` | Adaptive thinking support |
| 35 | `supportsMaxThinking` | `max` reasoning support |
| 36 | `resolveDefaultThinkingLevel` | Default `/think` policy |
| 37 | `isModernModelRef` | Live/smoke model matching |
| 38 | `prepareRuntimeAuth` | Token exchange before inference |
| 39 | `resolveUsageAuth` | Custom usage credential parsing |
| 40 | `fetchUsageSnapshot` | Custom usage endpoint |
| 41 | `createEmbeddingProvider` | Provider-owned embedding adapter for memory/search |
| 42 | `buildReplayPolicy` | Custom transcript replay/compaction policy |
| 43 | `sanitizeReplayHistory` | Provider-specific replay rewrites after generic cleanup |
| 44 | `validateReplayTurns` | Strict replay-turn validation before the embedded runner |
| 45 | `onModelSelected` | Post-selection callback (e.g. telemetry) |
| 32 | `resolveThinkingProfile` | Model-specific `/think` option set |
| 33 | `isBinaryThinking` | Binary thinking on/off compatibility |
| 34 | `supportsXHighThinking` | `xhigh` reasoning support compatibility |
| 35 | `resolveDefaultThinkingLevel` | Default `/think` policy compatibility |
| 36 | `isModernModelRef` | Live/smoke model matching |
| 37 | `prepareRuntimeAuth` | Token exchange before inference |
| 38 | `resolveUsageAuth` | Custom usage credential parsing |
| 39 | `fetchUsageSnapshot` | Custom usage endpoint |
| 40 | `createEmbeddingProvider` | Provider-owned embedding adapter for memory/search |
| 41 | `buildReplayPolicy` | Custom transcript replay/compaction policy |
| 42 | `sanitizeReplayHistory` | Provider-specific replay rewrites after generic cleanup |
| 43 | `validateReplayTurns` | Strict replay-turn validation before the embedded runner |
| 44 | `onModelSelected` | Post-selection callback (e.g. telemetry) |
Prompt tuning note:

View File

@@ -65,7 +65,7 @@ programmatic delivery.
| `--reply-to \<target\>` | Delivery target override |
| `--reply-channel \<name\>` | Delivery channel override |
| `--reply-account \<id\>` | Delivery account id override |
| `--thinking \<level\>` | Set thinking level (off, minimal, low, medium, high, xhigh) |
| `--thinking \<level\>` | Set thinking level (validated against the selected model's provider profile) |
| `--verbose \<on\|full\|off\>` | Set verbose level |
| `--timeout \<seconds\>` | Override agent timeout |
| `--json` | Output structured JSON |

View File

@@ -93,7 +93,7 @@ Built-in commands available today:
- `/compact [instructions]` compacts the session context. See [/concepts/compaction](/concepts/compaction).
- `/stop` aborts the current run.
- `/session idle <duration|off>` and `/session max-age <duration|off>` manage thread-binding expiry.
- `/think <off|minimal|low|medium|high|xhigh>` sets the thinking level. Aliases: `/thinking`, `/t`.
- `/think <level>` sets the thinking level. Options come from the active model's provider profile; common levels are `off`, `minimal`, `low`, `medium`, and `high`, and custom levels such as `xhigh`, `adaptive`, `max`, or a binary `on` appear only where the model supports them. Aliases: `/thinking`, `/t`.
- `/verbose on|off|full` toggles verbose output. Alias: `/v`.
- `/trace on|off` toggles plugin trace output for the current session.
- `/fast [status|on|off]` shows or sets fast mode.

View File

@@ -21,8 +21,9 @@ title: "Thinking Levels"
- `x-high`, `x_high`, `extra-high`, `extra high`, and `extra_high` map to `xhigh`.
- `highest` maps to `high`.
- Provider notes:
- `adaptive` is only advertised in native command menus and pickers for providers/models that declare adaptive thinking support. It remains accepted as a typed directive for compatibility with existing configs and aliases.
- `max` is only advertised in native command menus and pickers for providers/models that declare max thinking support. Existing stored `max` settings are remapped to the largest supported level for the selected model when the model does not support `max`.
- Thinking menus and pickers are provider-profile driven. Provider plugins declare the exact level set for the selected model, including labels such as binary `on`.
- `adaptive`, `xhigh`, and `max` are only advertised for provider/model profiles that support them. Typed directives for unsupported levels are rejected with a message listing that model's valid options.
- Stored levels that the selected model no longer supports, including old `max` values after a model switch, are remapped to the largest supported level for that model (see the sketch after these notes).
- Anthropic Claude 4.6 models default to `adaptive` when no explicit thinking level is set.
- Anthropic Claude Opus 4.7 does not default to adaptive thinking. Its API effort default remains provider-owned unless you explicitly set a thinking level.
- Anthropic Claude Opus 4.7 maps `/think xhigh` to adaptive thinking plus `output_config.effort: "xhigh"`, because `/think` is a thinking directive and `xhigh` is the Opus 4.7 effort setting.
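A minimal sketch of the remap behavior described in these notes, using the same rank ordering that this commit's `THINKING_LEVEL_RANKS` table declares; `remapStoredLevel` is a local illustration, not the shipped `resolveSupportedThinkingLevel` helper.

```ts
// Rank values mirror THINKING_LEVEL_RANKS from this commit; the helper is a
// simplified stand-in for how stored-but-unsupported levels get remapped.
const RANKS = {
  off: 0, minimal: 10, low: 20, medium: 30,
  high: 40, adaptive: 50, xhigh: 60, max: 70,
} as const;
type Level = keyof typeof RANKS;

function remapStoredLevel(stored: Level, supported: Level[]): Level {
  if (supported.includes(stored)) return stored;
  // Unsupported stored levels fall back to the largest level the model's
  // provider profile still offers.
  return supported.toSorted((a, b) => RANKS[b] - RANKS[a])[0] ?? "off";
}

// A session that stored "max" and then switched to a model whose profile only
// offers off|minimal|low|medium|high ends up on "high".
remapStoredLevel("max", ["off", "minimal", "low", "medium", "high"]); // "high"
```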
@@ -38,7 +39,7 @@ title: "Thinking Levels"
2. Session override (set by sending a directive-only message).
3. Per-agent default (`agents.list[].thinkingDefault` in config).
4. Global default (`agents.defaults.thinkingDefault` in config).
5. Fallback: `adaptive` for Anthropic Claude 4.6 models, `off` for Anthropic Claude Opus 4.7 unless explicitly configured, `low` for other reasoning-capable models, `off` otherwise.
5. Fallback: the provider-declared default when available; otherwise `low` for catalog models marked reasoning-capable, and `off` for everything else.
## Setting a session default
@@ -111,10 +112,13 @@ title: "Thinking Levels"
- The web chat thinking selector mirrors the session's stored level from the inbound session store/config when the page loads.
- Picking another level writes the session override immediately via `sessions.patch`; it does not wait for the next send and it is not a one-shot `thinkingOnce` override.
- The first option is always `Default (<resolved level>)`, where the resolved default comes from the active session model: `adaptive` for Claude 4.6 on Anthropic, `off` for Anthropic Claude Opus 4.7 unless configured, `low` for other reasoning-capable models, `off` otherwise.
- The picker stays provider-aware:
- most providers show `off | minimal | low | medium | high`
- Anthropic/Bedrock Claude 4.6 shows `off | minimal | low | medium | high | adaptive`
- Anthropic Claude Opus 4.7 shows `off | minimal | low | medium | high | xhigh | adaptive | max`
- Z.AI shows binary `off | on`
- The first option is always `Default (<resolved level>)`, where the resolved default comes from the active session model's provider thinking profile.
- The picker uses `thinkingOptions` returned by the gateway session row. The browser UI does not keep its own provider regex list; plugins own model-specific level sets.
- `/think:<level>` still works and updates the same stored session level, so chat directives and the picker stay in sync.
## Provider profiles
- Provider plugins can expose `resolveThinkingProfile(ctx)` to define the model's supported levels and default (see the sketch after this list).
- Each profile level has a stored canonical `id` (`off`, `minimal`, `low`, `medium`, `high`, `xhigh`, `adaptive`, or `max`) and may include a display `label`. Binary providers use `{ id: "low", label: "on" }`.
- Published legacy hooks (`supportsXHighThinking`, `isBinaryThinking`, and `resolveDefaultThinkingLevel`) remain as compatibility adapters, but new custom level sets should use `resolveThinkingProfile`.
- Gateway rows expose `thinkingOptions` and `thinkingDefault` so ACP/chat clients render the same profile that runtime validation uses.
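Putting these pieces together, a sketch of what a binary provider's profile looks like and how a gateway session row might surface it. The `thinkingOptions` shape below is an assumption for illustration, not the gateway's exact payload.

```ts
// Level ids stay canonical so stored session state remains portable across
// models; labels only affect what pickers display.
interface ThinkingProfile {
  levels: Array<{ id: string; label?: string }>;
  defaultLevel?: string;
}

const binaryProfile: ThinkingProfile = {
  levels: [
    { id: "off", label: "off" },
    { id: "low", label: "on" }, // stored as "low", rendered as "on"
  ],
  defaultLevel: "off",
};

// Assumed shape: a gateway session row exposing the same profile that runtime
// validation uses, so ACP/chat pickers never need provider-specific regexes.
const sessionRow = {
  thinkingOptions: binaryProfile.levels.map((level) => level.label ?? level.id), // ["off", "on"]
  thinkingDefault: binaryProfile.defaultLevel, // "off"
};
```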

View File

@@ -96,23 +96,22 @@ describe("amazon-bedrock provider plugin", () => {
const provider = await registerSingleProviderPlugin(amazonBedrockPlugin);
expect(
provider.resolveDefaultThinkingLevel?.({
provider.resolveThinkingProfile?.({
provider: "amazon-bedrock",
modelId: "us.anthropic.claude-opus-4-6-v1",
} as never),
).toBe("adaptive");
).toMatchObject({
levels: expect.arrayContaining([{ id: "adaptive" }]),
defaultLevel: "adaptive",
});
expect(
provider.resolveDefaultThinkingLevel?.({
provider.resolveThinkingProfile?.({
provider: "amazon-bedrock",
modelId: "amazon.nova-micro-v1:0",
} as never),
).toBeUndefined();
expect(
provider.supportsAdaptiveThinking?.({
provider: "amazon-bedrock",
modelId: "us.anthropic.claude-opus-4-6-v1",
} as never),
).toBe(true);
).toMatchObject({
levels: expect.not.arrayContaining([{ id: "adaptive" }]),
});
});
it("owns Anthropic-style replay policy for Claude Bedrock models", async () => {

View File

@@ -191,8 +191,16 @@ export function registerAmazonBedrockPlugin(api: OpenClawPluginApi): void {
}
return undefined;
},
supportsAdaptiveThinking: ({ modelId }) => claude46ModelRe.test(modelId.trim()),
resolveDefaultThinkingLevel: ({ modelId }) =>
claude46ModelRe.test(modelId.trim()) ? "adaptive" : undefined,
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(claude46ModelRe.test(modelId.trim()) ? [{ id: "adaptive" as const }] : []),
],
defaultLevel: claude46ModelRe.test(modelId.trim()) ? "adaptive" : undefined,
}),
});
}

View File

@@ -225,53 +225,31 @@ describe("anthropic provider replay hooks", () => {
reasoning: true,
});
expect(
provider.resolveDefaultThinkingLevel?.({
provider.resolveThinkingProfile?.({
provider: "anthropic",
modelId: "claude-opus-4-7",
} as never),
).toBe("off");
).toMatchObject({
levels: expect.arrayContaining([{ id: "xhigh" }, { id: "adaptive" }, { id: "max" }]),
defaultLevel: "off",
});
expect(
provider.resolveDefaultThinkingLevel?.({
provider.resolveThinkingProfile?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never),
).toBe("adaptive");
).toMatchObject({
levels: expect.arrayContaining([{ id: "adaptive" }]),
defaultLevel: "adaptive",
});
expect(
provider.supportsXHighThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-7",
} as never),
).toBe(true);
expect(
provider.supportsXHighThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never),
provider
.resolveThinkingProfile?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never)
?.levels.some((level) => level.id === "xhigh" || level.id === "max"),
).toBe(false);
expect(
provider.supportsMaxThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-7",
} as never),
).toBe(true);
expect(
provider.supportsMaxThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never),
).toBe(false);
expect(
provider.supportsAdaptiveThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-7",
} as never),
).toBe(true);
expect(
provider.supportsAdaptiveThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never),
).toBe(true);
});
it("resolves claude-cli synthetic oauth auth", async () => {

View File

@@ -494,16 +494,26 @@ export function buildAnthropicProvider(): ProviderPlugin {
buildReplayPolicy: buildAnthropicReplayPolicy,
isModernModelRef: ({ modelId }) => matchesAnthropicModernModel(modelId),
resolveReasoningOutputMode: () => "native",
supportsXHighThinking: ({ modelId }) => isAnthropicOpus47Model(modelId),
supportsAdaptiveThinking: ({ modelId }) => supportsAnthropicAdaptiveThinking(modelId),
supportsMaxThinking: ({ modelId }) => isAnthropicOpus47Model(modelId),
resolveThinkingProfile: ({ modelId }) => {
const levels: Array<{
id: "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | "max";
}> = [{ id: "off" }, { id: "minimal" }, { id: "low" }, { id: "medium" }, { id: "high" }];
if (isAnthropicOpus47Model(modelId)) {
levels.push({ id: "xhigh" }, { id: "adaptive" }, { id: "max" });
} else if (supportsAnthropicAdaptiveThinking(modelId)) {
levels.push({ id: "adaptive" });
}
return {
levels,
defaultLevel: isAnthropicOpus47Model(modelId)
? "off"
: matchesAnthropicModernModel(modelId) &&
shouldUseAnthropicAdaptiveThinkingDefault(modelId)
? "adaptive"
: undefined,
};
},
wrapStreamFn: wrapAnthropicProviderStream,
resolveDefaultThinkingLevel: ({ modelId }) =>
isAnthropicOpus47Model(modelId)
? "off"
: matchesAnthropicModernModel(modelId) && shouldUseAnthropicAdaptiveThinkingDefault(modelId)
? "adaptive"
: undefined,
resolveUsageAuth: async (ctx) => await ctx.resolveOAuthToken(),
fetchUsageSnapshot: async (ctx) =>
await fetchClaudeUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn),

View File

@@ -160,7 +160,11 @@ describe("codex provider", () => {
reasoning: true,
compat: { supportsReasoningEffort: true },
});
expect(provider.supportsXHighThinking?.({ provider: "codex", modelId: "o4-mini" })).toBe(true);
expect(
provider
.resolveThinkingProfile?.({ provider: "codex", modelId: "o4-mini" } as never)
?.levels.some((level) => level.id === "xhigh"),
).toBe(true);
});
it("declares synthetic auth because the harness owns Codex credentials", () => {

View File

@@ -89,7 +89,16 @@ export function buildCodexProvider(options: BuildCodexProviderOptions = {}): Pro
source: "codex-app-server",
mode: "token",
}),
supportsXHighThinking: ({ modelId }) => isKnownXHighCodexModel(modelId),
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(isKnownXHighCodexModel(modelId) ? [{ id: "xhigh" as const }] : []),
],
}),
isModernModelRef: ({ modelId }) => isModernCodexModel(modelId),
};
}

View File

@@ -34,10 +34,9 @@ const resolveConfiguredBindingRouteMock = vi.hoisted(() =>
vi.fn<ResolveConfiguredBindingRoute>(() => createUnboundConfiguredRouteResult()),
);
const providerThinkingMocks = vi.hoisted(() => ({
resolveProviderAdaptiveThinking: vi.fn(),
resolveProviderBinaryThinking: vi.fn(),
resolveProviderDefaultThinkingLevel: vi.fn(),
resolveProviderMaxThinking: vi.fn(),
resolveProviderThinkingProfile: vi.fn(),
resolveProviderXHighThinking: vi.fn(),
}));
const buildModelsProviderDataMock = vi.hoisted(() => vi.fn());
@@ -129,10 +128,9 @@ let resolveDiscordNativeChoiceContext: typeof import("./native-command-ui.js").r
async function loadDiscordThinkAutocompleteModulesForTest() {
vi.resetModules();
vi.doMock("../../../../src/plugins/provider-thinking.js", () => ({
resolveProviderAdaptiveThinking: providerThinkingMocks.resolveProviderAdaptiveThinking,
resolveProviderBinaryThinking: providerThinkingMocks.resolveProviderBinaryThinking,
resolveProviderDefaultThinkingLevel: providerThinkingMocks.resolveProviderDefaultThinkingLevel,
resolveProviderMaxThinking: providerThinkingMocks.resolveProviderMaxThinking,
resolveProviderThinkingProfile: providerThinkingMocks.resolveProviderThinkingProfile,
resolveProviderXHighThinking: providerThinkingMocks.resolveProviderXHighThinking,
}));
const commandAuth = await import("openclaw/plugin-sdk/command-auth");
@@ -147,9 +145,8 @@ async function loadDiscordThinkAutocompleteModulesForTest() {
describe("discord native /think autocomplete", () => {
beforeAll(async () => {
providerThinkingMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderAdaptiveThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderMaxThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderThinkingProfile.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
? true
@@ -176,14 +173,10 @@ describe("discord native /think autocomplete", () => {
resolveConfiguredBindingRouteMock.mockReturnValue(createUnboundConfiguredRouteResult());
providerThinkingMocks.resolveProviderBinaryThinking.mockReset();
providerThinkingMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderAdaptiveThinking.mockReset();
providerThinkingMocks.resolveProviderAdaptiveThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReset();
providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderMaxThinking.mockReset();
providerThinkingMocks.resolveProviderMaxThinking.mockImplementation(({ provider, context }) =>
provider === "anthropic" && context.modelId === "claude-opus-4-7" ? true : undefined,
);
providerThinkingMocks.resolveProviderThinkingProfile.mockReset();
providerThinkingMocks.resolveProviderThinkingProfile.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderXHighThinking.mockReset();
providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
@@ -275,6 +268,12 @@ describe("discord native /think autocomplete", () => {
});
it("includes max only for provider-advertised models", async () => {
providerThinkingMocks.resolveProviderThinkingProfile.mockImplementation(
({ provider, context }) =>
provider === "anthropic" && context.modelId === "claude-opus-4-7"
? { levels: [{ id: "off" }, { id: "max" }] }
: undefined,
);
fs.writeFileSync(
STORE_PATH,
JSON.stringify({

View File

@@ -138,10 +138,20 @@ export default definePluginEntry({
resolveDynamicModel: (ctx) => resolveCopilotForwardCompatModel(ctx),
wrapStreamFn: wrapCopilotProviderStream,
buildReplayPolicy: ({ modelId }) => buildGithubCopilotReplayPolicy(modelId),
supportsXHighThinking: ({ modelId }) =>
COPILOT_XHIGH_MODEL_IDS.includes(
(normalizeOptionalLowercaseString(modelId) ?? "") as never,
),
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(COPILOT_XHIGH_MODEL_IDS.includes(
(normalizeOptionalLowercaseString(modelId) ?? "") as never,
)
? [{ id: "xhigh" as const }]
: []),
],
}),
prepareRuntimeAuth: async (ctx) => {
const { resolveCopilotApiToken } = await loadGithubCopilotRuntime();
const token = await resolveCopilotApiToken({

View File

@@ -1,9 +1,19 @@
import type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
} from "openclaw/plugin-sdk/core";
import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
import { createGoogleThinkingStreamWrapper } from "./thinking-api.js";
import { createGoogleThinkingStreamWrapper, isGoogleGemini3ProModel } from "./thinking-api.js";
export const GOOGLE_GEMINI_PROVIDER_HOOKS = {
...buildProviderReplayFamilyHooks({
family: "google-gemini",
}),
resolveThinkingProfile: ({ modelId }: ProviderDefaultThinkingPolicyContext) =>
({
levels: isGoogleGemini3ProModel(modelId)
? [{ id: "off" }, { id: "low" }, { id: "high" }]
: [{ id: "off" }, { id: "minimal" }, { id: "low" }, { id: "medium" }, { id: "high" }],
}) satisfies ProviderThinkingProfile,
wrapStreamFn: createGoogleThinkingStreamWrapper,
};

View File

@@ -7,17 +7,17 @@ describe("kimi provider plugin", () => {
const provider = await registerSingleProviderPlugin(plugin);
expect(
provider.isBinaryThinking?.({
provider: "kimi",
modelId: "kimi-code",
} as never),
).toBe(true);
expect(
provider.resolveDefaultThinkingLevel?.({
provider.resolveThinkingProfile?.({
provider: "kimi",
modelId: "kimi-code",
reasoning: true,
} as never),
).toBe("off");
).toEqual({
levels: [
{ id: "off", label: "off" },
{ id: "low", label: "on" },
],
defaultLevel: "off",
});
});
});

View File

@@ -96,8 +96,13 @@ export default definePluginEntry({
},
},
buildReplayPolicy: () => KIMI_REPLAY_POLICY,
isBinaryThinking: () => true,
resolveDefaultThinkingLevel: () => "off",
resolveThinkingProfile: () => ({
levels: [
{ id: "off", label: "off" },
{ id: "low", label: "on" },
],
defaultLevel: "off",
}),
wrapStreamFn: wrapKimiProviderStream,
});
},

View File

@@ -178,7 +178,7 @@ describe("llm-task tool (json-only)", () => {
it("throws on unsupported xhigh thinking level", async () => {
const tool = createLlmTaskTool(fakeApi());
await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow(
/only supported/i,
/not supported/i,
);
});

View File

@@ -4,11 +4,10 @@ import { Type } from "@sinclair/typebox";
import Ajv from "ajv";
import { normalizeOptionalString } from "openclaw/plugin-sdk/text-runtime";
import {
formatXHighModelHint,
formatThinkingLevels,
isThinkingLevelSupported,
normalizeThinkLevel,
resolvePreferredOpenClawTmpDir,
resolveSupportedThinkingLevel,
supportsXHighThinking,
} from "../api.js";
import type { OpenClawPluginApi } from "../api.js";
@@ -145,15 +144,17 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
);
}
let resolvedThinkLevel = thinkLevel;
if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`);
}
if (thinkLevel === "max") {
resolvedThinkLevel = resolveSupportedThinkingLevel({
if (
thinkLevel &&
!isThinkingLevelSupported({
provider,
model,
level: thinkLevel,
});
})
) {
throw new Error(
`Thinking level "${thinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model)}.`,
);
}
const timeoutMs =

View File

@@ -1,5 +1,5 @@
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
import { applyMistralModelCompat } from "./api.js";
import { applyMistralModelCompat, MISTRAL_SMALL_LATEST_ID } from "./api.js";
import { mistralMediaUnderstandingProvider } from "./media-understanding-provider.js";
import { mistralMemoryEmbeddingProviderAdapter } from "./memory-embedding-adapter.js";
import { applyMistralConfig, MISTRAL_DEFAULT_MODEL_REF } from "./onboard.js";
@@ -46,6 +46,10 @@ export default defineSingleProviderPluginEntry({
normalizeResolvedModel: ({ model }) => applyMistralModelCompat(model),
contributeResolvedModelCompat: ({ modelId, model }) =>
contributeMistralResolvedModelCompat({ modelId, model }),
resolveThinkingProfile: ({ modelId }) =>
modelId === MISTRAL_SMALL_LATEST_ID
? { levels: [{ id: "off" }, { id: "high" }], defaultLevel: "off" }
: undefined,
buildReplayPolicy: () => buildMistralReplayPolicy(),
},
register(api) {

View File

@@ -58,6 +58,13 @@ export default defineSingleProviderPluginEntry({
applyMoonshotNativeStreamingUsageCompat(providerConfig),
...OPENAI_COMPATIBLE_REPLAY_HOOKS,
...MOONSHOT_THINKING_STREAM_HOOKS,
resolveThinkingProfile: () => ({
levels: [
{ id: "off", label: "off" },
{ id: "low", label: "on" },
],
defaultLevel: "off",
}),
},
register(api) {
api.registerMediaUnderstandingProvider(moonshotMediaUnderstandingProvider);

View File

@@ -387,8 +387,18 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
},
resolveDynamicModel: (ctx) => resolveCodexForwardCompatModel(ctx),
buildAuthDoctorHint: (ctx) => buildOpenAICodexAuthDoctorHint(ctx),
supportsXHighThinking: ({ modelId }) =>
matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS),
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS)
? [{ id: "xhigh" as const }]
: []),
],
}),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_CODEX_MODERN_MODEL_IDS),
preferRuntimeResolvedModel: (ctx) => {
if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {

View File

@@ -121,16 +121,20 @@ describe("buildOpenAIProvider", () => {
const provider = buildOpenAIProvider();
expect(
provider.supportsXHighThinking?.({
provider: "openai",
modelId: "gpt-5.4-mini",
} as never),
provider
.resolveThinkingProfile?.({
provider: "openai",
modelId: "gpt-5.4-mini",
} as never)
?.levels.some((level) => level.id === "xhigh"),
).toBe(true);
expect(
provider.supportsXHighThinking?.({
provider: "openai",
modelId: "gpt-5.4-nano",
} as never),
provider
.resolveThinkingProfile?.({
provider: "openai",
modelId: "gpt-5.4-nano",
} as never)
?.levels.some((level) => level.id === "xhigh"),
).toBe(true);
const entries = provider.augmentModelCatalog?.({

View File

@@ -218,7 +218,18 @@ export function buildOpenAIProvider(): ProviderPlugin {
matchesContextOverflowError: ({ errorMessage }) =>
/content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage),
resolveReasoningOutputMode: () => "native",
supportsXHighThinking: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS),
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS)
? [{ id: "xhigh" as const }]
: []),
],
}),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_MODERN_MODEL_IDS),
buildMissingAuthMessage: (ctx) => {
if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {

View File

@@ -197,6 +197,7 @@ export default defineSingleProviderPluginEntry({
shouldContributeXaiCompat({ modelId, model }) ? resolveXaiModelCompatPatch() : undefined,
normalizeModelId: ({ modelId }) => normalizeXaiModelId(modelId),
resolveDynamicModel: (ctx) => resolveXaiForwardCompatModel({ providerId: PROVIDER_ID, ctx }),
resolveThinkingProfile: () => ({ levels: [{ id: "off" }], defaultLevel: "off" }),
isModernModelRef: ({ modelId }) => isModernXaiModel(modelId),
},
register(api) {

View File

@@ -280,7 +280,13 @@ export default definePluginEntry({
...OPENAI_COMPATIBLE_REPLAY_HOOKS,
prepareExtraParams: (ctx) => defaultToolStreamExtraParams(ctx.extraParams),
...TOOL_STREAM_DEFAULT_ON_HOOKS,
isBinaryThinking: () => true,
resolveThinkingProfile: () => ({
levels: [
{ id: "off", label: "off" },
{ id: "low", label: "on" },
],
defaultLevel: "off",
}),
isModernModelRef: ({ modelId }) => {
const lower = normalizeLowercaseStringOrEmpty(modelId);
return (

View File

@@ -90,6 +90,7 @@ vi.mock("../auto-reply/thinking.js", () => ({
formatXHighModelHint: () => "model-x",
normalizeThinkLevel: (v?: string) => v || undefined,
normalizeVerboseLevel: (v?: string) => v || undefined,
isThinkingLevelSupported: () => true,
resolveSupportedThinkingLevel: ({ level }: { level?: string }) => level,
supportsXHighThinking: () => false,
}));

View File

@@ -1,10 +1,9 @@
import {
formatThinkingLevels,
formatXHighModelHint,
isThinkingLevelSupported,
normalizeThinkLevel,
normalizeVerboseLevel,
resolveSupportedThinkingLevel,
supportsXHighThinking,
type VerboseLevel,
} from "../auto-reply/thinking.js";
import { formatCliCommand } from "../cli/command-format.js";
@@ -783,33 +782,27 @@ async function agentCommandInternal(
catalog: catalogForThinking,
});
}
if (resolvedThinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
if (!isThinkingLevelSupported({ provider, model, level: resolvedThinkLevel })) {
const explicitThink = Boolean(thinkOnce || thinkOverride);
if (explicitThink) {
throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`);
throw new Error(
`Thinking level "${resolvedThinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model)}.`,
);
}
resolvedThinkLevel = "high";
if (sessionEntry && sessionStore && sessionKey && sessionEntry.thinkingLevel === "xhigh") {
const entry = sessionEntry;
entry.thinkingLevel = "high";
entry.updatedAt = Date.now();
await persistSessionEntry({
sessionStore,
sessionKey,
storePath,
entry,
});
}
}
if (resolvedThinkLevel === "max") {
const fallbackThinkLevel = resolveSupportedThinkingLevel({
provider,
model,
level: resolvedThinkLevel,
});
if (fallbackThinkLevel !== resolvedThinkLevel) {
const previousThinkLevel = resolvedThinkLevel;
resolvedThinkLevel = fallbackThinkLevel;
if (sessionEntry && sessionStore && sessionKey && sessionEntry.thinkingLevel === "max") {
if (
sessionEntry &&
sessionStore &&
sessionKey &&
sessionEntry.thinkingLevel === previousThinkLevel
) {
const entry = sessionEntry;
entry.thinkingLevel = fallbackThinkLevel;
entry.updatedAt = Date.now();

View File

@@ -10,9 +10,8 @@ import { applyModelOverrideToSessionEntry } from "../../sessions/model-overrides
import { normalizeLowercaseStringOrEmpty } from "../../shared/string-coerce.js";
import {
formatThinkingLevels,
formatXHighModelHint,
isThinkingLevelSupported,
resolveSupportedThinkingLevel,
supportsXHighThinking,
} from "../thinking.js";
import type { ReplyPayload } from "../types.js";
import { resolveModelSelectionFromDirective } from "./directive-handling.model-selection.js";
@@ -291,41 +290,38 @@ export async function handleDirectiveOnly(
if (
directives.hasThinkDirective &&
directives.thinkLevel === "xhigh" &&
!supportsXHighThinking(resolvedProvider, resolvedModel)
directives.thinkLevel &&
!isThinkingLevelSupported({
provider: resolvedProvider,
model: resolvedModel,
level: directives.thinkLevel,
})
) {
return {
text: `Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`,
text: `Thinking level "${directives.thinkLevel}" is not supported for ${resolvedProvider}/${resolvedModel}. Use one of: ${formatThinkingLevels(resolvedProvider, resolvedModel)}.`,
};
}
const resolvedDirectiveThinkLevel =
directives.hasThinkDirective && directives.thinkLevel
? resolveSupportedThinkingLevel({
provider: resolvedProvider,
model: resolvedModel,
level: directives.thinkLevel,
})
: directives.thinkLevel;
const resolvedDirectiveThinkLevel = directives.thinkLevel;
const nextThinkLevel = directives.hasThinkDirective
? resolvedDirectiveThinkLevel
: ((sessionEntry?.thinkingLevel as ThinkLevel | undefined) ?? currentThinkLevel);
const shouldDowngradeXHigh =
const remappedUnsupportedThinkLevel =
!directives.hasThinkDirective &&
nextThinkLevel === "xhigh" &&
!supportsXHighThinking(resolvedProvider, resolvedModel);
const remappedMaxThinkLevel =
nextThinkLevel === "max"
nextThinkLevel &&
!isThinkingLevelSupported({
provider: resolvedProvider,
model: resolvedModel,
level: nextThinkLevel,
})
? resolveSupportedThinkingLevel({
provider: resolvedProvider,
model: resolvedModel,
level: nextThinkLevel,
})
: undefined;
const shouldRemapMax =
nextThinkLevel === "max" &&
remappedMaxThinkLevel !== undefined &&
remappedMaxThinkLevel !== "max";
const shouldRemapUnsupportedThinkLevel =
Boolean(remappedUnsupportedThinkLevel) && remappedUnsupportedThinkLevel !== nextThinkLevel;
const prevElevatedLevel =
currentElevatedLevel ??
@@ -351,8 +347,7 @@ export async function handleDirectiveOnly(
(directives.hasExecDirective && directives.hasExecOptions && allowInternalExecPersistence) ||
Boolean(modelSelection) ||
directives.hasQueueDirective ||
shouldDowngradeXHigh ||
shouldRemapMax;
shouldRemapUnsupportedThinkLevel;
const fastModeChanged =
directives.hasFastDirective &&
directives.fastMode !== undefined &&
@@ -366,11 +361,8 @@ export async function handleDirectiveOnly(
if (directives.hasFastDirective && directives.fastMode !== undefined) {
sessionEntry.fastMode = directives.fastMode;
}
if (shouldDowngradeXHigh) {
sessionEntry.thinkingLevel = "high";
}
if (shouldRemapMax && remappedMaxThinkLevel) {
sessionEntry.thinkingLevel = remappedMaxThinkLevel;
if (shouldRemapUnsupportedThinkLevel && remappedUnsupportedThinkLevel) {
sessionEntry.thinkingLevel = remappedUnsupportedThinkLevel;
}
if (
directives.hasVerboseDirective &&
@@ -573,14 +565,13 @@ export async function handleDirectiveOnly(
if (directives.hasExecDirective && directives.hasExecOptions && !allowInternalExecPersistence) {
parts.push(formatDirectiveAck(formatInternalExecPersistenceDeniedText()));
}
if (shouldDowngradeXHigh) {
if (
!directives.hasThinkDirective &&
shouldRemapUnsupportedThinkLevel &&
remappedUnsupportedThinkLevel
) {
parts.push(
`Thinking level set to high (xhigh not supported for ${resolvedProvider}/${resolvedModel}).`,
);
}
if (!directives.hasThinkDirective && shouldRemapMax && remappedMaxThinkLevel) {
parts.push(
`Thinking level set to ${remappedMaxThinkLevel} (max not supported for ${resolvedProvider}/${resolvedModel}).`,
`Thinking level set to ${remappedUnsupportedThinkLevel} (${nextThinkLevel} not supported for ${resolvedProvider}/${resolvedModel}).`,
);
}
if (modelSelection) {

View File

@@ -27,11 +27,11 @@ import { resolveEnvelopeFormatOptions } from "../envelope.js";
import type { MsgContext, TemplateContext } from "../templating.js";
import {
type ElevatedLevel,
formatXHighModelHint,
formatThinkingLevels,
isThinkingLevelSupported,
normalizeThinkLevel,
type ReasoningLevel,
resolveSupportedThinkingLevel,
supportsXHighThinking,
type ThinkLevel,
type VerboseLevel,
} from "../thinking.js";
@@ -414,10 +414,7 @@ export async function runPreparedReply(
if (!resolvedThinkLevel && prefixedBodyBase) {
const parts = prefixedBodyBase.split(/\s+/);
const maybeLevel = normalizeThinkLevel(parts[0]);
if (
maybeLevel &&
(maybeLevel === "max" || maybeLevel !== "xhigh" || supportsXHighThinking(provider, model))
) {
if (maybeLevel && isThinkingLevelSupported({ provider, model, level: maybeLevel })) {
resolvedThinkLevel = maybeLevel;
prefixedBodyBase = parts.slice(1).join(" ").trim();
}
@@ -487,15 +484,28 @@ export async function runPreparedReply(
if (!resolvedThinkLevel) {
resolvedThinkLevel = await modelState.resolveDefaultThinkingLevel();
}
if (resolvedThinkLevel === "max") {
if (!isThinkingLevelSupported({ provider, model, level: resolvedThinkLevel })) {
const explicitThink = directives.hasThinkDirective && directives.thinkLevel !== undefined;
if (explicitThink) {
typing.cleanup();
return {
text: `Thinking level "${resolvedThinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model)}.`,
};
}
const fallbackThinkLevel = resolveSupportedThinkingLevel({
provider,
model,
level: resolvedThinkLevel,
});
if (fallbackThinkLevel !== resolvedThinkLevel) {
const previousThinkLevel = resolvedThinkLevel;
resolvedThinkLevel = fallbackThinkLevel;
if (sessionEntry && sessionStore && sessionKey && sessionEntry.thinkingLevel === "max") {
if (
sessionEntry &&
sessionStore &&
sessionKey &&
sessionEntry.thinkingLevel === previousThinkLevel
) {
sessionEntry.thinkingLevel = fallbackThinkLevel;
sessionEntry.updatedAt = Date.now();
sessionStore[sessionKey] = sessionEntry;
@@ -508,27 +518,6 @@ export async function runPreparedReply(
}
}
}
if (resolvedThinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
const explicitThink = directives.hasThinkDirective && directives.thinkLevel !== undefined;
if (explicitThink) {
typing.cleanup();
return {
text: `Thinking level "xhigh" is only supported for ${formatXHighModelHint()}. Use /think high or switch to one of those models.`,
};
}
resolvedThinkLevel = "high";
if (sessionEntry && sessionStore && sessionKey && sessionEntry.thinkingLevel === "xhigh") {
sessionEntry.thinkingLevel = "high";
sessionEntry.updatedAt = Date.now();
sessionStore[sessionKey] = sessionEntry;
if (storePath) {
const { updateSessionStore } = await loadSessionStoreRuntime();
await updateSessionStore(storePath, (store) => {
store[sessionKey] = sessionEntry;
});
}
}
}
const sessionIdFinal = sessionId ?? crypto.randomUUID();
const sessionFilePathOptions = resolveSessionFilePathOptions({ agentId, storePath });
const resolvePreparedSessionState = (): {

View File

@@ -28,7 +28,17 @@ export type ThinkingCatalogEntry = {
reasoning?: boolean;
};
const BASE_THINKING_LEVELS: ThinkLevel[] = ["off", "minimal", "low", "medium", "high"];
export const BASE_THINKING_LEVELS: ThinkLevel[] = ["off", "minimal", "low", "medium", "high"];
export const THINKING_LEVEL_RANKS: Record<ThinkLevel, number> = {
off: 0,
minimal: 10,
low: 20,
medium: 30,
high: 40,
adaptive: 50,
xhigh: 60,
max: 70,
};
const NO_THINKING_LEVELS: ThinkLevel[] = [...BASE_THINKING_LEVELS];
export function isBinaryThinkingProvider(provider?: string | null): boolean {
@@ -102,10 +112,6 @@ export function formatXHighModelHint(): string {
return "provider models that advertise xhigh reasoning";
}
export function formatMaxModelHint(): string {
return "provider models that advertise max reasoning";
}
export function resolveThinkingDefaultForModel(params: {
provider: string;
model: string;

View File

@@ -1,10 +1,9 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
const providerRuntimeMocks = vi.hoisted(() => ({
resolveProviderAdaptiveThinking: vi.fn(),
resolveProviderBinaryThinking: vi.fn(),
resolveProviderDefaultThinkingLevel: vi.fn(),
resolveProviderMaxThinking: vi.fn(),
resolveProviderThinkingProfile: vi.fn(),
resolveProviderXHighThinking: vi.fn(),
}));
@@ -12,29 +11,27 @@ let listThinkingLevelLabels: typeof import("./thinking.js").listThinkingLevelLab
let listThinkingLevels: typeof import("./thinking.js").listThinkingLevels;
let normalizeReasoningLevel: typeof import("./thinking.js").normalizeReasoningLevel;
let normalizeThinkLevel: typeof import("./thinking.js").normalizeThinkLevel;
let resolveSupportedThinkingLevel: typeof import("./thinking.js").resolveSupportedThinkingLevel;
let resolveThinkingDefaultForModel: typeof import("./thinking.js").resolveThinkingDefaultForModel;
async function loadFreshThinkingModuleForTest() {
vi.resetModules();
vi.doMock("../plugins/provider-thinking.js", () => ({
resolveProviderAdaptiveThinking: providerRuntimeMocks.resolveProviderAdaptiveThinking,
resolveProviderBinaryThinking: providerRuntimeMocks.resolveProviderBinaryThinking,
resolveProviderDefaultThinkingLevel: providerRuntimeMocks.resolveProviderDefaultThinkingLevel,
resolveProviderMaxThinking: providerRuntimeMocks.resolveProviderMaxThinking,
resolveProviderThinkingProfile: providerRuntimeMocks.resolveProviderThinkingProfile,
resolveProviderXHighThinking: providerRuntimeMocks.resolveProviderXHighThinking,
}));
return await import("./thinking.js");
}
beforeEach(async () => {
providerRuntimeMocks.resolveProviderAdaptiveThinking.mockReset();
providerRuntimeMocks.resolveProviderAdaptiveThinking.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderBinaryThinking.mockReset();
providerRuntimeMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockReset();
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderMaxThinking.mockReset();
providerRuntimeMocks.resolveProviderMaxThinking.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderThinkingProfile.mockReset();
providerRuntimeMocks.resolveProviderThinkingProfile.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderXHighThinking.mockReset();
providerRuntimeMocks.resolveProviderXHighThinking.mockReturnValue(undefined);
@@ -43,6 +40,7 @@ beforeEach(async () => {
listThinkingLevels,
normalizeReasoningLevel,
normalizeThinkLevel,
resolveSupportedThinkingLevel,
resolveThinkingDefaultForModel,
} = await loadFreshThinkingModuleForTest());
});
@@ -126,18 +124,6 @@ describe("listThinkingLevels", () => {
expect(listThinkingLevels(undefined, "gpt-4.1-mini")).not.toContain("xhigh");
});
it("uses provider runtime hooks for adaptive support", () => {
providerRuntimeMocks.resolveProviderAdaptiveThinking.mockReturnValue(true);
expect(listThinkingLevels("demo", "demo-model")).toContain("adaptive");
});
it("uses provider runtime hooks for max support", () => {
providerRuntimeMocks.resolveProviderMaxThinking.mockReturnValue(true);
expect(listThinkingLevels("demo", "demo-model")).toContain("max");
});
it("does not include max without provider support", () => {
expect(listThinkingLevels("openai", "gpt-5.4")).not.toContain("max");
});
@@ -147,13 +133,40 @@ describe("listThinkingLevels", () => {
expect(listThinkingLevels("openai", "gpt-5.4")).not.toContain("adaptive");
});
it("includes adaptive for provider-advertised models", () => {
providerRuntimeMocks.resolveProviderAdaptiveThinking.mockImplementation(
({ provider, context }) =>
provider === "anthropic" && context.modelId === "claude-opus-4-6" ? true : undefined,
it("uses provider thinking profiles for adaptive and max support", () => {
providerRuntimeMocks.resolveProviderThinkingProfile.mockImplementation(({ provider }) =>
provider === "anthropic"
? { levels: [{ id: "off" }, { id: "adaptive" }, { id: "max" }] }
: undefined,
);
expect(listThinkingLevels("anthropic", "claude-opus-4-6")).toContain("adaptive");
expect(listThinkingLevels("anthropic", "claude-opus-4-7")).toContain("max");
});
it("uses provider thinking profiles ahead of legacy hooks", () => {
providerRuntimeMocks.resolveProviderThinkingProfile.mockReturnValue({
levels: [{ id: "off" }, { id: "low", label: "on" }],
defaultLevel: "off",
});
providerRuntimeMocks.resolveProviderXHighThinking.mockReturnValue(true);
expect(listThinkingLevels("demo", "demo-model")).toEqual(["off", "low"]);
expect(listThinkingLevelLabels("demo", "demo-model")).toEqual(["off", "on"]);
});
it("maps stale unsupported levels to the largest profile level", () => {
providerRuntimeMocks.resolveProviderThinkingProfile.mockReturnValue({
levels: [{ id: "off" }, { id: "high" }],
});
expect(
resolveSupportedThinkingLevel({
provider: "demo",
model: "demo-model",
level: "max",
}),
).toBe("high");
});
});

View File

@@ -1,7 +1,9 @@
import { normalizeProviderId } from "../agents/provider-id.js";
import {
listThinkingLevels as listThinkingLevelsFallback,
BASE_THINKING_LEVELS,
normalizeThinkLevel,
resolveThinkingDefaultForModel as resolveThinkingDefaultForModelFallback,
THINKING_LEVEL_RANKS,
} from "./thinking.shared.js";
import type { ThinkLevel, ThinkingCatalogEntry } from "./thinking.shared.js";
export {
@@ -29,118 +31,182 @@ export type {
VerboseLevel,
} from "./thinking.shared.js";
import {
resolveProviderAdaptiveThinking,
resolveProviderBinaryThinking,
resolveProviderDefaultThinkingLevel,
resolveProviderMaxThinking,
resolveProviderThinkingProfile,
resolveProviderXHighThinking,
} from "../plugins/provider-thinking.js";
import type { ProviderThinkingProfile } from "../plugins/provider-thinking.types.js";
import {
normalizeOptionalLowercaseString,
normalizeOptionalString,
} from "../shared/string-coerce.js";
export function isBinaryThinkingProvider(provider?: string | null, model?: string | null): boolean {
const providerRaw = normalizeOptionalString(provider);
type ThinkingLevelOption = {
id: ThinkLevel;
label: string;
rank: number;
};
type ResolvedThinkingProfile = {
levels: ThinkingLevelOption[];
defaultLevel?: ThinkLevel | null;
};
function resolveThinkingPolicyContext(params: {
provider?: string | null;
model?: string | null;
catalog?: ThinkingCatalogEntry[];
}) {
const providerRaw = normalizeOptionalString(params.provider);
const normalizedProvider = providerRaw ? normalizeProviderId(providerRaw) : "";
if (!normalizedProvider) {
return false;
const modelId = normalizeOptionalString(params.model) ?? "";
const modelKey = normalizeOptionalLowercaseString(params.model) ?? "";
const candidate = params.catalog?.find(
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
);
return { normalizedProvider, modelId, modelKey, reasoning: candidate?.reasoning };
}
function normalizeProfileLevel(
level: ProviderThinkingProfile["levels"][number],
): ThinkingLevelOption | undefined {
const normalized = normalizeThinkLevel(level.id);
if (!normalized) {
return undefined;
}
return {
id: normalized,
label: normalizeOptionalString(level.label) ?? normalized,
rank: Number.isFinite(level.rank) ? (level.rank as number) : THINKING_LEVEL_RANKS[normalized],
};
}
function normalizeThinkingProfile(profile: ProviderThinkingProfile): ResolvedThinkingProfile {
const byId = new Map<ThinkLevel, ThinkingLevelOption>();
for (const raw of profile.levels) {
const level = normalizeProfileLevel(raw);
if (level) {
byId.set(level.id, level);
}
}
const levels = [...byId.values()].toSorted((a, b) => a.rank - b.rank);
const rawDefaultLevel = profile.defaultLevel
? normalizeThinkLevel(profile.defaultLevel)
: undefined;
const defaultLevel = rawDefaultLevel && byId.has(rawDefaultLevel) ? rawDefaultLevel : undefined;
return { levels, defaultLevel };
}
function buildBaseThinkingProfile(defaultLevel?: ThinkLevel | null): ResolvedThinkingProfile {
return {
levels: BASE_THINKING_LEVELS.map((id) => ({
id,
label: id,
rank: THINKING_LEVEL_RANKS[id],
})),
defaultLevel,
};
}
function buildBinaryThinkingProfile(defaultLevel?: ThinkLevel | null): ResolvedThinkingProfile {
return {
levels: [
{ id: "off", label: "off", rank: THINKING_LEVEL_RANKS.off },
{ id: "low", label: "on", rank: THINKING_LEVEL_RANKS.low },
],
defaultLevel,
};
}
function appendProfileLevel(profile: ResolvedThinkingProfile, id: ThinkLevel) {
if (profile.levels.some((level) => level.id === id)) {
return;
}
profile.levels.push({ id, label: id, rank: THINKING_LEVEL_RANKS[id] });
profile.levels = profile.levels.toSorted((a, b) => a.rank - b.rank);
}
export function resolveThinkingProfile(params: {
provider?: string | null;
model?: string | null;
catalog?: ThinkingCatalogEntry[];
}): ResolvedThinkingProfile {
const context = resolveThinkingPolicyContext(params);
if (!context.normalizedProvider) {
return buildBaseThinkingProfile();
}
const providerContext = {
provider: context.normalizedProvider,
modelId: context.modelId,
reasoning: context.reasoning,
};
const pluginProfile = resolveProviderThinkingProfile({
provider: context.normalizedProvider,
context: providerContext,
});
if (pluginProfile) {
const normalized = normalizeThinkingProfile(pluginProfile);
if (normalized.levels.length > 0) {
return normalized;
}
}
const pluginDecision = resolveProviderBinaryThinking({
provider: normalizedProvider,
const defaultLevel = resolveProviderDefaultThinkingLevel({
provider: context.normalizedProvider,
context: providerContext,
});
const binaryDecision = resolveProviderBinaryThinking({
provider: context.normalizedProvider,
context: {
provider: normalizedProvider,
modelId: normalizeOptionalString(model) ?? "",
provider: context.normalizedProvider,
modelId: context.modelId,
},
});
if (typeof pluginDecision === "boolean") {
return pluginDecision;
const profile =
binaryDecision === true
? buildBinaryThinkingProfile(defaultLevel)
: buildBaseThinkingProfile(defaultLevel);
const policyContext = {
provider: context.normalizedProvider,
modelId: context.modelKey || context.modelId,
};
if (
resolveProviderXHighThinking({
provider: context.normalizedProvider,
context: policyContext,
}) === true
) {
appendProfileLevel(profile, "xhigh");
}
return false;
return profile;
}
export function isBinaryThinkingProvider(provider?: string | null, model?: string | null): boolean {
const profile = resolveThinkingProfile({ provider, model });
return profile.levels.length === 2 && profile.levels.some((level) => level.label === "on");
}
function supportsThinkingLevel(
provider: string | null | undefined,
model: string | null | undefined,
level: ThinkLevel,
): boolean {
return resolveThinkingProfile({ provider, model }).levels.some((entry) => entry.id === level);
}
export function supportsXHighThinking(provider?: string | null, model?: string | null): boolean {
const modelKey = normalizeOptionalLowercaseString(model);
if (!modelKey) {
return false;
}
const providerRaw = normalizeOptionalString(provider);
const providerKey = providerRaw ? normalizeProviderId(providerRaw) : "";
if (providerKey) {
const pluginDecision = resolveProviderXHighThinking({
provider: providerKey,
context: {
provider: providerKey,
modelId: modelKey,
},
});
if (typeof pluginDecision === "boolean") {
return pluginDecision;
}
}
return false;
}
export function supportsAdaptiveThinking(provider?: string | null, model?: string | null): boolean {
const modelKey = normalizeOptionalLowercaseString(model);
if (!modelKey) {
return false;
}
const providerRaw = normalizeOptionalString(provider);
const providerKey = providerRaw ? normalizeProviderId(providerRaw) : "";
if (!providerKey) {
return false;
}
const pluginDecision = resolveProviderAdaptiveThinking({
provider: providerKey,
context: {
provider: providerKey,
modelId: modelKey,
},
});
return pluginDecision === true;
}
export function supportsMaxThinking(provider?: string | null, model?: string | null): boolean {
const modelKey = normalizeOptionalLowercaseString(model);
if (!modelKey) {
return false;
}
const providerRaw = normalizeOptionalString(provider);
const providerKey = providerRaw ? normalizeProviderId(providerRaw) : "";
if (!providerKey) {
return false;
}
const pluginDecision = resolveProviderMaxThinking({
provider: providerKey,
context: {
provider: providerKey,
modelId: modelKey,
},
});
return pluginDecision === true;
return supportsThinkingLevel(provider, model, "xhigh");
}
export function listThinkingLevels(provider?: string | null, model?: string | null): ThinkLevel[] {
const levels = listThinkingLevelsFallback(provider, model);
if (supportsXHighThinking(provider, model)) {
levels.push("xhigh");
}
if (supportsAdaptiveThinking(provider, model)) {
levels.push("adaptive");
}
if (supportsMaxThinking(provider, model)) {
levels.push("max");
}
return levels;
const profile = resolveThinkingProfile({ provider, model });
return profile.levels.map((level) => level.id);
}
export function listThinkingLevelLabels(provider?: string | null, model?: string | null): string[] {
if (isBinaryThinkingProvider(provider, model)) {
return ["off", "on"];
}
return listThinkingLevels(provider, model);
const profile = resolveThinkingProfile({ provider, model });
return profile.levels.map((level) => level.label);
}
export function formatThinkingLevels(
@@ -156,20 +222,13 @@ export function resolveThinkingDefaultForModel(params: {
model: string;
catalog?: ThinkingCatalogEntry[];
}): ThinkLevel {
const normalizedProvider = normalizeProviderId(params.provider);
const candidate = params.catalog?.find(
(entry) => entry.provider === params.provider && entry.id === params.model,
);
const pluginDecision = resolveProviderDefaultThinkingLevel({
provider: normalizedProvider,
context: {
provider: normalizedProvider,
modelId: params.model,
reasoning: candidate?.reasoning,
},
const profile = resolveThinkingProfile({
provider: params.provider,
model: params.model,
catalog: params.catalog,
});
if (pluginDecision) {
return pluginDecision;
if (profile.defaultLevel) {
return profile.defaultLevel;
}
return resolveThinkingDefaultForModelFallback(params);
}
@@ -178,19 +237,19 @@ export function resolveLargestSupportedThinkingLevel(
provider?: string | null,
model?: string | null,
): ThinkLevel {
if (isBinaryThinkingProvider(provider, model)) {
return "low";
}
if (supportsMaxThinking(provider, model)) {
return "max";
}
if (supportsXHighThinking(provider, model)) {
return "xhigh";
}
if (supportsAdaptiveThinking(provider, model)) {
return "adaptive";
}
return "high";
const profile = resolveThinkingProfile({ provider, model });
return (
profile.levels.filter((level) => level.id !== "off").toSorted((a, b) => b.rank - a.rank)[0]
?.id ?? "off"
);
}
export function isThinkingLevelSupported(params: {
provider?: string | null;
model?: string | null;
level: ThinkLevel;
}): boolean {
return supportsThinkingLevel(params.provider, params.model, params.level);
}
export function resolveSupportedThinkingLevel(params: {
@@ -198,10 +257,8 @@ export function resolveSupportedThinkingLevel(params: {
model?: string | null;
level: ThinkLevel;
}): ThinkLevel {
if (params.level !== "max") {
if (isThinkingLevelSupported(params)) {
return params.level;
}
return supportsMaxThinking(params.provider, params.model)
? "max"
: resolveLargestSupportedThinkingLevel(params.provider, params.model);
return resolveLargestSupportedThinkingLevel(params.provider, params.model);
}

View File

@@ -28,7 +28,10 @@ export function registerAgentCommands(program: Command, args: { agentChannelOpti
.option("-t, --to <number>", "Recipient number in E.164 used to derive the session key")
.option("--session-id <id>", "Use an explicit session id")
.option("--agent <id>", "Agent id (overrides routing bindings)")
.option("--thinking <level>", "Thinking level: off | minimal | low | medium | high | xhigh")
.option(
"--thinking <level>",
"Thinking level: off | minimal | low | medium | high | xhigh | adaptive | max where supported",
)
.option("--verbose <on|off>", "Persist agent verbose level for the session")
.option(
"--channel <channel>",

View File

@@ -13,9 +13,9 @@ export { resolveAgentTimeoutMs } from "../../agents/timeout.js";
export { deriveSessionTotalTokens, hasNonzeroUsage } from "../../agents/usage.js";
export { DEFAULT_IDENTITY_FILENAME, ensureAgentWorkspace } from "../../agents/workspace.js";
export {
isThinkingLevelSupported,
normalizeThinkLevel,
resolveSupportedThinkingLevel,
supportsXHighThinking,
} from "../../auto-reply/thinking.js";
export { resolveSessionTranscriptPath } from "../../config/sessions/paths.js";
export { setSessionRuntimeModel } from "../../config/sessions/types.js";

View File

@@ -78,6 +78,7 @@ const hasNonzeroUsageMock = createMock();
const ensureAgentWorkspaceMock = createMock();
const normalizeThinkLevelMock = createMock();
const normalizeVerboseLevelMock = createMock();
const isThinkingLevelSupportedMock = createMock();
const resolveSupportedThinkingLevelMock = createMock();
const supportsXHighThinkingMock = createMock();
const resolveSessionTranscriptPathMock = createMock();
@@ -110,6 +111,7 @@ vi.mock("./run.runtime.js", () => ({
DEFAULT_IDENTITY_FILENAME: "IDENTITY.md",
ensureAgentWorkspace: ensureAgentWorkspaceMock,
normalizeThinkLevel: normalizeThinkLevelMock,
isThinkingLevelSupported: isThinkingLevelSupportedMock,
resolveSupportedThinkingLevel: resolveSupportedThinkingLevelMock,
supportsXHighThinking: supportsXHighThinkingMock,
resolveSessionTranscriptPath: resolveSessionTranscriptPathMock,
@@ -308,6 +310,7 @@ function resetRunConfigMocks(): void {
hasNonzeroUsageMock.mockReturnValue(true);
ensureAgentWorkspaceMock.mockResolvedValue({ dir: "/tmp/workspace" });
normalizeThinkLevelMock.mockImplementation((value: unknown) => value);
isThinkingLevelSupportedMock.mockReturnValue(true);
resolveSupportedThinkingLevelMock.mockImplementation(({ level }: { level?: unknown }) => level);
supportsXHighThinkingMock.mockReturnValue(false);
buildSafeExternalPromptMock.mockImplementation(

View File

@@ -46,11 +46,11 @@ import {
resolveCronStyleNow,
resolveDefaultAgentId,
resolveHookExternalContentSource,
isThinkingLevelSupported,
resolveSupportedThinkingLevel,
resolveSessionTranscriptPath,
resolveThinkingDefault,
setSessionRuntimeModel,
supportsXHighThinking,
} from "./run.runtime.js";
import type { RunCronAgentTurnResult } from "./run.types.js";
import { resolveCronAgentSessionKey } from "./session-key.js";
@@ -500,13 +500,7 @@ async function prepareCronRunContext(params: {
catalog: await loadCatalog(),
});
}
if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
logWarn(
`[cron:${input.job.id}] Thinking level "xhigh" is not supported for ${provider}/${model}; downgrading to "high".`,
);
thinkLevel = "high";
}
if (thinkLevel === "max") {
if (!isThinkingLevelSupported({ provider, model, level: thinkLevel })) {
const fallbackThinkLevel = resolveSupportedThinkingLevel({
provider,
model,
@@ -514,7 +508,7 @@ async function prepareCronRunContext(params: {
});
if (fallbackThinkLevel !== thinkLevel) {
logWarn(
`[cron:${input.job.id}] Thinking level "max" is not supported for ${provider}/${model}; downgrading to "${fallbackThinkLevel}".`,
`[cron:${input.job.id}] Thinking level "${thinkLevel}" is not supported for ${provider}/${model}; downgrading to "${fallbackThinkLevel}".`,
);
thinkLevel = fallbackThinkLevel;
}

View File

@@ -25,6 +25,7 @@ import {
listSubagentRunsForController,
resolveSubagentSessionStatus,
} from "../agents/subagent-registry-read.js";
import { listThinkingLevelLabels, resolveThinkingDefaultForModel } from "../auto-reply/thinking.js";
import { loadConfig } from "../config/config.js";
import { resolveAgentModelFallbackValues } from "../config/model-input.js";
import { resolveStateDir } from "../config/paths.js";
@@ -1372,6 +1373,11 @@ export function buildGatewaySessionRow(params: {
}
}
const rowModelProvider = selectedModel?.provider ?? modelProvider;
const rowModel = selectedModel?.model ?? model;
const thinkingProvider = rowModelProvider ?? DEFAULT_PROVIDER;
const thinkingModel = rowModel ?? DEFAULT_MODEL;
return {
key,
spawnedBy: subagentOwner || entry?.spawnedBy,
@@ -1396,6 +1402,11 @@ export function buildGatewaySessionRow(params: {
systemSent: entry?.systemSent,
abortedLastRun: entry?.abortedLastRun,
thinkingLevel: entry?.thinkingLevel,
thinkingOptions: listThinkingLevelLabels(thinkingProvider, thinkingModel),
thinkingDefault: resolveThinkingDefaultForModel({
provider: thinkingProvider,
model: thinkingModel,
}),
fastMode: entry?.fastMode,
verboseLevel: entry?.verboseLevel,
traceLevel: entry?.traceLevel,
@@ -1414,8 +1425,8 @@ export function buildGatewaySessionRow(params: {
parentSessionKey: subagentOwner || entry?.parentSessionKey,
childSessions,
responseUsage: entry?.responseUsage,
modelProvider: selectedModel?.provider ?? modelProvider,
model: selectedModel?.model ?? model,
modelProvider: rowModelProvider,
model: rowModel,
contextTokens,
deliveryContext: deliveryFields.deliveryContext,
lastChannel: deliveryFields.lastChannel ?? entry?.lastChannel,

View File

@@ -39,6 +39,8 @@ export type GatewaySessionRow = {
systemSent?: boolean;
abortedLastRun?: boolean;
thinkingLevel?: string;
thinkingOptions?: string[];
thinkingDefault?: string;
fastMode?: boolean;
verboseLevel?: string;
traceLevel?: string;

View File

@@ -9,14 +9,13 @@ import {
import { normalizeGroupActivation } from "../auto-reply/group-activation.js";
import {
formatThinkingLevels,
formatXHighModelHint,
isThinkingLevelSupported,
normalizeElevatedLevel,
normalizeFastMode,
normalizeReasoningLevel,
normalizeThinkLevel,
normalizeUsageDisplay,
resolveSupportedThinkingLevel,
supportsXHighThinking,
} from "../auto-reply/thinking.js";
import type { SessionEntry } from "../config/sessions.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
@@ -435,25 +434,31 @@ export async function applySessionsPatchToStore(params: {
}
}
if (next.thinkingLevel === "xhigh") {
if (next.thinkingLevel) {
const effectiveProvider = next.providerOverride ?? resolvedDefault.provider;
const effectiveModel = next.modelOverride ?? resolvedDefault.model;
if (!supportsXHighThinking(effectiveProvider, effectiveModel)) {
const thinkingLevel = normalizeThinkLevel(next.thinkingLevel);
if (!thinkingLevel) {
delete next.thinkingLevel;
} else if (
!isThinkingLevelSupported({
provider: effectiveProvider,
model: effectiveModel,
level: thinkingLevel,
})
) {
if ("thinkingLevel" in patch) {
return invalid(`thinkingLevel "xhigh" is only supported for ${formatXHighModelHint()}`);
return invalid(
`thinkingLevel "${thinkingLevel}" is not supported for ${effectiveProvider}/${effectiveModel} (use ${formatThinkingLevels(effectiveProvider, effectiveModel, "|")})`,
);
}
next.thinkingLevel = "high";
next.thinkingLevel = resolveSupportedThinkingLevel({
provider: effectiveProvider,
model: effectiveModel,
level: thinkingLevel,
});
}
}
if (next.thinkingLevel === "max") {
const effectiveProvider = next.providerOverride ?? resolvedDefault.provider;
const effectiveModel = next.modelOverride ?? resolvedDefault.model;
next.thinkingLevel = resolveSupportedThinkingLevel({
provider: effectiveProvider,
model: effectiveModel,
level: next.thinkingLevel,
});
}
if ("sendPolicy" in patch) {
const raw = patch.sendPolicy;

View File

@@ -81,6 +81,7 @@ export type {
ProviderTransportTurnState,
ProviderToolSchemaDiagnostic,
ProviderResolveUsageAuthContext,
ProviderThinkingProfile,
ProviderThinkingPolicyContext,
ProviderValidateReplayTurnsContext,
ProviderWebSocketSessionPolicy,

View File

@@ -6,6 +6,7 @@ export { resolvePreferredOpenClawTmpDir } from "../infra/tmp-openclaw-dir.js";
export {
formatThinkingLevels,
formatXHighModelHint,
isThinkingLevelSupported,
normalizeThinkLevel,
resolveSupportedThinkingLevel,
supportsXHighThinking,

View File

@@ -63,6 +63,7 @@ import type {
ProviderTransportTurnState,
ProviderToolSchemaDiagnostic,
ProviderResolveUsageAuthContext,
ProviderThinkingProfile,
ProviderThinkingPolicyContext,
ProviderValidateReplayTurnsContext,
ProviderWebSocketSessionPolicy,
@@ -119,6 +120,7 @@ export type {
ProviderPrepareRuntimeAuthContext,
ProviderSanitizeReplayHistoryContext,
ProviderResolveUsageAuthContext,
ProviderThinkingProfile,
ProviderResolveDynamicModelContext,
ProviderResolveTransportTurnStateContext,
ProviderResolveWebSocketSessionPolicyContext,

View File

@@ -19,6 +19,7 @@ import {
} from "./provider-hook-runtime.js";
import { resolveBundledProviderPolicySurface } from "./provider-public-artifacts.js";
import type { ProviderRuntimeModel } from "./provider-runtime-model.types.js";
import type { ProviderThinkingProfile } from "./provider-thinking.types.js";
import { resolveCatalogHookProviderPluginIds } from "./providers.js";
import { getActivePluginRegistryWorkspaceDirFromState } from "./runtime-state.js";
import { resolveRuntimeTextTransforms } from "./text-transforms.runtime.js";
@@ -640,24 +641,14 @@ export function resolveProviderXHighThinking(params: {
return resolveProviderRuntimePlugin(params)?.supportsXHighThinking?.(params.context);
}
export function resolveProviderAdaptiveThinking(params: {
export function resolveProviderThinkingProfile(params: {
provider: string;
config?: OpenClawConfig;
workspaceDir?: string;
env?: NodeJS.ProcessEnv;
context: ProviderThinkingPolicyContext;
}) {
return resolveProviderRuntimePlugin(params)?.supportsAdaptiveThinking?.(params.context);
}
export function resolveProviderMaxThinking(params: {
provider: string;
config?: OpenClawConfig;
workspaceDir?: string;
env?: NodeJS.ProcessEnv;
context: ProviderThinkingPolicyContext;
}) {
return resolveProviderRuntimePlugin(params)?.supportsMaxThinking?.(params.context);
context: ProviderDefaultThinkingPolicyContext;
}): ProviderThinkingProfile | null | undefined {
return resolveProviderRuntimePlugin(params)?.resolveThinkingProfile?.(params.context);
}
export function resolveProviderDefaultThinkingLevel(params: {

View File

@@ -1,6 +1,7 @@
import { normalizeProviderId } from "../agents/provider-id.js";
import type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
ProviderThinkingPolicyContext,
} from "./provider-thinking.types.js";
@@ -8,22 +9,13 @@ type ThinkingProviderPlugin = {
id: string;
aliases?: string[];
isBinaryThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
supportsAdaptiveThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
supportsMaxThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
supportsXHighThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
resolveThinkingProfile?: (
ctx: ProviderDefaultThinkingPolicyContext,
) => ProviderThinkingProfile | null | undefined;
resolveDefaultThinkingLevel?: (
ctx: ProviderDefaultThinkingPolicyContext,
) =>
| "off"
| "minimal"
| "low"
| "medium"
| "high"
| "xhigh"
| "adaptive"
| "max"
| null
| undefined;
) => "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | null | undefined;
};
const PLUGIN_REGISTRY_STATE = Symbol.for("openclaw.pluginRegistryState");
@@ -73,16 +65,10 @@ export function resolveProviderXHighThinking(
return resolveActiveThinkingProvider(params.provider)?.supportsXHighThinking?.(params.context);
}
export function resolveProviderAdaptiveThinking(
params: ThinkingHookParams<ProviderThinkingPolicyContext>,
export function resolveProviderThinkingProfile(
params: ThinkingHookParams<ProviderDefaultThinkingPolicyContext>,
) {
return resolveActiveThinkingProvider(params.provider)?.supportsAdaptiveThinking?.(params.context);
}
export function resolveProviderMaxThinking(
params: ThinkingHookParams<ProviderThinkingPolicyContext>,
) {
return resolveActiveThinkingProvider(params.provider)?.supportsMaxThinking?.(params.context);
return resolveActiveThinkingProvider(params.provider)?.resolveThinkingProfile?.(params.context);
}
export function resolveProviderDefaultThinkingLevel(

View File

@@ -20,3 +20,33 @@ export type ProviderThinkingPolicyContext = {
export type ProviderDefaultThinkingPolicyContext = ProviderThinkingPolicyContext & {
reasoning?: boolean;
};
export type ProviderThinkingLevelId =
| "off"
| "minimal"
| "low"
| "medium"
| "high"
| "xhigh"
| "adaptive"
| "max";
export type ProviderThinkingLevel = {
id: ProviderThinkingLevelId;
/**
* Optional display label. Use this when the stored value differs from the
* provider-facing UX, for example binary providers storing `low` but showing
* `on`.
*/
label?: string;
/**
* Relative strength used when downgrading a stored level that the selected
* model no longer supports.
*/
rank?: number;
};
export type ProviderThinkingProfile = {
levels: ProviderThinkingLevel[] | ReadonlyArray<ProviderThinkingLevel>;
defaultLevel?: ProviderThinkingLevelId | null;
};
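
For orientation, a minimal sketch of profiles a provider plugin might return under this contract follows. The level sets, labels, and rank numbers are illustrative only and do not correspond to any shipped OpenClaw provider; when `rank` is omitted, the runtime falls back to its built-in level ranks.

```ts
import type { ProviderThinkingProfile } from "./provider-thinking.types.js";

// Binary on/off provider: stores the canonical id "low" but displays "on".
export const binaryProfileExample: ProviderThinkingProfile = {
  levels: [
    { id: "off", label: "off" },
    { id: "low", label: "on" },
  ],
  defaultLevel: "off",
};

// Three-step ladder with explicit ranks (illustrative numbers); a stale stored
// "max" would downgrade to "high", the largest non-off level by rank.
export const threeStepProfileExample: ProviderThinkingProfile = {
  levels: [
    { id: "off", rank: 0 },
    { id: "low", rank: 2 },
    { id: "high", rank: 4 },
  ],
};
```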

View File

@@ -109,6 +109,7 @@ import type { createVpsAwareOAuthHandlers } from "./provider-oauth-flow.js";
import type { ProviderRuntimeModel } from "./provider-runtime-model.types.js";
import type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
ProviderThinkingPolicyContext,
} from "./provider-thinking.types.js";
import type { PluginRuntime } from "./runtime/types.js";
@@ -902,6 +903,7 @@ export type ProviderBuiltInModelSuppressionResult = {
export type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
ProviderThinkingPolicyContext,
} from "./provider-thinking.types.js";
@@ -1399,45 +1401,40 @@ export type ProviderPlugin = {
*
* Return true when the provider exposes a coarse on/off reasoning control
* instead of the normal multi-level ladder shown by `/think`.
*
* @deprecated Prefer `resolveThinkingProfile`.
*/
isBinaryThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
/**
* Provider-owned xhigh reasoning support.
*
* Return true only for models that should expose the `xhigh` thinking level.
*
* @deprecated Prefer `resolveThinkingProfile`.
*/
supportsXHighThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
/**
* Provider-owned adaptive thinking support.
* Provider-owned thinking level profile.
*
* Return true only for models that should expose the `adaptive` thinking level.
* Prefer this over the individual thinking capability hooks when a provider
* or model exposes a custom set of thinking levels. OpenClaw stores the
* canonical `id`, shows `label` when provided, and downgrades stale stored
* values by profile rank.
*/
supportsAdaptiveThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
/**
* Provider-owned max thinking support.
*
* Return true only for models that should expose the `max` thinking level.
*/
supportsMaxThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
resolveThinkingProfile?: (
ctx: ProviderDefaultThinkingPolicyContext,
) => ProviderThinkingProfile | null | undefined;
/**
* Provider-owned default thinking level.
*
* Use this to keep model-family defaults (for example Claude 4.6 =>
* adaptive) out of core command logic.
*
* @deprecated Prefer `resolveThinkingProfile`.
*/
resolveDefaultThinkingLevel?: (
ctx: ProviderDefaultThinkingPolicyContext,
) =>
| "off"
| "minimal"
| "low"
| "medium"
| "high"
| "xhigh"
| "adaptive"
| "max"
| null
| undefined;
) => "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | null | undefined;
/**
* Provider-owned system-prompt contribution.
*

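As a rough sketch of how a provider plugin could adopt the new hook in place of the deprecated per-capability ones, the fragment below implements `resolveThinkingProfile`. The model-id prefix and level set are assumptions for illustration, not the behavior of any bundled provider.

```ts
import type {
  ProviderDefaultThinkingPolicyContext,
  ProviderThinkingProfile,
} from "./provider-thinking.types.js";

// Illustrative hook body for a hypothetical provider plugin.
export function resolveThinkingProfile(
  ctx: ProviderDefaultThinkingPolicyContext,
): ProviderThinkingProfile | undefined {
  // Hypothetical model family exposing an off/low/high ladder with a default.
  if ((ctx.modelId ?? "").startsWith("example-pro-")) {
    return {
      levels: [{ id: "off" }, { id: "low" }, { id: "high" }],
      defaultLevel: "low",
    };
  }
  // Returning undefined keeps the runtime fallback: the base ladder plus the
  // deprecated isBinaryThinking/supportsXHighThinking hooks, while they remain.
  return undefined;
}
```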
View File

@@ -139,8 +139,7 @@ function resolveThinkingTargetModel(state: AppViewState): {
}
function buildThinkingOptions(
provider: string | null,
model: string | null,
labels: readonly string[],
currentOverride: string,
): ChatThinkingSelectOption[] {
const seen = new Set<string>();
@@ -160,9 +159,9 @@ function buildThinkingOptions(
);
};
for (const label of listThinkingLevelLabels(provider, model)) {
for (const label of labels) {
const normalized = normalizeThinkLevel(label) ?? normalizeLowercaseStringOrEmpty(label);
addOption(normalized);
addOption(normalized, label);
}
if (currentOverride) {
addOption(currentOverride);
@@ -178,18 +177,22 @@ function resolveChatThinkingSelectState(state: AppViewState): ChatThinkingSelect
? (normalizeThinkLevel(persisted) ?? persisted.trim())
: "";
const { provider, model } = resolveThinkingTargetModel(state);
const labels =
activeRow?.thinkingOptions ??
(provider && model ? listThinkingLevelLabels(provider, model) : listThinkingLevelLabels());
const defaultLevel =
provider && model
activeRow?.thinkingDefault ??
(provider && model
? resolveThinkingDefaultForModel({
provider,
model,
catalog: state.chatModelCatalog ?? [],
})
: "off";
: "off");
return {
currentOverride,
defaultLabel: `Default (${defaultLevel})`,
options: buildThinkingOptions(provider, model, currentOverride),
options: buildThinkingOptions(labels, currentOverride),
};
}

View File

@@ -527,7 +527,21 @@ describe("executeSlashCommand directives", () => {
});
it("accepts minimal and xhigh thinking levels", async () => {
const request = vi.fn().mockResolvedValueOnce({ ok: true }).mockResolvedValueOnce({ ok: true });
const request = vi.fn(async (method: string, payload?: unknown) => {
if (method === "sessions.list") {
return {
sessions: [
row("agent:main:main", {
thinkingOptions: ["off", "minimal", "low", "medium", "high", "xhigh"],
}),
],
};
}
if (method === "sessions.patch") {
return { ok: true, ...((payload ?? {}) as object) };
}
throw new Error(`unexpected method: ${method}`);
});
const minimal = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
@@ -544,11 +558,13 @@ describe("executeSlashCommand directives", () => {
expect(minimal.content).toBe("Thinking level set to **minimal**.");
expect(xhigh.content).toBe("Thinking level set to **xhigh**.");
expect(request).toHaveBeenNthCalledWith(1, "sessions.patch", {
expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
expect(request).toHaveBeenNthCalledWith(2, "sessions.patch", {
key: "agent:main:main",
thinkingLevel: "minimal",
});
expect(request).toHaveBeenNthCalledWith(2, "sessions.patch", {
expect(request).toHaveBeenNthCalledWith(3, "sessions.list", {});
expect(request).toHaveBeenNthCalledWith(4, "sessions.patch", {
key: "agent:main:main",
thinkingLevel: "xhigh",
});

View File

@@ -258,7 +258,7 @@ async function executeThink(
return {
content: formatDirectiveOptions(
`Current thinking level: ${resolveCurrentThinkingLevel(session, models)}.`,
formatThinkingLevels(session?.modelProvider, session?.model),
formatThinkingOptionsForSession(session),
),
};
} catch (err) {
@@ -271,7 +271,7 @@ async function executeThink(
try {
const session = await loadCurrentSession(client, sessionKey);
return {
content: `Unrecognized thinking level "${rawLevel}". Valid levels: ${formatThinkingLevels(session?.modelProvider, session?.model)}.`,
content: `Unrecognized thinking level "${rawLevel}". Valid levels: ${formatThinkingOptionsForSession(session)}.`,
};
} catch (err) {
return { content: `Failed to validate thinking level: ${String(err)}` };
@@ -279,6 +279,12 @@ async function executeThink(
}
try {
const session = await loadCurrentSession(client, sessionKey);
if (!isThinkingLevelOptionForSession(session, level)) {
return {
content: `Unsupported thinking level "${rawLevel}" for this model. Valid levels: ${formatThinkingOptionsForSession(session)}.`,
};
}
await client.request("sessions.patch", { key: sessionKey, thinkingLevel: level });
return {
content: `Thinking level set to **${level}**.`,
@@ -594,6 +600,26 @@ function formatDirectiveOptions(text: string, options: string): string {
return `${text}\nOptions: ${options}.`;
}
function formatThinkingOptionsForSession(
session: GatewaySessionRow | undefined,
separator = ", ",
): string {
if (session?.thinkingOptions?.length) {
return session.thinkingOptions.join(separator);
}
return formatThinkingLevels(session?.modelProvider, session?.model);
}
function isThinkingLevelOptionForSession(
session: GatewaySessionRow | undefined,
level: string,
): boolean {
const labels = session?.thinkingOptions?.length
? session.thinkingOptions
: formatThinkingOptionsForSession(session).split(/\s*,\s*/);
return labels.some((label) => normalizeThinkLevel(label) === level);
}
async function loadCurrentSession(
client: GatewayBrowserClient,
sessionKey: string,
@@ -651,7 +677,13 @@ function resolveCurrentThinkingLevel(
): string {
const persisted = normalizeThinkLevel(session?.thinkingLevel);
if (persisted) {
return persisted;
return (
session?.thinkingOptions?.find((label) => normalizeThinkLevel(label) === persisted) ??
persisted
);
}
if (session?.thinkingDefault) {
return session.thinkingDefault;
}
if (!session?.modelProvider || !session.model) {
return "off";

View File

@@ -7,12 +7,6 @@ export type ThinkingCatalogEntry = {
};
const BASE_THINKING_LEVELS = ["off", "minimal", "low", "medium", "high"] as const;
const BINARY_THINKING_LEVELS = ["off", "on"] as const;
const ANTHROPIC_CLAUDE_46_MODEL_RE = /^claude-(?:opus|sonnet)-4(?:\.|-)6(?:$|[-.])/i;
const ANTHROPIC_OPUS_47_MODEL_RE = /^claude-opus-4(?:\.|-)7(?:$|[-.])/i;
const AMAZON_BEDROCK_CLAUDE_46_MODEL_RE = /claude-(?:opus|sonnet)-4(?:\.|-)6(?:$|[-.])/i;
const OPENAI_XHIGH_MODEL_RE =
/^(?:gpt-5\.[2-9](?:\.\d+)?|gpt-5\.[2-9](?:\.\d+)?-pro|gpt-5\.\d+-codex|gpt-5\.\d+-codex-spark|gpt-5\.1-codex|gpt-5\.2-codex)(?:$|-)/i;
export function normalizeThinkingProviderId(provider?: string | null): string {
if (!provider) {
@@ -29,7 +23,8 @@ export function normalizeThinkingProviderId(provider?: string | null): string {
}
export function isBinaryThinkingProvider(provider?: string | null): boolean {
return normalizeThinkingProviderId(provider) === "zai";
void provider;
return false;
}
export function normalizeThinkLevel(raw?: string | null): string | undefined {
@@ -71,49 +66,13 @@ export function normalizeThinkLevel(raw?: string | null): string | undefined {
return undefined;
}
function supportsAdaptiveThinking(provider?: string | null, model?: string | null): boolean {
const normalizedProvider = normalizeThinkingProviderId(provider);
const modelId = model?.trim() ?? "";
if (normalizedProvider === "anthropic") {
return ANTHROPIC_CLAUDE_46_MODEL_RE.test(modelId) || ANTHROPIC_OPUS_47_MODEL_RE.test(modelId);
}
if (normalizedProvider === "amazon-bedrock") {
return AMAZON_BEDROCK_CLAUDE_46_MODEL_RE.test(modelId);
}
return false;
}
function supportsXHighThinking(provider?: string | null, model?: string | null): boolean {
const normalizedProvider = normalizeThinkingProviderId(provider);
const modelId = model?.trim() ?? "";
if (normalizedProvider === "anthropic") {
return ANTHROPIC_OPUS_47_MODEL_RE.test(modelId);
}
if (["openai", "openai-codex", "github-copilot", "codex"].includes(normalizedProvider)) {
return OPENAI_XHIGH_MODEL_RE.test(modelId);
}
return false;
}
function supportsMaxThinking(provider?: string | null, model?: string | null): boolean {
return normalizeThinkingProviderId(provider) === "anthropic"
? ANTHROPIC_OPUS_47_MODEL_RE.test(model?.trim() ?? "")
: false;
}
export function listThinkingLevelLabels(
provider?: string | null,
model?: string | null,
): readonly string[] {
if (isBinaryThinkingProvider(provider)) {
return BINARY_THINKING_LEVELS;
}
return [
...BASE_THINKING_LEVELS,
...(supportsXHighThinking(provider, model) ? ["xhigh"] : []),
...(supportsAdaptiveThinking(provider, model) ? ["adaptive"] : []),
...(supportsMaxThinking(provider, model) ? ["max"] : []),
];
void provider;
void model;
return BASE_THINKING_LEVELS;
}
export function formatThinkingLevels(provider?: string | null, model?: string | null): string {
@@ -125,14 +84,6 @@ export function resolveThinkingDefaultForModel(params: {
model: string;
catalog?: ThinkingCatalogEntry[];
}): string {
const normalizedProvider = normalizeThinkingProviderId(params.provider);
const modelId = params.model.trim();
if (normalizedProvider === "anthropic" && ANTHROPIC_CLAUDE_46_MODEL_RE.test(modelId)) {
return "adaptive";
}
if (normalizedProvider === "amazon-bedrock" && AMAZON_BEDROCK_CLAUDE_46_MODEL_RE.test(modelId)) {
return "adaptive";
}
const candidate = params.catalog?.find(
(entry) => entry.provider === params.provider && entry.id === params.model,
);

View File

@@ -411,6 +411,8 @@ export type GatewaySessionRow = {
systemSent?: boolean;
abortedLastRun?: boolean;
thinkingLevel?: string;
thinkingOptions?: string[];
thinkingDefault?: string;
fastMode?: boolean;
verboseLevel?: string;
reasoningLevel?: string;

View File

@@ -63,8 +63,7 @@ export type SessionsProps = {
onRestoreCheckpoint: (sessionKey: string, checkpointId: string) => void | Promise<void>;
};
const THINK_LEVELS = ["", "off", "minimal", "low", "medium", "high", "xhigh"] as const;
const BINARY_THINK_LEVELS = ["", "off", "on"] as const;
const DEFAULT_THINK_LEVELS = ["off", "minimal", "low", "medium", "high"] as const;
const VERBOSE_LEVELS = [
{ value: "", label: "inherit" },
{ value: "off", label: "off (explicit)" },
@@ -79,23 +78,13 @@ const FAST_LEVELS = [
const REASONING_LEVELS = ["", "off", "on", "stream"] as const;
const PAGE_SIZES = [10, 25, 50, 100] as const;
function normalizeProviderId(provider?: string | null): string {
if (!provider) {
return "";
}
const normalized = normalizeLowercaseStringOrEmpty(provider);
if (normalized === "z.ai" || normalized === "z-ai") {
return "zai";
}
return normalized;
function resolveThinkLevelOptions(row: GatewaySessionRow): readonly string[] {
const options = row.thinkingOptions?.length ? row.thinkingOptions : DEFAULT_THINK_LEVELS;
return ["", ...options];
}
function isBinaryThinkingProvider(provider?: string | null): boolean {
return normalizeProviderId(provider) === "zai";
}
function resolveThinkLevelOptions(provider?: string | null): readonly string[] {
return isBinaryThinkingProvider(provider) ? BINARY_THINK_LEVELS : THINK_LEVELS;
function isBinaryThinkingRow(row: GatewaySessionRow): boolean {
return row.thinkingOptions?.includes("on") === true;
}
function withCurrentOption(options: readonly string[], current: string): string[] {
@@ -453,9 +442,9 @@ export function renderSessions(props: SessionsProps) {
function renderRows(row: GatewaySessionRow, props: SessionsProps) {
const updated = row.updatedAt ? formatRelativeTimestamp(row.updatedAt) : t("common.na");
const rawThinking = row.thinkingLevel ?? "";
const isBinaryThinking = isBinaryThinkingProvider(row.modelProvider);
const isBinaryThinking = isBinaryThinkingRow(row);
const thinking = resolveThinkLevelDisplay(rawThinking, isBinaryThinking);
const thinkLevels = withCurrentOption(resolveThinkLevelOptions(row.modelProvider), thinking);
const thinkLevels = withCurrentOption(resolveThinkLevelOptions(row), thinking);
const fastMode = row.fastMode === true ? "on" : row.fastMode === false ? "off" : "";
const fastLevels = withCurrentLabeledOption(FAST_LEVELS, fastMode);
const verbose = row.verboseLevel ?? "";