fix(translator): handle Claude thinking type "auto" like adaptive

Author: hkfires
Date: 2026-03-01 10:30:19 +08:00
parent 1ae994b4aa
commit b148820c35
5 changed files with 10 additions and 16 deletions
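
The change is identical across all five converters: the "auto" thinking type that Amp sends is folded into the existing "adaptive" branch instead of receiving a hard-coded 64000-token budget. Below is a minimal standalone sketch of the consolidated pattern; translateThinking, the gjson read, and the sample payload are illustrative assumptions rather than code from this repository, while the sjson paths mirror the Gemini-style hunks.

package main

import (
	"fmt"

	"github.com/tidwall/gjson"
	"github.com/tidwall/sjson"
)

// translateThinking sketches the consolidated branch: "adaptive" and "auto"
// are both written as a "high" sentinel, and a later pass (ApplyThinking in
// this repo) is expected to resolve the sentinel to the target model's
// maximum thinking capability instead of a fixed budget.
func translateThinking(claudeJSON []byte, out string) string {
	switch gjson.GetBytes(claudeJSON, "thinking.type").String() {
	case "adaptive", "auto":
		out, _ = sjson.Set(out, "generationConfig.thinkingConfig.thinkingLevel", "high")
		out, _ = sjson.Set(out, "generationConfig.thinkingConfig.includeThoughts", true)
	}
	return out
}

func main() {
	// Amp-style request: thinking.type is "auto" rather than "adaptive".
	claude := []byte(`{"thinking":{"type":"auto"}}`)
	fmt.Println(translateThinking(claude, `{}`))
	// {"generationConfig":{"thinkingConfig":{"thinkingLevel":"high","includeThoughts":true}}}
}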

@@ -440,14 +440,8 @@ func ConvertClaudeRequestToAntigravity(modelName string, inputRawJSON []byte, _
 			out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingBudget", budget)
 			out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.includeThoughts", true)
 		}
-	case "auto":
-		// Amp sends thinking.type="auto" — use max budget from model config
-		// Antigravity API for Claude models requires a concrete positive budget,
-		// not -1. Use a high default that ApplyThinking will cap to model max.
-		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingBudget", 64000)
-		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.includeThoughts", true)
-	case "adaptive":
-		// Keep adaptive as a high level sentinel; ApplyThinking resolves it
+	case "adaptive", "auto":
+		// Keep adaptive/auto as a high level sentinel; ApplyThinking resolves it
 		// to model-specific max capability.
 		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingLevel", "high")
 		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.includeThoughts", true)

@@ -230,8 +230,8 @@ func ConvertClaudeRequestToCodex(modelName string, inputRawJSON []byte, _ bool)
 				reasoningEffort = effort
 			}
 		}
-	case "adaptive":
-		// Claude adaptive means "enable with max capacity"; keep it as highest level
+	case "adaptive", "auto":
+		// Claude adaptive/auto means "enable with max capacity"; keep it as highest level
 		// and let ApplyThinking normalize per target model capability.
 		reasoningEffort = string(thinking.LevelXHigh)
 	case "disabled":

@@ -180,8 +180,8 @@ func ConvertClaudeRequestToCLI(modelName string, inputRawJSON []byte, _ bool) []
 			out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingBudget", budget)
 			out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.includeThoughts", true)
 		}
-	case "adaptive":
-		// Keep adaptive as a high level sentinel; ApplyThinking resolves it
+	case "adaptive", "auto":
+		// Keep adaptive/auto as a high level sentinel; ApplyThinking resolves it
 		// to model-specific max capability.
 		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.thinkingLevel", "high")
 		out, _ = sjson.Set(out, "request.generationConfig.thinkingConfig.includeThoughts", true)

@@ -161,8 +161,8 @@ func ConvertClaudeRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
 			out, _ = sjson.Set(out, "generationConfig.thinkingConfig.thinkingBudget", budget)
 			out, _ = sjson.Set(out, "generationConfig.thinkingConfig.includeThoughts", true)
 		}
-	case "adaptive":
-		// Keep adaptive as a high level sentinel; ApplyThinking resolves it
+	case "adaptive", "auto":
+		// Keep adaptive/auto as a high level sentinel; ApplyThinking resolves it
 		// to model-specific max capability.
 		out, _ = sjson.Set(out, "generationConfig.thinkingConfig.thinkingLevel", "high")
 		out, _ = sjson.Set(out, "generationConfig.thinkingConfig.includeThoughts", true)

@@ -75,8 +75,8 @@ func ConvertClaudeRequestToOpenAI(modelName string, inputRawJSON []byte, stream
 				out, _ = sjson.Set(out, "reasoning_effort", effort)
 			}
 		}
-	case "adaptive":
-		// Claude adaptive means "enable with max capacity"; keep it as highest level
+	case "adaptive", "auto":
+		// Claude adaptive/auto means "enable with max capacity"; keep it as highest level
 		// and let ApplyThinking normalize per target model capability.
 		out, _ = sjson.Set(out, "reasoning_effort", string(thinking.LevelXHigh))
 	case "disabled":