feat(models): add Thinking support to GitHub Copilot models
Enhance the model definitions by introducing Thinking support, declaring the reasoning-effort levels each model accepts, and apply the requested thinking level in the Copilot executor's Execute and ExecuteStream paths.
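The ThinkingSupport type itself is not part of this diff; a minimal sketch of what the registry side plausibly looks like, assuming Levels is the only field involved and that ModelInfo carries only the fields exercised in the hunks below (everything not visible in the diff is an assumption):

// Hypothetical sketch; names beyond those visible in the diff are assumed.
package registry

// ThinkingSupport declares which reasoning-effort levels a model accepts.
// Only the Levels field appears in the diff.
type ThinkingSupport struct {
	Levels []string // e.g. {"low", "medium", "high"} or {"none", "low", "medium", "high", "xhigh"}
}

// ModelInfo as implied by the fields set in the hunks below; the real
// struct likely has more fields.
type ModelInfo struct {
	ID                  string
	ContextLength       int
	MaxCompletionTokens int
	SupportedEndpoints  []string
	Thinking            *ThinkingSupport // nil for models without thinking support
}

The registry hunks below add the per-model declarations: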
@@ -144,6 +144,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/chat/completions", "/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
 		},
 		{
 			ID: "gpt-5-mini",
@@ -156,6 +157,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 128000,
 			MaxCompletionTokens: 16384,
 			SupportedEndpoints: []string{"/chat/completions", "/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
 		},
 		{
 			ID: "gpt-5-codex",
@@ -168,6 +170,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"low", "medium", "high"}},
 		},
 		{
 			ID: "gpt-5.1",
@@ -180,6 +183,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/chat/completions", "/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "gpt-5.1-codex",
@@ -192,6 +196,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "gpt-5.1-codex-mini",
@@ -204,6 +209,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 128000,
 			MaxCompletionTokens: 16384,
 			SupportedEndpoints: []string{"/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "gpt-5.1-codex-max",
@@ -216,6 +222,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "gpt-5.2",
@@ -228,6 +235,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/chat/completions", "/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "gpt-5.2-codex",
@@ -240,6 +248,7 @@ func GetGitHubCopilotModels() []*ModelInfo {
 			ContextLength: 200000,
 			MaxCompletionTokens: 32768,
 			SupportedEndpoints: []string{"/responses"},
+			Thinking: &ThinkingSupport{Levels: []string{"none", "low", "medium", "high", "xhigh"}},
 		},
 		{
 			ID: "claude-haiku-4.5",
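The hunks split the catalogue into two tiers: the gpt-5 generation (gpt-5, gpt-5-mini, gpt-5-codex) exposes low/medium/high, while the gpt-5.1 and gpt-5.2 families also accept none and xhigh. How a requested level is checked against these lists is not part of this commit; a hypothetical validator over the sketch above, purely illustrative:

// supportsLevel is an assumed helper, not present in the diff. It
// reports whether a model declares the given reasoning-effort level.
package registry

import "fmt"

func supportsLevel(m *ModelInfo, level string) error {
	if m.Thinking == nil {
		return fmt.Errorf("model %s does not support thinking", m.ID)
	}
	for _, l := range m.Thinking.Levels {
		if l == level {
			return nil // declared level, accept the request
		}
	}
	return fmt.Errorf("model %s: unsupported thinking level %q", m.ID, level)
}

The executor changes below wire the levels into request handling: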
@@ -123,6 +123,16 @@ func (e *GitHubCopilotExecutor) Execute(ctx context.Context, auth *cliproxyauth.
 	body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), false)
 	body = e.normalizeModel(req.Model, body)
 	body = flattenAssistantContent(body)
+
+	thinkingProvider := "openai"
+	if useResponses {
+		thinkingProvider = "codex"
+	}
+	body, err = thinking.ApplyThinking(body, req.Model, from.String(), thinkingProvider, e.Identifier())
+	if err != nil {
+		return resp, err
+	}
+
 	requestedModel := payloadRequestedModel(opts, req.Model)
 	body = applyPayloadConfigWithRoot(e.cfg, req.Model, to.String(), "", body, originalTranslated, requestedModel)
 	body, _ = sjson.SetBytes(body, "stream", false)
@@ -229,6 +239,16 @@ func (e *GitHubCopilotExecutor) ExecuteStream(ctx context.Context, auth *cliprox
 	body := sdktranslator.TranslateRequest(from, to, req.Model, bytes.Clone(req.Payload), true)
 	body = e.normalizeModel(req.Model, body)
 	body = flattenAssistantContent(body)
+
+	thinkingProvider := "openai"
+	if useResponses {
+		thinkingProvider = "codex"
+	}
+	body, err = thinking.ApplyThinking(body, req.Model, from.String(), thinkingProvider, e.Identifier())
+	if err != nil {
+		return nil, err
+	}
+
 	requestedModel := payloadRequestedModel(opts, req.Model)
 	body = applyPayloadConfigWithRoot(e.cfg, req.Model, to.String(), "", body, originalTranslated, requestedModel)
 	body, _ = sjson.SetBytes(body, "stream", true)
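Execute and ExecuteStream gain the same ten lines; the only differences are the error return value and the stream flag. If more call sites appear, the provider selection could be factored into a helper. A sketch under the signatures visible in the diff (thinking.ApplyThinking is assumed to return ([]byte, error)):

// applyThinking is a hypothetical refactor of the block duplicated in
// Execute and ExecuteStream; it does not exist in this commit.
// "openai" targets Chat Completions-style payloads; "codex" targets the
// Responses API, which presumably encodes reasoning effort differently.
func (e *GitHubCopilotExecutor) applyThinking(body []byte, model, from string, useResponses bool) ([]byte, error) {
	provider := "openai"
	if useResponses {
		provider = "codex"
	}
	return thinking.ApplyThinking(body, model, from, provider, e.Identifier())
}

Callers would then replace the inline block with body, err = e.applyThinking(body, req.Model, from.String(), useResponses).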