From deaa64b080d53bdcc04b6e329edf4709e24cc9ba Mon Sep 17 00:00:00 2001 From: hkfires <10558748+hkfires@users.noreply.github.com> Date: Sat, 20 Sep 2025 13:35:27 +0800 Subject: [PATCH 1/4] feat(gemini-web): Add support for real Nano Banana model --- internal/client/gemini-web/client.go | 37 +++++++++++++++++++++++----- internal/client/gemini-web_client.go | 1 + 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/internal/client/gemini-web/client.go b/internal/client/gemini-web/client.go index 6701fbe3..fbdd4d08 100644 --- a/internal/client/gemini-web/client.go +++ b/internal/client/gemini-web/client.go @@ -33,6 +33,10 @@ type GeminiClient struct { accountLabel string } +var NanoBananaModel = map[string]struct{}{ + "gemini-2.5-flash-image-preview": {}, +} + // NewGeminiClient creates a client. Pass empty strings to auto-detect via browser cookies (not implemented in Go port). func NewGeminiClient(secure1psid string, secure1psidts string, proxy string, opts ...func(*GeminiClient)) *GeminiClient { c := &GeminiClient{ @@ -239,6 +243,14 @@ func (c *GeminiClient) GenerateContent(prompt string, files []string, model Mode } } +func ensureAnyLen(slice []any, index int) []any { + if index < len(slice) { + return slice + } + gap := index + 1 - len(slice) + return append(slice, make([]any, gap)...) 
+} + func (c *GeminiClient) generateOnce(prompt string, files []string, model Model, gem *Gem, chat *ChatSession) (ModelOutput, error) { var empty ModelOutput // Build f.req @@ -266,6 +278,14 @@ func (c *GeminiClient) generateOnce(prompt string, files []string, model Model, } inner := []any{item0, nil, item2} + requestedModel := strings.ToLower(model.Name) + if chat != nil && chat.RequestedModel() != "" { + requestedModel = chat.RequestedModel() + } + if _, ok := NanoBananaModel[requestedModel]; ok { + inner = ensureAnyLen(inner, 49) + inner[49] = 14 + } if gem != nil { // pad with 16 nils then gem ID for i := 0; i < 16; i++ { @@ -674,16 +694,17 @@ func truncateForLog(s string, n int) string { // StartChat returns a ChatSession attached to the client func (c *GeminiClient) StartChat(model Model, gem *Gem, metadata []string) *ChatSession { - return &ChatSession{client: c, metadata: normalizeMeta(metadata), model: model, gem: gem} + return &ChatSession{client: c, metadata: normalizeMeta(metadata), model: model, gem: gem, requestedModel: strings.ToLower(model.Name)} } // ChatSession holds conversation metadata type ChatSession struct { - client *GeminiClient - metadata []string // cid, rid, rcid - lastOutput *ModelOutput - model Model - gem *Gem + client *GeminiClient + metadata []string // cid, rid, rcid + lastOutput *ModelOutput + model Model + gem *Gem + requestedModel string } func (cs *ChatSession) String() string { @@ -710,6 +731,10 @@ func normalizeMeta(v []string) []string { func (cs *ChatSession) Metadata() []string { return cs.metadata } func (cs *ChatSession) SetMetadata(v []string) { cs.metadata = normalizeMeta(v) } +func (cs *ChatSession) RequestedModel() string { return cs.requestedModel } +func (cs *ChatSession) SetRequestedModel(name string) { + cs.requestedModel = strings.ToLower(name) +} func (cs *ChatSession) CID() string { if len(cs.metadata) > 0 { return cs.metadata[0] diff --git a/internal/client/gemini-web_client.go 
b/internal/client/gemini-web_client.go index 5c76918a..44f3224b 100644 --- a/internal/client/gemini-web_client.go +++ b/internal/client/gemini-web_client.go @@ -394,6 +394,7 @@ func (c *GeminiWebClient) prepareChat(ctx context.Context, modelName string, raw c.appendUpstreamRequestLog(ctx, modelName, res.tagged, true, res.prompt, len(uploadedFiles), res.reuse, res.metaLen) gem := c.getConfiguredGem() res.chat = c.gwc.StartChat(model, gem, meta) + res.chat.SetRequestedModel(modelName) return res, nil } From 41effa5aebd335c91500933e1d3db7c5dd475c8b Mon Sep 17 00:00:00 2001 From: hkfires <10558748+hkfires@users.noreply.github.com> Date: Sat, 20 Sep 2025 19:34:53 +0800 Subject: [PATCH 2/4] feat(gemini-web): Add support for image generation with Gemini models through the OpenAI chat completions translator. --- .../chat-completions/gemini_openai_request.go | 25 +++++++ .../gemini_openai_response.go | 68 ++++++++++++++++++- 2 files changed, 90 insertions(+), 3 deletions(-) diff --git a/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go b/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go index 6e842ab2..97320333 100644 --- a/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go +++ b/internal/translator/gemini/openai/chat-completions/gemini_openai_request.go @@ -170,6 +170,31 @@ func ConvertOpenAIRequestToGemini(modelName string, inputRawJSON []byte, _ bool) node := []byte(`{"role":"model","parts":[{"text":""}]}`) node, _ = sjson.SetBytes(node, "parts.0.text", content.String()) out, _ = sjson.SetRawBytes(out, "contents.-1", node) + } else if content.IsArray() { + // Assistant multimodal content (e.g. 
text + image) -> single model content with parts + node := []byte(`{"role":"model","parts":[]}`) + p := 0 + for _, item := range content.Array() { + switch item.Get("type").String() { + case "text": + node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".text", item.Get("text").String()) + p++ + case "image_url": + // If the assistant returned an inline data URL, preserve it for history fidelity. + imageURL := item.Get("image_url.url").String() + if len(imageURL) > 5 { // expect data:... + pieces := strings.SplitN(imageURL[5:], ";", 2) + if len(pieces) == 2 && len(pieces[1]) > 7 { + mime := pieces[0] + data := pieces[1][7:] + node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.mime_type", mime) + node, _ = sjson.SetBytes(node, "parts."+itoa(p)+".inlineData.data", data) + p++ + } + } + } + } + out, _ = sjson.SetRawBytes(out, "contents.-1", node) } else if !content.Exists() || content.Type == gjson.Null { // Tool calls -> single model content with functionCall parts tcs := m.Get("tool_calls") diff --git a/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go b/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go index 420812cb..f7c23b78 100644 --- a/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go +++ b/internal/translator/gemini/openai/chat-completions/gemini_openai_response.go @@ -8,6 +8,7 @@ package chat_completions import ( "bytes" "context" + "encoding/json" "fmt" "time" @@ -99,6 +100,10 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR partResult := partResults[i] partTextResult := partResult.Get("text") functionCallResult := partResult.Get("functionCall") + inlineDataResult := partResult.Get("inlineData") + if !inlineDataResult.Exists() { + inlineDataResult = partResult.Get("inline_data") + } if partTextResult.Exists() { // Handle text content, distinguishing between regular content and reasoning/thoughts. 
@@ -124,6 +129,34 @@ func ConvertGeminiResponseToOpenAI(_ context.Context, _ string, originalRequestR } template, _ = sjson.Set(template, "choices.0.delta.role", "assistant") template, _ = sjson.SetRaw(template, "choices.0.delta.tool_calls.-1", functionCallTemplate) + } else if inlineDataResult.Exists() { + data := inlineDataResult.Get("data").String() + if data == "" { + continue + } + mimeType := inlineDataResult.Get("mimeType").String() + if mimeType == "" { + mimeType = inlineDataResult.Get("mime_type").String() + } + if mimeType == "" { + mimeType = "image/png" + } + imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data) + imagePayload, err := json.Marshal(map[string]any{ + "type": "image_url", + "image_url": map[string]string{ + "url": imageURL, + }, + }) + if err != nil { + continue + } + imagesResult := gjson.Get(template, "choices.0.delta.images") + if !imagesResult.Exists() || !imagesResult.IsArray() { + template, _ = sjson.SetRaw(template, "choices.0.delta.images", `[]`) + } + template, _ = sjson.Set(template, "choices.0.delta.role", "assistant") + template, _ = sjson.SetRaw(template, "choices.0.delta.images.-1", string(imagePayload)) } } } @@ -193,6 +226,10 @@ func ConvertGeminiResponseToOpenAINonStream(_ context.Context, _ string, origina partResult := partsResults[i] partTextResult := partResult.Get("text") functionCallResult := partResult.Get("functionCall") + inlineDataResult := partResult.Get("inlineData") + if !inlineDataResult.Exists() { + inlineDataResult = partResult.Get("inline_data") + } if partTextResult.Exists() { // Append text content, distinguishing between regular content and reasoning. @@ -217,9 +254,34 @@ func ConvertGeminiResponseToOpenAINonStream(_ context.Context, _ string, origina } template, _ = sjson.Set(template, "choices.0.message.role", "assistant") template, _ = sjson.SetRaw(template, "choices.0.message.tool_calls.-1", functionCallItemTemplate) - } else { - // If no usable content is found, return an empty string. 
- return "" + } else if inlineDataResult.Exists() { + data := inlineDataResult.Get("data").String() + if data == "" { + continue + } + mimeType := inlineDataResult.Get("mimeType").String() + if mimeType == "" { + mimeType = inlineDataResult.Get("mime_type").String() + } + if mimeType == "" { + mimeType = "image/png" + } + imageURL := fmt.Sprintf("data:%s;base64,%s", mimeType, data) + imagePayload, err := json.Marshal(map[string]any{ + "type": "image_url", + "image_url": map[string]string{ + "url": imageURL, + }, + }) + if err != nil { + continue + } + imagesResult := gjson.Get(template, "choices.0.message.images") + if !imagesResult.Exists() || !imagesResult.IsArray() { + template, _ = sjson.SetRaw(template, "choices.0.message.images", `[]`) + } + template, _ = sjson.Set(template, "choices.0.message.role", "assistant") + template, _ = sjson.SetRaw(template, "choices.0.message.images.-1", string(imagePayload)) } } } From 9253bdbf77c4e9909e2ecf4acde133941ba35877 Mon Sep 17 00:00:00 2001 From: hkfires <10558748+hkfires@users.noreply.github.com> Date: Sat, 20 Sep 2025 15:48:40 +0800 Subject: [PATCH 3/4] feat(provider): Introduce dedicated provider type for Gemini-Web --- internal/client/gemini-web_client.go | 6 +++--- internal/constant/constant.go | 1 + .../openai/chat-completions/init.go | 20 +++++++++++++++++++ .../gemini-web/openai/responses/init.go | 20 +++++++++++++++++++ internal/translator/init.go | 3 +++ 5 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 internal/translator/gemini-web/openai/chat-completions/init.go create mode 100644 internal/translator/gemini-web/openai/responses/init.go diff --git a/internal/client/gemini-web_client.go b/internal/client/gemini-web_client.go index 44f3224b..2a1aa37c 100644 --- a/internal/client/gemini-web_client.go +++ b/internal/client/gemini-web_client.go @@ -207,7 +207,7 @@ func (c *GeminiWebClient) registerModelsOnce() { if c.modelsRegistered { return } - c.RegisterModels(GEMINI, 
geminiWeb.GetGeminiWebAliasedModels()) + c.RegisterModels(GEMINIWEB, geminiWeb.GetGeminiWebAliasedModels()) c.modelsRegistered = true } @@ -219,8 +219,8 @@ func (c *GeminiWebClient) EnsureRegistered() { } } -func (c *GeminiWebClient) Type() string { return GEMINI } -func (c *GeminiWebClient) Provider() string { return GEMINI } +func (c *GeminiWebClient) Type() string { return GEMINIWEB } +func (c *GeminiWebClient) Provider() string { return GEMINIWEB } func (c *GeminiWebClient) CanProvideModel(modelName string) bool { geminiWeb.EnsureGeminiWebAliasMap() _, ok := geminiWeb.GeminiWebAliasMap[strings.ToLower(modelName)] diff --git a/internal/constant/constant.go b/internal/constant/constant.go index 4e39d93f..bfa7558d 100644 --- a/internal/constant/constant.go +++ b/internal/constant/constant.go @@ -3,6 +3,7 @@ package constant const ( GEMINI = "gemini" GEMINICLI = "gemini-cli" + GEMINIWEB = "gemini-web" CODEX = "codex" CLAUDE = "claude" OPENAI = "openai" diff --git a/internal/translator/gemini-web/openai/chat-completions/init.go b/internal/translator/gemini-web/openai/chat-completions/init.go new file mode 100644 index 00000000..9384bd04 --- /dev/null +++ b/internal/translator/gemini-web/openai/chat-completions/init.go @@ -0,0 +1,20 @@ +package chat_completions + +import ( + . 
"github.com/luispater/CLIProxyAPI/v5/internal/constant" + "github.com/luispater/CLIProxyAPI/v5/internal/interfaces" + geminiChat "github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini/openai/chat-completions" + "github.com/luispater/CLIProxyAPI/v5/internal/translator/translator" +) + +func init() { + translator.Register( + OPENAI, + GEMINIWEB, + geminiChat.ConvertOpenAIRequestToGemini, + interfaces.TranslateResponse{ + Stream: geminiChat.ConvertGeminiResponseToOpenAI, + NonStream: geminiChat.ConvertGeminiResponseToOpenAINonStream, + }, + ) +} diff --git a/internal/translator/gemini-web/openai/responses/init.go b/internal/translator/gemini-web/openai/responses/init.go new file mode 100644 index 00000000..c7ed6149 --- /dev/null +++ b/internal/translator/gemini-web/openai/responses/init.go @@ -0,0 +1,20 @@ +package responses + +import ( + . "github.com/luispater/CLIProxyAPI/v5/internal/constant" + "github.com/luispater/CLIProxyAPI/v5/internal/interfaces" + geminiResponses "github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini/openai/responses" + "github.com/luispater/CLIProxyAPI/v5/internal/translator/translator" +) + +func init() { + translator.Register( + OPENAI_RESPONSE, + GEMINIWEB, + geminiResponses.ConvertOpenAIResponsesRequestToGemini, + interfaces.TranslateResponse{ + Stream: geminiResponses.ConvertGeminiResponseToOpenAIResponses, + NonStream: geminiResponses.ConvertGeminiResponseToOpenAIResponsesNonStream, + }, + ) +} diff --git a/internal/translator/init.go b/internal/translator/init.go index 4905fc1f..f54db620 100644 --- a/internal/translator/init.go +++ b/internal/translator/init.go @@ -23,6 +23,9 @@ import ( _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini/openai/chat-completions" _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini/openai/responses" + _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini-web/openai/chat-completions" + _ 
"github.com/luispater/CLIProxyAPI/v5/internal/translator/gemini-web/openai/responses" + _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/openai/claude" _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/openai/gemini" _ "github.com/luispater/CLIProxyAPI/v5/internal/translator/openai/gemini-cli" From e5a6fd2d4f35a624a52fc5021fe0d0294c77f137 Mon Sep 17 00:00:00 2001 From: Luis Pater Date: Sun, 21 Sep 2025 11:16:03 +0800 Subject: [PATCH 4/4] refactor: standardize `dataTag` processing across response translators - Unified `dataTag` initialization by removing spaces after `data:`. - Replaced manual slicing with `bytes.TrimSpace` for consistent and robust handling of JSON payloads. --- internal/client/gemini-cli_client.go | 6 ++--- internal/client/gemini_client.go | 6 ++--- .../client/openai-compatibility_client.go | 25 +++++-------------- internal/client/qwen_client.go | 12 ++++----- .../claude/gemini/claude_gemini_response.go | 6 ++--- .../claude_openai_response.go | 6 ++--- .../claude_openai-responses_response.go | 4 +-- .../codex/claude/codex_claude_response.go | 4 +-- .../codex/gemini/codex_gemini_response.go | 6 ++--- .../chat-completions/codex_openai_response.go | 6 ++--- .../codex_openai-responses_response.go | 8 +++--- 11 files changed, 38 insertions(+), 51 deletions(-) diff --git a/internal/client/gemini-cli_client.go b/internal/client/gemini-cli_client.go index c2b48683..8c923748 100644 --- a/internal/client/gemini-cli_client.go +++ b/internal/client/gemini-cli_client.go @@ -554,7 +554,7 @@ func (c *GeminiCLIClient) SendRawMessageStream(ctx context.Context, modelName st rawJSON, _ = sjson.SetBytes(rawJSON, "project", c.GetProjectID()) rawJSON, _ = sjson.SetBytes(rawJSON, "model", modelName) - dataTag := []byte("data: ") + dataTag := []byte("data:") errChan := make(chan *interfaces.ErrorMessage) dataChan := make(chan []byte) // log.Debugf(string(rawJSON)) @@ -619,7 +619,7 @@ func (c *GeminiCLIClient) SendRawMessageStream(ctx context.Context, 
modelName st for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, line[6:], ¶m) + lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, bytes.TrimSpace(line[5:]), ¶m) for i := 0; i < len(lines); i++ { dataChan <- []byte(lines[i]) } @@ -630,7 +630,7 @@ func (c *GeminiCLIClient) SendRawMessageStream(ctx context.Context, modelName st for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - dataChan <- line[6:] + dataChan <- bytes.TrimSpace(line[5:]) } c.AddAPIResponseData(ctx, line) } diff --git a/internal/client/gemini_client.go b/internal/client/gemini_client.go index 10e43d2a..8ff5de60 100644 --- a/internal/client/gemini_client.go +++ b/internal/client/gemini_client.go @@ -298,7 +298,7 @@ func (c *GeminiClient) SendRawMessageStream(ctx context.Context, modelName strin handlerType := handler.HandlerType() rawJSON = translator.Request(handlerType, c.Type(), modelName, rawJSON, true) - dataTag := []byte("data: ") + dataTag := []byte("data:") errChan := make(chan *interfaces.ErrorMessage) dataChan := make(chan []byte) // log.Debugf(string(rawJSON)) @@ -342,7 +342,7 @@ func (c *GeminiClient) SendRawMessageStream(ctx context.Context, modelName strin for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, line[6:], ¶m) + lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, bytes.TrimSpace(line[5:]), ¶m) for i := 0; i < len(lines); i++ { dataChan <- []byte(lines[i]) } @@ -353,7 +353,7 @@ func (c *GeminiClient) SendRawMessageStream(ctx context.Context, modelName strin for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - dataChan <- line[6:] + dataChan <- 
bytes.TrimSpace(line[5:]) } c.AddAPIResponseData(ctx, line) } diff --git a/internal/client/openai-compatibility_client.go b/internal/client/openai-compatibility_client.go index 990bc610..56139b0c 100644 --- a/internal/client/openai-compatibility_client.go +++ b/internal/client/openai-compatibility_client.go @@ -291,9 +291,8 @@ func (c *OpenAICompatibilityClient) SendRawMessageStream(ctx context.Context, mo handlerType := handler.HandlerType() rawJSON = translator.Request(handlerType, c.Type(), modelName, rawJSON, true) - dataTag := []byte("data: ") - dataUglyTag := []byte("data:") // Some APIs providers don't add space after "data:", fuck for them all - doneTag := []byte("data: [DONE]") + dataTag := []byte("data:") + doneTag := []byte("[DONE]") errChan := make(chan *interfaces.ErrorMessage) dataChan := make(chan []byte) // log.Debugf(string(rawJSON)) @@ -332,19 +331,10 @@ func (c *OpenAICompatibilityClient) SendRawMessageStream(ctx context.Context, mo for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - if bytes.Equal(line, doneTag) { + if bytes.Equal(bytes.TrimSpace(line[5:]), doneTag) { break } - lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, line[6:], ¶m) - for i := 0; i < len(lines); i++ { - c.AddAPIResponseData(ctx, line) - dataChan <- []byte(lines[i]) - } - } else if bytes.HasPrefix(line, dataUglyTag) { - if bytes.Equal(line, doneTag) { - break - } - lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, line[5:], ¶m) + lines := translator.Response(handlerType, c.Type(), newCtx, modelName, originalRequestRawJSON, rawJSON, bytes.TrimSpace(line[5:]), ¶m) for i := 0; i < len(lines); i++ { c.AddAPIResponseData(ctx, line) dataChan <- []byte(lines[i]) @@ -356,13 +346,10 @@ func (c *OpenAICompatibilityClient) SendRawMessageStream(ctx context.Context, mo for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { 
- if bytes.Equal(line, doneTag) { + if bytes.Equal(bytes.TrimSpace(line[5:]), doneTag) { break } - c.AddAPIResponseData(newCtx, line[6:]) - dataChan <- line[6:] - } else if bytes.HasPrefix(line, dataUglyTag) { - c.AddAPIResponseData(newCtx, line[5:]) + c.AddAPIResponseData(newCtx, bytes.TrimSpace(line[5:])) dataChan <- line[5:] } } diff --git a/internal/client/qwen_client.go b/internal/client/qwen_client.go index 9eff9a46..ab22977c 100644 --- a/internal/client/qwen_client.go +++ b/internal/client/qwen_client.go @@ -215,8 +215,8 @@ func (c *QwenClient) SendRawMessageStream(ctx context.Context, modelName string, handlerType := handler.HandlerType() rawJSON = translator.Request(handlerType, c.Type(), modelName, rawJSON, true) - dataTag := []byte("data: ") - doneTag := []byte("data: [DONE]") + dataTag := []byte("data:") + doneTag := []byte("[DONE]") errChan := make(chan *interfaces.ErrorMessage) dataChan := make(chan []byte) @@ -264,7 +264,7 @@ func (c *QwenClient) SendRawMessageStream(ctx context.Context, modelName string, for scanner.Scan() { line := scanner.Bytes() if bytes.HasPrefix(line, dataTag) { - lines := translator.Response(handlerType, c.Type(), ctx, modelName, originalRequestRawJSON, rawJSON, line[6:], ¶m) + lines := translator.Response(handlerType, c.Type(), ctx, modelName, originalRequestRawJSON, rawJSON, bytes.TrimSpace(line[5:]), ¶m) for i := 0; i < len(lines); i++ { dataChan <- []byte(lines[i]) } @@ -274,9 +274,9 @@ func (c *QwenClient) SendRawMessageStream(ctx context.Context, modelName string, } else { for scanner.Scan() { line := scanner.Bytes() - if !bytes.HasPrefix(line, doneTag) { - if bytes.HasPrefix(line, dataTag) { - dataChan <- line[6:] + if bytes.HasPrefix(line, dataTag) { + if !bytes.Equal(bytes.TrimSpace(line[5:]), doneTag) { + dataChan <- bytes.TrimSpace(line[5:]) } } c.AddAPIResponseData(ctx, line) diff --git a/internal/translator/claude/gemini/claude_gemini_response.go b/internal/translator/claude/gemini/claude_gemini_response.go index 
aab4b344..74de0c0b 100644 --- a/internal/translator/claude/gemini/claude_gemini_response.go +++ b/internal/translator/claude/gemini/claude_gemini_response.go @@ -17,7 +17,7 @@ import ( ) var ( - dataTag = []byte("data: ") + dataTag = []byte("data:") ) // ConvertAnthropicResponseToGeminiParams holds parameters for response conversion @@ -64,7 +64,7 @@ func ConvertClaudeResponseToGemini(_ context.Context, modelName string, original if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) root := gjson.ParseBytes(rawJSON) eventType := root.Get("type").String() @@ -336,7 +336,7 @@ func ConvertClaudeResponseToGeminiNonStream(_ context.Context, modelName string, line := scanner.Bytes() // log.Debug(string(line)) if bytes.HasPrefix(line, dataTag) { - jsonData := line[6:] + jsonData := bytes.TrimSpace(line[5:]) streamingEvents = append(streamingEvents, jsonData) } } diff --git a/internal/translator/claude/openai/chat-completions/claude_openai_response.go b/internal/translator/claude/openai/chat-completions/claude_openai_response.go index 7cdbdfd0..0d11aedc 100644 --- a/internal/translator/claude/openai/chat-completions/claude_openai_response.go +++ b/internal/translator/claude/openai/chat-completions/claude_openai_response.go @@ -18,7 +18,7 @@ import ( ) var ( - dataTag = []byte("data: ") + dataTag = []byte("data:") ) // ConvertAnthropicResponseToOpenAIParams holds parameters for response conversion @@ -62,7 +62,7 @@ func ConvertClaudeResponseToOpenAI(_ context.Context, modelName string, original if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) root := gjson.ParseBytes(rawJSON) eventType := root.Get("type").String() @@ -289,7 +289,7 @@ func ConvertClaudeResponseToOpenAINonStream(_ context.Context, _ string, origina if !bytes.HasPrefix(line, dataTag) { continue } - chunks = append(chunks, line[6:]) + chunks = append(chunks, 
bytes.TrimSpace(line[5:])) } // Base OpenAI non-streaming response template diff --git a/internal/translator/claude/openai/responses/claude_openai-responses_response.go b/internal/translator/claude/openai/responses/claude_openai-responses_response.go index 8f956e07..f0d0d2a7 100644 --- a/internal/translator/claude/openai/responses/claude_openai-responses_response.go +++ b/internal/translator/claude/openai/responses/claude_openai-responses_response.go @@ -34,7 +34,7 @@ type claudeToResponsesState struct { ReasoningIndex int } -var dataTag = []byte("data: ") +var dataTag = []byte("data:") func emitEvent(event string, payload string) string { return fmt.Sprintf("event: %s\ndata: %s\n\n", event, payload) @@ -51,7 +51,7 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) root := gjson.ParseBytes(rawJSON) ev := root.Get("type").String() var out []string diff --git a/internal/translator/codex/claude/codex_claude_response.go b/internal/translator/codex/claude/codex_claude_response.go index 704568e1..64d4cc67 100644 --- a/internal/translator/codex/claude/codex_claude_response.go +++ b/internal/translator/codex/claude/codex_claude_response.go @@ -16,7 +16,7 @@ import ( ) var ( - dataTag = []byte("data: ") + dataTag = []byte("data:") ) // ConvertCodexResponseToClaude performs sophisticated streaming response format conversion.
@@ -45,7 +45,7 @@ func ConvertCodexResponseToClaude(_ context.Context, _ string, originalRequestRa if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) output := "" rootResult := gjson.ParseBytes(rawJSON) diff --git a/internal/translator/codex/gemini/codex_gemini_response.go b/internal/translator/codex/gemini/codex_gemini_response.go index 67559ac2..20d255a4 100644 --- a/internal/translator/codex/gemini/codex_gemini_response.go +++ b/internal/translator/codex/gemini/codex_gemini_response.go @@ -16,7 +16,7 @@ import ( ) var ( - dataTag = []byte("data: ") + dataTag = []byte("data:") ) // ConvertCodexResponseToGeminiParams holds parameters for response conversion. @@ -53,7 +53,7 @@ func ConvertCodexResponseToGemini(_ context.Context, modelName string, originalR if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) rootResult := gjson.ParseBytes(rawJSON) typeResult := rootResult.Get("type") @@ -161,7 +161,7 @@ func ConvertCodexResponseToGeminiNonStream(_ context.Context, modelName string, if !bytes.HasPrefix(line, dataTag) { continue } - rawJSON = line[6:] + rawJSON = bytes.TrimSpace(line[5:]) rootResult := gjson.ParseBytes(rawJSON) diff --git a/internal/translator/codex/openai/chat-completions/codex_openai_response.go b/internal/translator/codex/openai/chat-completions/codex_openai_response.go index 9a596426..7ecf05be 100644 --- a/internal/translator/codex/openai/chat-completions/codex_openai_response.go +++ b/internal/translator/codex/openai/chat-completions/codex_openai_response.go @@ -16,7 +16,7 @@ import ( ) var ( - dataTag = []byte("data: ") + dataTag = []byte("data:") ) // ConvertCliToOpenAIParams holds parameters for response conversion.
@@ -54,7 +54,7 @@ func ConvertCodexResponseToOpenAI(_ context.Context, modelName string, originalR if !bytes.HasPrefix(rawJSON, dataTag) { return []string{} } - rawJSON = rawJSON[6:] + rawJSON = bytes.TrimSpace(rawJSON[5:]) // Initialize the OpenAI SSE template. template := `{"id":"","object":"chat.completion.chunk","created":12345,"model":"model","choices":[{"index":0,"delta":{"role":null,"content":null,"reasoning_content":null,"tool_calls":null},"finish_reason":null,"native_finish_reason":null}]}` @@ -175,7 +175,7 @@ func ConvertCodexResponseToOpenAINonStream(_ context.Context, _ string, original if !bytes.HasPrefix(line, dataTag) { continue } - rawJSON = line[6:] + rawJSON = bytes.TrimSpace(line[5:]) rootResult := gjson.ParseBytes(rawJSON) // Verify this is a response.completed event diff --git a/internal/translator/codex/openai/responses/codex_openai-responses_response.go b/internal/translator/codex/openai/responses/codex_openai-responses_response.go index 9707e05e..0652ef4b 100644 --- a/internal/translator/codex/openai/responses/codex_openai-responses_response.go +++ b/internal/translator/codex/openai/responses/codex_openai-responses_response.go @@ -13,8 +13,8 @@ import ( // ConvertCodexResponseToOpenAIResponses converts OpenAI Chat Completions streaming chunks // to OpenAI Responses SSE events (response.*).
func ConvertCodexResponseToOpenAIResponses(ctx context.Context, modelName string, originalRequestRawJSON, requestRawJSON, rawJSON []byte, param *any) []string { - if bytes.HasPrefix(rawJSON, []byte("data: ")) { - rawJSON = rawJSON[6:] + if bytes.HasPrefix(rawJSON, []byte("data:")) { + rawJSON = bytes.TrimSpace(rawJSON[5:]) if typeResult := gjson.GetBytes(rawJSON, "type"); typeResult.Exists() { typeStr := typeResult.String() if typeStr == "response.created" || typeStr == "response.in_progress" || typeStr == "response.completed" { @@ -32,14 +32,14 @@ func ConvertCodexResponseToOpenAIResponsesNonStream(_ context.Context, modelName scanner := bufio.NewScanner(bytes.NewReader(rawJSON)) buffer := make([]byte, 10240*1024) scanner.Buffer(buffer, 10240*1024) - dataTag := []byte("data: ") + dataTag := []byte("data:") for scanner.Scan() { line := scanner.Bytes() if !bytes.HasPrefix(line, dataTag) { continue } - rawJSON = line[6:] + rawJSON = bytes.TrimSpace(line[5:]) rootResult := gjson.ParseBytes(rawJSON) // Verify this is a response.completed event