Mirror of https://github.com/router-for-me/CLIProxyAPIPlus.git, synced 2026-04-24 06:40:31 +00:00
Compare commits
15 Commits

- 293cc8c1a3
- 453e744abf
- 653439698e
- 5418bbc338
- 89254cfc97
- 6bd9a034f7
- 26fc65b051
- ed5ec5b55c
- df777650ac
- 8fac6b147a
- 10f8c795ac
- 3e4858a624
- 1231dc9cda
- c84ff42bcd
- 1b8cb7b77b
```diff
@@ -434,7 +434,7 @@ func main() {
 	usage.SetStatisticsEnabled(cfg.UsageStatisticsEnabled)
 	coreauth.SetQuotaCooldownDisabled(cfg.DisableCooling)

-	if err = logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
+	if err = logging.ConfigureLogOutput(cfg.LoggingToFile, cfg.LogsMaxTotalSizeMB); err != nil {
 		log.Errorf("failed to configure log output: %v", err)
 		return
 	}
```
```diff
@@ -47,6 +47,10 @@ incognito-browser: true
 # When true, write application logs to rotating files instead of stdout
 logging-to-file: false

+# Maximum total size (MB) of log files under the logs directory. When exceeded, the oldest log
+# files are deleted until within the limit. Set to 0 to disable.
+logs-max-total-size-mb: 0
+
 # When false, disable in-memory usage statistics aggregation
 usage-statistics-enabled: false

```
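Taken together with the later config and logging hunks in this compare, the new knob is interpreted roughly like this (a sketch assembled from those hunks, not a verbatim excerpt):

```go
// Negative values are clamped to 0 at config load; 0 means "cleaner disabled".
if cfg.LogsMaxTotalSizeMB < 0 {
	cfg.LogsMaxTotalSizeMB = 0
}
maxBytes := int64(cfg.LogsMaxTotalSizeMB) * 1024 * 1024 // MB -> bytes
```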
```diff
@@ -134,7 +134,43 @@ func (fh *FallbackHandler) WrapHandler(handler gin.HandlerFunc) gin.HandlerFunc
 	}

 	// Normalize model (handles dynamic thinking suffixes)
-	normalizedModel, _ := util.NormalizeThinkingModel(modelName)
+	normalizedModel, thinkingMetadata := util.NormalizeThinkingModel(modelName)
+	thinkingSuffix := ""
+	if thinkingMetadata != nil && strings.HasPrefix(modelName, normalizedModel) {
+		thinkingSuffix = modelName[len(normalizedModel):]
+	}
+
+	resolveMappedModel := func() (string, []string) {
+		if fh.modelMapper == nil {
+			return "", nil
+		}
+
+		mappedModel := fh.modelMapper.MapModel(modelName)
+		if mappedModel == "" {
+			mappedModel = fh.modelMapper.MapModel(normalizedModel)
+		}
+		mappedModel = strings.TrimSpace(mappedModel)
+		if mappedModel == "" {
+			return "", nil
+		}
+
+		// Preserve dynamic thinking suffix (e.g. "(xhigh)") when mapping applies, unless the target
+		// already specifies its own thinking suffix.
+		if thinkingSuffix != "" {
+			_, mappedThinkingMetadata := util.NormalizeThinkingModel(mappedModel)
+			if mappedThinkingMetadata == nil {
+				mappedModel += thinkingSuffix
+			}
+		}
+
+		mappedBaseModel, _ := util.NormalizeThinkingModel(mappedModel)
+		mappedProviders := util.GetProviderName(mappedBaseModel)
+		if len(mappedProviders) == 0 {
+			return "", nil
+		}
+
+		return mappedModel, mappedProviders
+	}

 	// Track resolved model for logging (may change if mapping is applied)
 	resolvedModel := normalizedModel
```
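The suffix handling above is plain string slicing once NormalizeThinkingModel has recognized a dynamic thinking suffix. A worked example with the model name used by the new test further down (illustrative values, assuming NormalizeThinkingModel("gpt-5.2(xhigh)") returns "gpt-5.2" with non-nil metadata):

```go
modelName := "gpt-5.2(xhigh)"
normalizedModel := "gpt-5.2"                       // assumed normalization result
thinkingSuffix := modelName[len(normalizedModel):] // "(xhigh)"
mappedModel := "test/gpt-5.2" + thinkingSuffix     // "test/gpt-5.2(xhigh)"
```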
```diff
@@ -147,21 +183,15 @@ func (fh *FallbackHandler) WrapHandler(handler gin.HandlerFunc) gin.HandlerFunc
 	if forceMappings {
 		// FORCE MODE: Check model mappings FIRST (takes precedence over local API keys)
 		// This allows users to route Amp requests to their preferred OAuth providers
-		if fh.modelMapper != nil {
-			if mappedModel := fh.modelMapper.MapModel(normalizedModel); mappedModel != "" {
-				// Mapping found - check if we have a provider for the mapped model
-				mappedProviders := util.GetProviderName(mappedModel)
-				if len(mappedProviders) > 0 {
-					// Mapping found and provider available - rewrite the model in request body
-					bodyBytes = rewriteModelInRequest(bodyBytes, mappedModel)
-					c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
-					// Store mapped model in context for handlers that check it (like gemini bridge)
-					c.Set(MappedModelContextKey, mappedModel)
-					resolvedModel = mappedModel
-					usedMapping = true
-					providers = mappedProviders
-				}
-			}
+		if mappedModel, mappedProviders := resolveMappedModel(); mappedModel != "" {
+			// Mapping found and provider available - rewrite the model in request body
+			bodyBytes = rewriteModelInRequest(bodyBytes, mappedModel)
+			c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+			// Store mapped model in context for handlers that check it (like gemini bridge)
+			c.Set(MappedModelContextKey, mappedModel)
+			resolvedModel = mappedModel
+			usedMapping = true
+			providers = mappedProviders
 		}

 		// If no mapping applied, check for local providers
```
```diff
@@ -174,21 +204,15 @@ func (fh *FallbackHandler) WrapHandler(handler gin.HandlerFunc) gin.HandlerFunc

 	if len(providers) == 0 {
 		// No providers configured - check if we have a model mapping
-		if fh.modelMapper != nil {
-			if mappedModel := fh.modelMapper.MapModel(normalizedModel); mappedModel != "" {
-				// Mapping found - check if we have a provider for the mapped model
-				mappedProviders := util.GetProviderName(mappedModel)
-				if len(mappedProviders) > 0 {
-					// Mapping found and provider available - rewrite the model in request body
-					bodyBytes = rewriteModelInRequest(bodyBytes, mappedModel)
-					c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
-					// Store mapped model in context for handlers that check it (like gemini bridge)
-					c.Set(MappedModelContextKey, mappedModel)
-					resolvedModel = mappedModel
-					usedMapping = true
-					providers = mappedProviders
-				}
-			}
+		if mappedModel, mappedProviders := resolveMappedModel(); mappedModel != "" {
+			// Mapping found and provider available - rewrite the model in request body
+			bodyBytes = rewriteModelInRequest(bodyBytes, mappedModel)
+			c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+			// Store mapped model in context for handlers that check it (like gemini bridge)
+			c.Set(MappedModelContextKey, mappedModel)
+			resolvedModel = mappedModel
+			usedMapping = true
+			providers = mappedProviders
 		}
 	}
 }
```
```diff
@@ -222,14 +246,14 @@ func (fh *FallbackHandler) WrapHandler(handler gin.HandlerFunc) gin.HandlerFunc
 		// Log: Model was mapped to another model
 		log.Debugf("amp model mapping: request %s -> %s", normalizedModel, resolvedModel)
 		logAmpRouting(RouteTypeModelMapping, modelName, resolvedModel, providerName, requestPath)
-		rewriter := NewResponseRewriter(c.Writer, normalizedModel)
+		rewriter := NewResponseRewriter(c.Writer, modelName)
 		c.Writer = rewriter
 		// Filter Anthropic-Beta header only for local handling paths
 		filterAntropicBetaHeader(c)
 		c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes))
 		handler(c)
 		rewriter.Flush()
-		log.Debugf("amp model mapping: response %s -> %s", resolvedModel, normalizedModel)
+		log.Debugf("amp model mapping: response %s -> %s", resolvedModel, modelName)
 	} else if len(providers) > 0 {
 		// Log: Using local provider (free)
 		logAmpRouting(RouteTypeLocalProvider, modelName, resolvedModel, providerName, requestPath)
```
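Net effect of the four hunks above: both call sites share the resolveMappedModel closure, the rewritten request body carries the mapped name with its thinking suffix intact, and the response rewriter is now keyed on the original modelName rather than the normalized form, so the client gets back exactly the model string it sent (e.g. gpt-5.2(xhigh), not gpt-5.2). The new test below exercises this round trip end to end.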
internal/api/modules/amp/fallback_handlers_test.go (new file, 73 lines):

```go
package amp

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"testing"

	"github.com/gin-gonic/gin"

	"github.com/router-for-me/CLIProxyAPI/v6/internal/config"
	"github.com/router-for-me/CLIProxyAPI/v6/internal/registry"
)

func TestFallbackHandler_ModelMapping_PreservesThinkingSuffixAndRewritesResponse(t *testing.T) {
	gin.SetMode(gin.TestMode)

	reg := registry.GetGlobalRegistry()
	reg.RegisterClient("test-client-amp-fallback", "codex", []*registry.ModelInfo{
		{ID: "test/gpt-5.2", OwnedBy: "openai", Type: "codex"},
	})
	defer reg.UnregisterClient("test-client-amp-fallback")

	mapper := NewModelMapper([]config.AmpModelMapping{
		{From: "gpt-5.2", To: "test/gpt-5.2"},
	})

	fallback := NewFallbackHandlerWithMapper(func() *httputil.ReverseProxy { return nil }, mapper, nil)

	handler := func(c *gin.Context) {
		var req struct {
			Model string `json:"model"`
		}
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}

		c.JSON(http.StatusOK, gin.H{
			"model":      req.Model,
			"seen_model": req.Model,
		})
	}

	r := gin.New()
	r.POST("/chat/completions", fallback.WrapHandler(handler))

	reqBody := []byte(`{"model":"gpt-5.2(xhigh)"}`)
	req := httptest.NewRequest(http.MethodPost, "/chat/completions", bytes.NewReader(reqBody))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("Expected status 200, got %d", w.Code)
	}

	var resp struct {
		Model     string `json:"model"`
		SeenModel string `json:"seen_model"`
	}
	if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
		t.Fatalf("Failed to parse response JSON: %v", err)
	}

	if resp.Model != "gpt-5.2(xhigh)" {
		t.Errorf("Expected response model gpt-5.2(xhigh), got %s", resp.Model)
	}
	if resp.SeenModel != "test/gpt-5.2(xhigh)" {
		t.Errorf("Expected handler to see test/gpt-5.2(xhigh), got %s", resp.SeenModel)
	}
}
```
```diff
@@ -59,7 +59,8 @@ func (m *DefaultModelMapper) MapModel(requestedModel string) string {
 	}

 	// Verify target model has available providers
-	providers := util.GetProviderName(targetModel)
+	normalizedTarget, _ := util.NormalizeThinkingModel(targetModel)
+	providers := util.GetProviderName(normalizedTarget)
 	if len(providers) == 0 {
 		log.Debugf("amp model mapping: target model %s has no available providers, skipping mapping", targetModel)
 		return ""
```
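Normalizing the target before the provider lookup lets a mapping point at a model name that carries a thinking suffix: the registry only knows the base model, so availability is checked against the normalized name while the suffixed target is still returned unchanged. The TargetWithThinkingSuffix test added below pins this down: a mapping to gpt-5.2(xhigh) now resolves providers via gpt-5.2 instead of failing the GetProviderName check.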
```diff
@@ -71,6 +71,25 @@ func TestModelMapper_MapModel_WithProvider(t *testing.T) {
 	}
 }

+func TestModelMapper_MapModel_TargetWithThinkingSuffix(t *testing.T) {
+	reg := registry.GetGlobalRegistry()
+	reg.RegisterClient("test-client-thinking", "codex", []*registry.ModelInfo{
+		{ID: "gpt-5.2", OwnedBy: "openai", Type: "codex"},
+	})
+	defer reg.UnregisterClient("test-client-thinking")
+
+	mappings := []config.AmpModelMapping{
+		{From: "gpt-5.2-alias", To: "gpt-5.2(xhigh)"},
+	}
+
+	mapper := NewModelMapper(mappings)
+
+	result := mapper.MapModel("gpt-5.2-alias")
+	if result != "gpt-5.2(xhigh)" {
+		t.Errorf("Expected gpt-5.2(xhigh), got %s", result)
+	}
+}
+
 func TestModelMapper_MapModel_CaseInsensitive(t *testing.T) {
 	reg := registry.GetGlobalRegistry()
 	reg.RegisterClient("test-client2", "claude", []*registry.ModelInfo{
```
```diff
@@ -126,7 +126,7 @@ func (m *AmpModule) registerManagementRoutes(engine *gin.Engine, baseHandler *ha
 	var authWithBypass gin.HandlerFunc
 	if auth != nil {
 		ampAPI.Use(auth)
-		authWithBypass = wrapManagementAuth(auth, "/threads", "/auth")
+		authWithBypass = wrapManagementAuth(auth, "/threads", "/auth", "/docs")
 	}

 	// Dynamic proxy handler that uses m.getProxy() for hot-reload support
```
```diff
@@ -175,7 +175,11 @@ func (m *AmpModule) registerManagementRoutes(engine *gin.Engine, baseHandler *ha
 	if authWithBypass != nil {
 		rootMiddleware = append(rootMiddleware, authWithBypass)
 	}
+	engine.GET("/threads", append(rootMiddleware, proxyHandler)...)
 	engine.GET("/threads/*path", append(rootMiddleware, proxyHandler)...)
+	engine.GET("/docs", append(rootMiddleware, proxyHandler)...)
+	engine.GET("/docs/*path", append(rootMiddleware, proxyHandler)...)
+
 	engine.GET("/threads.rss", append(rootMiddleware, proxyHandler)...)
 	engine.GET("/news.rss", append(rootMiddleware, proxyHandler)...)

```
```diff
@@ -865,11 +865,20 @@ func (s *Server) UpdateClients(cfg *config.Config) {
 		}
 	}

-	if oldCfg != nil && oldCfg.LoggingToFile != cfg.LoggingToFile {
-		if err := logging.ConfigureLogOutput(cfg.LoggingToFile); err != nil {
+	if oldCfg == nil || oldCfg.LoggingToFile != cfg.LoggingToFile || oldCfg.LogsMaxTotalSizeMB != cfg.LogsMaxTotalSizeMB {
+		if err := logging.ConfigureLogOutput(cfg.LoggingToFile, cfg.LogsMaxTotalSizeMB); err != nil {
 			log.Errorf("failed to reconfigure log output: %v", err)
 		} else {
-			log.Debugf("logging_to_file updated from %t to %t", oldCfg.LoggingToFile, cfg.LoggingToFile)
+			if oldCfg == nil {
+				log.Debug("log output configuration refreshed")
+			} else {
+				if oldCfg.LoggingToFile != cfg.LoggingToFile {
+					log.Debugf("logging_to_file updated from %t to %t", oldCfg.LoggingToFile, cfg.LoggingToFile)
+				}
+				if oldCfg.LogsMaxTotalSizeMB != cfg.LogsMaxTotalSizeMB {
+					log.Debugf("logs_max_total_size_mb updated from %d to %d", oldCfg.LogsMaxTotalSizeMB, cfg.LogsMaxTotalSizeMB)
+				}
+			}
 		}
 	}

```
```diff
@@ -42,6 +42,10 @@ type Config struct {
 	// LoggingToFile controls whether application logs are written to rotating files or stdout.
 	LoggingToFile bool `yaml:"logging-to-file" json:"logging-to-file"`

+	// LogsMaxTotalSizeMB limits the total size (in MB) of log files under the logs directory.
+	// When exceeded, the oldest log files are deleted until within the limit. Set to 0 to disable.
+	LogsMaxTotalSizeMB int `yaml:"logs-max-total-size-mb" json:"logs-max-total-size-mb"`
+
 	// UsageStatisticsEnabled toggles in-memory usage aggregation; when false, usage data is discarded.
 	UsageStatisticsEnabled bool `yaml:"usage-statistics-enabled" json:"usage-statistics-enabled"`

```
```diff
@@ -382,6 +386,7 @@ func LoadConfigOptional(configFile string, optional bool) (*Config, error) {
 	// Set defaults before unmarshal so that absent keys keep defaults.
 	cfg.Host = "" // Default empty: binds to all interfaces (IPv4 + IPv6)
 	cfg.LoggingToFile = false
+	cfg.LogsMaxTotalSizeMB = 0
 	cfg.UsageStatisticsEnabled = false
 	cfg.DisableCooling = false
 	cfg.AmpCode.RestrictManagementToLocalhost = false // Default to false: API key auth is sufficient
```
```diff
@@ -427,6 +432,10 @@ func LoadConfigOptional(configFile string, optional bool) (*Config, error) {
 		cfg.RemoteManagement.PanelGitHubRepository = DefaultPanelGitHubRepository
 	}

+	if cfg.LogsMaxTotalSizeMB < 0 {
+		cfg.LogsMaxTotalSizeMB = 0
+	}
+
 	// Sync request authentication providers with inline API keys for backwards compatibility.
 	syncInlineAccessProvider(&cfg)

```
```diff
@@ -76,39 +76,45 @@ func SetupBaseLogger() {
 }

 // ConfigureLogOutput switches the global log destination between rotating files and stdout.
-func ConfigureLogOutput(loggingToFile bool) error {
+// When logsMaxTotalSizeMB > 0, a background cleaner removes the oldest log files in the logs directory
+// until the total size is within the limit.
+func ConfigureLogOutput(loggingToFile bool, logsMaxTotalSizeMB int) error {
 	SetupBaseLogger()

 	writerMu.Lock()
 	defer writerMu.Unlock()

+	logDir := "logs"
+	if base := util.WritablePath(); base != "" {
+		logDir = filepath.Join(base, "logs")
+	}
+
+	protectedPath := ""
 	if loggingToFile {
-		logDir := "logs"
-		if base := util.WritablePath(); base != "" {
-			logDir = filepath.Join(base, "logs")
-		}
 		if err := os.MkdirAll(logDir, 0o755); err != nil {
 			return fmt.Errorf("logging: failed to create log directory: %w", err)
 		}
 		if logWriter != nil {
 			_ = logWriter.Close()
 		}
+		protectedPath = filepath.Join(logDir, "main.log")
 		logWriter = &lumberjack.Logger{
-			Filename:   filepath.Join(logDir, "main.log"),
+			Filename:   protectedPath,
 			MaxSize:    10,
 			MaxBackups: 0,
 			MaxAge:     0,
 			Compress:   false,
 		}
 		log.SetOutput(logWriter)
-		return nil
+	} else {
+		if logWriter != nil {
+			_ = logWriter.Close()
+			logWriter = nil
+		}
+		log.SetOutput(os.Stdout)
 	}

-	if logWriter != nil {
-		_ = logWriter.Close()
-		logWriter = nil
-	}
-	log.SetOutput(os.Stdout)
+	configureLogDirCleanerLocked(logDir, logsMaxTotalSizeMB, protectedPath)
 	return nil
 }

```
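A minimal call sketch mirroring the main.go hunk at the top of this compare (the 500 MB cap is an arbitrary example value, not taken from the source):

```go
// Write logs to rotating files and cap the logs directory at 500 MB;
// pass 0 as the second argument to leave the size cleaner disabled.
if err := logging.ConfigureLogOutput(true, 500); err != nil {
	log.Errorf("failed to configure log output: %v", err)
}
```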
```diff
@@ -116,6 +122,8 @@ func closeLogOutputs() {
 	writerMu.Lock()
 	defer writerMu.Unlock()

+	stopLogDirCleanerLocked()
+
 	if logWriter != nil {
 		_ = logWriter.Close()
 		logWriter = nil
```
internal/logging/log_dir_cleaner.go (new file, 166 lines):

```go
package logging

import (
	"context"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"
)

const logDirCleanerInterval = time.Minute

var logDirCleanerCancel context.CancelFunc

func configureLogDirCleanerLocked(logDir string, maxTotalSizeMB int, protectedPath string) {
	stopLogDirCleanerLocked()

	if maxTotalSizeMB <= 0 {
		return
	}

	maxBytes := int64(maxTotalSizeMB) * 1024 * 1024
	if maxBytes <= 0 {
		return
	}

	dir := strings.TrimSpace(logDir)
	if dir == "" {
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	logDirCleanerCancel = cancel
	go runLogDirCleaner(ctx, filepath.Clean(dir), maxBytes, strings.TrimSpace(protectedPath))
}

func stopLogDirCleanerLocked() {
	if logDirCleanerCancel == nil {
		return
	}
	logDirCleanerCancel()
	logDirCleanerCancel = nil
}

func runLogDirCleaner(ctx context.Context, logDir string, maxBytes int64, protectedPath string) {
	ticker := time.NewTicker(logDirCleanerInterval)
	defer ticker.Stop()

	cleanOnce := func() {
		deleted, errClean := enforceLogDirSizeLimit(logDir, maxBytes, protectedPath)
		if errClean != nil {
			log.WithError(errClean).Warn("logging: failed to enforce log directory size limit")
			return
		}
		if deleted > 0 {
			log.Debugf("logging: removed %d old log file(s) to enforce log directory size limit", deleted)
		}
	}

	cleanOnce()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			cleanOnce()
		}
	}
}

func enforceLogDirSizeLimit(logDir string, maxBytes int64, protectedPath string) (int, error) {
	if maxBytes <= 0 {
		return 0, nil
	}

	dir := strings.TrimSpace(logDir)
	if dir == "" {
		return 0, nil
	}
	dir = filepath.Clean(dir)

	entries, errRead := os.ReadDir(dir)
	if errRead != nil {
		if os.IsNotExist(errRead) {
			return 0, nil
		}
		return 0, errRead
	}

	protected := strings.TrimSpace(protectedPath)
	if protected != "" {
		protected = filepath.Clean(protected)
	}

	type logFile struct {
		path    string
		size    int64
		modTime time.Time
	}

	var (
		files []logFile
		total int64
	)
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		name := entry.Name()
		if !isLogFileName(name) {
			continue
		}
		info, errInfo := entry.Info()
		if errInfo != nil {
			continue
		}
		if !info.Mode().IsRegular() {
			continue
		}
		path := filepath.Join(dir, name)
		files = append(files, logFile{
			path:    path,
			size:    info.Size(),
			modTime: info.ModTime(),
		})
		total += info.Size()
	}

	if total <= maxBytes {
		return 0, nil
	}

	sort.Slice(files, func(i, j int) bool {
		return files[i].modTime.Before(files[j].modTime)
	})

	deleted := 0
	for _, file := range files {
		if total <= maxBytes {
			break
		}
		if protected != "" && filepath.Clean(file.path) == protected {
			continue
		}
		if errRemove := os.Remove(file.path); errRemove != nil {
			log.WithError(errRemove).Warnf("logging: failed to remove old log file: %s", filepath.Base(file.path))
			continue
		}
		total -= file.size
		deleted++
	}

	return deleted, nil
}

func isLogFileName(name string) bool {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" {
		return false
	}
	lower := strings.ToLower(trimmed)
	return strings.HasSuffix(lower, ".log") || strings.HasSuffix(lower, ".log.gz")
}
```
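Deletion is oldest-first by modification time, the active main.log is exempt via protectedPath, and a file that cannot be removed is skipped rather than retried within the pass. Worked through with the numbers from the tests below: three 60-byte files against a 120-byte cap leave 180 > 120 bytes, so exactly one file (the oldest unprotected one) is deleted.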
internal/logging/log_dir_cleaner_test.go (new file, 70 lines):

```go
package logging

import (
	"os"
	"path/filepath"
	"testing"
	"time"
)

func TestEnforceLogDirSizeLimitDeletesOldest(t *testing.T) {
	dir := t.TempDir()

	writeLogFile(t, filepath.Join(dir, "old.log"), 60, time.Unix(1, 0))
	writeLogFile(t, filepath.Join(dir, "mid.log"), 60, time.Unix(2, 0))
	protected := filepath.Join(dir, "main.log")
	writeLogFile(t, protected, 60, time.Unix(3, 0))

	deleted, err := enforceLogDirSizeLimit(dir, 120, protected)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if deleted != 1 {
		t.Fatalf("expected 1 deleted file, got %d", deleted)
	}

	if _, err := os.Stat(filepath.Join(dir, "old.log")); !os.IsNotExist(err) {
		t.Fatalf("expected old.log to be removed, stat error: %v", err)
	}
	if _, err := os.Stat(filepath.Join(dir, "mid.log")); err != nil {
		t.Fatalf("expected mid.log to remain, stat error: %v", err)
	}
	if _, err := os.Stat(protected); err != nil {
		t.Fatalf("expected protected main.log to remain, stat error: %v", err)
	}
}

func TestEnforceLogDirSizeLimitSkipsProtected(t *testing.T) {
	dir := t.TempDir()

	protected := filepath.Join(dir, "main.log")
	writeLogFile(t, protected, 200, time.Unix(1, 0))
	writeLogFile(t, filepath.Join(dir, "other.log"), 50, time.Unix(2, 0))

	deleted, err := enforceLogDirSizeLimit(dir, 100, protected)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if deleted != 1 {
		t.Fatalf("expected 1 deleted file, got %d", deleted)
	}

	if _, err := os.Stat(protected); err != nil {
		t.Fatalf("expected protected main.log to remain, stat error: %v", err)
	}
	if _, err := os.Stat(filepath.Join(dir, "other.log")); !os.IsNotExist(err) {
		t.Fatalf("expected other.log to be removed, stat error: %v", err)
	}
}

func writeLogFile(t *testing.T, path string, size int, modTime time.Time) {
	t.Helper()

	data := make([]byte, size)
	if err := os.WriteFile(path, data, 0o644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	if err := os.Chtimes(path, modTime, modTime); err != nil {
		t.Fatalf("set times: %v", err)
	}
}
```
```diff
@@ -162,6 +162,21 @@ func GetGeminiModels() []*ModelInfo {
 		SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
 		Thinking:                   &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"low", "high"}},
 	},
+	{
+		ID:                         "gemini-3-flash-preview",
+		Object:                     "model",
+		Created:                    1765929600,
+		OwnedBy:                    "google",
+		Type:                       "gemini",
+		Name:                       "models/gemini-3-flash-preview",
+		Version:                    "3.0",
+		DisplayName:                "Gemini 3 Flash Preview",
+		Description:                "Gemini 3 Flash Preview",
+		InputTokenLimit:            1048576,
+		OutputTokenLimit:           65536,
+		SupportedGenerationMethods: []string{"generateContent", "countTokens", "createCachedContent", "batchGenerateContent"},
+		Thinking:                   &ThinkingSupport{Min: 128, Max: 32768, ZeroAllowed: false, DynamicAllowed: true, Levels: []string{"minimal", "low", "medium", "high"}},
+	},
 	{
 		ID:     "gemini-3-pro-image-preview",
 		Object: "model",
```
```diff
@@ -97,6 +97,7 @@ func (e *AntigravityExecutor) Execute(ctx context.Context, auth *cliproxyauth.Au
 	translated = util.ApplyGemini3ThinkingLevelFromMetadataCLI(req.Model, req.Metadata, translated)
 	translated = util.ApplyDefaultThinkingIfNeededCLI(req.Model, translated)
 	translated = normalizeAntigravityThinking(req.Model, translated)
+	translated = applyPayloadConfigWithRoot(e.cfg, req.Model, "antigravity", "request", translated)

 	baseURLs := antigravityBaseURLFallbackOrder(auth)
 	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
@@ -191,6 +192,7 @@ func (e *AntigravityExecutor) executeClaudeNonStream(ctx context.Context, auth *
 	translated = util.ApplyGemini3ThinkingLevelFromMetadataCLI(req.Model, req.Metadata, translated)
 	translated = util.ApplyDefaultThinkingIfNeededCLI(req.Model, translated)
 	translated = normalizeAntigravityThinking(req.Model, translated)
+	translated = applyPayloadConfigWithRoot(e.cfg, req.Model, "antigravity", "request", translated)

 	baseURLs := antigravityBaseURLFallbackOrder(auth)
 	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
@@ -524,6 +526,7 @@ func (e *AntigravityExecutor) ExecuteStream(ctx context.Context, auth *cliproxya
 	translated = util.ApplyGemini3ThinkingLevelFromMetadataCLI(req.Model, req.Metadata, translated)
 	translated = util.ApplyDefaultThinkingIfNeededCLI(req.Model, translated)
 	translated = normalizeAntigravityThinking(req.Model, translated)
+	translated = applyPayloadConfigWithRoot(e.cfg, req.Model, "antigravity", "request", translated)

 	baseURLs := antigravityBaseURLFallbackOrder(auth)
 	httpClient := newProxyAwareHTTPClient(ctx, e.cfg, auth, 0)
```
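The same applyPayloadConfigWithRoot hook lands at the identical point in all three Antigravity execution paths (Execute, executeClaudeNonStream, ExecuteStream): after thinking normalization and before the base-URL fallback order is computed, so payload overrides apply uniformly regardless of streaming mode.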
```diff
@@ -211,6 +211,8 @@ func ConvertClaudeRequestToAntigravity(modelName string, inputRawJSON []byte, _
 	tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema)
 	tool, _ = sjson.Delete(tool, "strict")
 	tool, _ = sjson.Delete(tool, "input_examples")
+	tool, _ = sjson.Delete(tool, "type")
+	tool, _ = sjson.Delete(tool, "cache_control")
 	toolsJSON, _ = sjson.SetRaw(toolsJSON, "0.functionDeclarations.-1", tool)
 	toolDeclCount++
 }
```
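Deleting type and cache_control alongside the existing strict and input_examples removals strips the remaining Anthropic-specific tool fields before the declaration is appended to functionDeclarations; two later hunks apply the same pair of deletions in the CLI and Gemini request translators, presumably because the upstream schema rejects these unknown keys.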
```diff
@@ -95,7 +95,7 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 		}
 	}
 	// response.created
-	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"instructions":""}}`
+	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"output":[]}}`
 	created, _ = sjson.Set(created, "sequence_number", nextSeq())
 	created, _ = sjson.Set(created, "response.id", st.ResponseID)
 	created, _ = sjson.Set(created, "response.created_at", st.CreatedAt)
@@ -197,11 +197,11 @@ func ConvertClaudeResponseToOpenAIResponses(ctx context.Context, modelName strin
 	if st.ReasoningActive {
 		if t := d.Get("thinking"); t.Exists() {
 			st.ReasoningBuf.WriteString(t.String())
-			msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"text":""}`
+			msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"delta":""}`
 			msg, _ = sjson.Set(msg, "sequence_number", nextSeq())
 			msg, _ = sjson.Set(msg, "item_id", st.ReasoningItemID)
 			msg, _ = sjson.Set(msg, "output_index", st.ReasoningIndex)
-			msg, _ = sjson.Set(msg, "text", t.String())
+			msg, _ = sjson.Set(msg, "delta", t.String())
 			out = append(out, emitEvent("response.reasoning_summary_text.delta", msg))
 		}
 	}
```
```diff
@@ -134,6 +134,8 @@ func ConvertClaudeRequestToCLI(modelName string, inputRawJSON []byte, _ bool) []
 	tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema)
 	tool, _ = sjson.Delete(tool, "strict")
 	tool, _ = sjson.Delete(tool, "input_examples")
+	tool, _ = sjson.Delete(tool, "type")
+	tool, _ = sjson.Delete(tool, "cache_control")
 	var toolDeclaration any
 	if err := json.Unmarshal([]byte(tool), &toolDeclaration); err == nil {
 		tools[0].FunctionDeclarations = append(tools[0].FunctionDeclarations, toolDeclaration)
```
```diff
@@ -127,6 +127,8 @@ func ConvertClaudeRequestToGemini(modelName string, inputRawJSON []byte, _ bool)
 	tool, _ = sjson.SetRaw(tool, "parametersJsonSchema", inputSchema)
 	tool, _ = sjson.Delete(tool, "strict")
 	tool, _ = sjson.Delete(tool, "input_examples")
+	tool, _ = sjson.Delete(tool, "type")
+	tool, _ = sjson.Delete(tool, "cache_control")
 	var toolDeclaration any
 	if err := json.Unmarshal([]byte(tool), &toolDeclaration); err == nil {
 		tools[0].FunctionDeclarations = append(tools[0].FunctionDeclarations, toolDeclaration)
```
```diff
@@ -117,7 +117,7 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
 		st.CreatedAt = time.Now().Unix()
 	}

-	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null}}`
+	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"output":[]}}`
 	created, _ = sjson.Set(created, "sequence_number", nextSeq())
 	created, _ = sjson.Set(created, "response.id", st.ResponseID)
 	created, _ = sjson.Set(created, "response.created_at", st.CreatedAt)
@@ -160,11 +160,11 @@ func ConvertGeminiResponseToOpenAIResponses(_ context.Context, modelName string,
 		}
 		if t := part.Get("text"); t.Exists() && t.String() != "" {
 			st.ReasoningBuf.WriteString(t.String())
-			msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"text":""}`
+			msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"delta":""}`
 			msg, _ = sjson.Set(msg, "sequence_number", nextSeq())
 			msg, _ = sjson.Set(msg, "item_id", st.ReasoningItemID)
 			msg, _ = sjson.Set(msg, "output_index", st.ReasoningIndex)
-			msg, _ = sjson.Set(msg, "text", t.String())
+			msg, _ = sjson.Set(msg, "delta", t.String())
 			out = append(out, emitEvent("response.reasoning_summary_text.delta", msg))
 		}
 		return true
```
```diff
@@ -143,7 +143,7 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 	st.ReasoningTokens = 0
 	st.UsageSeen = false
 	// response.created
-	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null}}`
+	created := `{"type":"response.created","sequence_number":0,"response":{"id":"","object":"response","created_at":0,"status":"in_progress","background":false,"error":null,"output":[]}}`
 	created, _ = sjson.Set(created, "sequence_number", nextSeq())
 	created, _ = sjson.Set(created, "response.id", st.ResponseID)
 	created, _ = sjson.Set(created, "response.created_at", st.Created)
@@ -216,11 +216,11 @@ func ConvertOpenAIChatCompletionsResponseToOpenAIResponses(ctx context.Context,
 		}
 		// Append incremental text to reasoning buffer
 		st.ReasoningBuf.WriteString(rc.String())
-		msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"text":""}`
+		msg := `{"type":"response.reasoning_summary_text.delta","sequence_number":0,"item_id":"","output_index":0,"summary_index":0,"delta":""}`
 		msg, _ = sjson.Set(msg, "sequence_number", nextSeq())
 		msg, _ = sjson.Set(msg, "item_id", st.ReasoningID)
 		msg, _ = sjson.Set(msg, "output_index", st.ReasoningIndex)
-		msg, _ = sjson.Set(msg, "text", rc.String())
+		msg, _ = sjson.Set(msg, "delta", rc.String())
 		out = append(out, emitRespEvent("response.reasoning_summary_text.delta", msg))
 	}

```
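Across all three response converters the change is the same pair of fixes to the emitted OpenAI Responses stream: the response.created snapshot now carries an empty output array, and response.reasoning_summary_text.delta events put the text increment in a delta field instead of text, matching the field name the event type implies.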
```diff
@@ -296,6 +296,7 @@ func flattenTypeArrays(jsonStr string) string {
 func removeUnsupportedKeywords(jsonStr string) string {
 	keywords := append(unsupportedConstraints,
 		"$schema", "$defs", "definitions", "const", "$ref", "additionalProperties",
+		"propertyNames", // Gemini doesn't support property name validation
 	)
 	for _, key := range keywords {
 		for _, p := range findPaths(jsonStr, key) {
```
```diff
@@ -596,6 +596,71 @@ func TestCleanJSONSchemaForGemini_MultipleNonNullTypes(t *testing.T) {
 	}
 }

+func TestCleanJSONSchemaForGemini_PropertyNamesRemoval(t *testing.T) {
+	// propertyNames is used to validate object property names (e.g., must match a pattern)
+	// Gemini doesn't support this keyword and will reject requests containing it
+	input := `{
+		"type": "object",
+		"properties": {
+			"metadata": {
+				"type": "object",
+				"propertyNames": {
+					"pattern": "^[a-zA-Z_][a-zA-Z0-9_]*$"
+				},
+				"additionalProperties": {
+					"type": "string"
+				}
+			}
+		}
+	}`
+
+	expected := `{
+		"type": "object",
+		"properties": {
+			"metadata": {
+				"type": "object"
+			}
+		}
+	}`
+
+	result := CleanJSONSchemaForGemini(input)
+	compareJSON(t, expected, result)
+
+	// Verify propertyNames is completely removed
+	if strings.Contains(result, "propertyNames") {
+		t.Errorf("propertyNames keyword should be removed, got: %s", result)
+	}
+}
+
+func TestCleanJSONSchemaForGemini_PropertyNamesRemoval_Nested(t *testing.T) {
+	// Test deeply nested propertyNames (as seen in real Claude tool schemas)
+	input := `{
+		"type": "object",
+		"properties": {
+			"items": {
+				"type": "array",
+				"items": {
+					"type": "object",
+					"properties": {
+						"config": {
+							"type": "object",
+							"propertyNames": {
+								"type": "string"
+							}
+						}
+					}
+				}
+			}
+		}
+	}`
+
+	result := CleanJSONSchemaForGemini(input)
+
+	if strings.Contains(result, "propertyNames") {
+		t.Errorf("Nested propertyNames should be removed, got: %s", result)
+	}
+}
+
 func compareJSON(t *testing.T, expectedJSON, actualJSON string) {
 	var expMap, actMap map[string]interface{}
 	errExp := json.Unmarshal([]byte(expectedJSON), &expMap)
```
```diff
@@ -136,6 +136,12 @@ func ApplyGeminiThinkingLevel(body []byte, level string, includeThoughts *bool)
 			updated = rewritten
 		}
 	}
+	if it := gjson.GetBytes(body, "generationConfig.thinkingConfig.include_thoughts"); it.Exists() {
+		updated, _ = sjson.DeleteBytes(updated, "generationConfig.thinkingConfig.include_thoughts")
+	}
+	if tb := gjson.GetBytes(body, "generationConfig.thinkingConfig.thinkingBudget"); tb.Exists() {
+		updated, _ = sjson.DeleteBytes(updated, "generationConfig.thinkingConfig.thinkingBudget")
+	}
 	return updated
 }
@@ -167,6 +173,12 @@ func ApplyGeminiCLIThinkingLevel(body []byte, level string, includeThoughts *boo
 			updated = rewritten
 		}
 	}
+	if it := gjson.GetBytes(body, "request.generationConfig.thinkingConfig.include_thoughts"); it.Exists() {
+		updated, _ = sjson.DeleteBytes(updated, "request.generationConfig.thinkingConfig.include_thoughts")
+	}
+	if tb := gjson.GetBytes(body, "request.generationConfig.thinkingConfig.thinkingBudget"); tb.Exists() {
+		updated, _ = sjson.DeleteBytes(updated, "request.generationConfig.thinkingConfig.thinkingBudget")
+	}
 	return updated
 }

```
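Both variants now also drop a leftover snake_case include_thoughts key and any pre-existing thinkingBudget from the incoming body after the level rewrite, so the outgoing thinkingConfig carries only what the level logic produced; the CLI variant addresses the same keys under the request. prefix.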
```diff
@@ -267,7 +267,7 @@ func (s *FileTokenStore) baseDirSnapshot() string {
 }

 // DEPRECATED: Use metadataEqualIgnoringTimestamps for comparing auth metadata.
 // This function is kept for backward compatibility but can cause refresh loops.
 func jsonEqual(a, b []byte) bool {
 	var objA any
 	var objB any
```