fix: preserve registered glm-5 variants (#48185) (thanks @haoyu-haoyu)

This commit is contained in:
Peter Steinberger
2026-04-04 09:41:40 +01:00
parent 6b100e4dcf
commit 323415204e
3 changed files with 85 additions and 1 deletion

View File

@@ -42,6 +42,7 @@ Docs: https://docs.openclaw.ai
- Providers/compat: stop forcing OpenAI-only defaults on proxy and custom OpenAI-compatible routes, preserve native vendor-specific reasoning/tool/streaming behavior across Anthropic-compatible, Moonshot, Mistral, ModelStudio, OpenRouter, xAI, and Z.ai endpoints, and route GitHub Copilot Claude models through Anthropic Messages instead of OpenAI Responses.
- Providers/Model Studio: preserve native streaming usage reporting for DashScope-compatible endpoints even when they are configured under a generic provider key, so streamed token totals stop sticking at zero. (#52395) Thanks @IVY-AI-gif.
- Status/usage: let `/status` and `session_status` fall back to transcript token totals when the session meta store stayed at zero, so LM Studio, Ollama, DashScope, and similar OpenAI-compatible providers stop showing `Context: 0/...`. (#55041) Thanks @jjjojoj.
- Providers/Z.AI: preserve explicitly registered `glm-5-*` variants like `glm-5-turbo` instead of intercepting them with the generic GLM-5 forward-compat shim. (#48185) Thanks @haoyu-haoyu.
- Plugins/OpenAI: enable `gpt-image-1` reference-image edits through `/images/edits` multipart uploads, and stop inferring unsupported resolution overrides when no explicit `size` or `resolution` is provided.
- Gateway/startup: default `gateway.mode` to `local` when unset, detect PID recycling in gateway lock files on Windows and macOS, and show startup progress so healthy restarts stop getting blocked by stale locks. (#54801, #60085, #59843)
- Mobile pairing/Android: tighten secure endpoint handling so Tailscale and public remote setup reject cleartext endpoints, private LAN pairing still works, merged-role approvals mint both node and operator device tokens, and bootstrap tokens survive node auto-pair until operator approval finishes. (#60128, #60208, #60221)

View File

@@ -76,7 +76,9 @@ describe("zai provider plugin", () => {
provider.resolveDynamicModel?.({
provider: "zai",
modelId: testCase.modelId,
modelRegistry: { find: () => template },
modelRegistry: {
find: (_provider, modelId) => (modelId === "glm-4.7" ? template : null),
},
} as never),
).toMatchObject({
provider: "zai",
@@ -87,4 +89,77 @@ describe("zai provider plugin", () => {
});
}
});
it("returns an already-registered GLM-5 variant as-is", async () => {
  const provider = await registerSingleProviderPlugin(plugin);
  // A user-registered glm-5-turbo override that must win over the
  // generic GLM-5 forward-compat shim.
  const registeredVariant = {
    id: "glm-5-turbo",
    name: "GLM-5-Turbo",
    provider: "zai",
    api: "openai-completions",
    baseUrl: "https://api.z.ai/api/paas/v4",
    reasoning: false,
    input: ["text"],
    cost: { input: 0.1, output: 0.2, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 123456,
    maxTokens: 54321,
  };
  // The GLM-4.7 template the shim would otherwise synthesize from.
  const fallbackTemplate = {
    id: "glm-4.7",
    name: "GLM-4.7",
    provider: "zai",
    api: "openai-completions",
    baseUrl: "https://api.z.ai/api/paas/v4",
    reasoning: true,
    input: ["text"],
    cost: { input: 0.6, output: 2.2, cacheRead: 0.11, cacheWrite: 0 },
    contextWindow: 204800,
    maxTokens: 131072,
  };
  // Registry knows both models; the explicit registration must be returned untouched.
  const resolved = provider.resolveDynamicModel?.({
    provider: "zai",
    modelId: "glm-5-turbo",
    modelRegistry: {
      find: (_provider, modelId) => {
        if (modelId === "glm-5-turbo") {
          return registeredVariant;
        }
        return modelId === "glm-4.7" ? fallbackTemplate : null;
      },
    },
  } as never);
  expect(resolved).toEqual(registeredVariant);
});
it("still synthesizes unknown GLM-5 variants from the GLM-4.7 template", async () => {
  const provider = await registerSingleProviderPlugin(plugin);
  // Only the GLM-4.7 template exists in the registry; glm-5-turbo is unknown,
  // so the forward-compat shim should synthesize it from this template.
  const glm47Template = {
    id: "glm-4.7",
    name: "GLM-4.7",
    provider: "zai",
    api: "openai-completions",
    baseUrl: "https://api.z.ai/api/paas/v4",
    reasoning: true,
    input: ["text"],
    cost: { input: 0.6, output: 2.2, cacheRead: 0.11, cacheWrite: 0 },
    contextWindow: 204800,
    maxTokens: 131072,
  };
  const resolved = provider.resolveDynamicModel?.({
    provider: "zai",
    modelId: "glm-5-turbo",
    modelRegistry: {
      find: (_provider, modelId) => (modelId === "glm-4.7" ? glm47Template : null),
    },
  } as never);
  // Synthesized model carries the requested id with template-derived settings.
  expect(resolved).toMatchObject({
    id: "glm-5-turbo",
    name: "GLM-5 Turbo",
    provider: "zai",
    api: "openai-completions",
    baseUrl: "https://api.z.ai/api/paas/v4",
    reasoning: true,
    input: ["text"],
  });
});
});

View File

@@ -45,6 +45,14 @@ function resolveGlm5ForwardCompatModel(
return undefined;
}
const existing = ctx.modelRegistry.find(
PROVIDER_ID,
trimmedModelId,
) as ProviderRuntimeModel | null;
if (existing) {
return existing;
}
const def = buildZaiModelDefinition({ id: trimmedModelId });
const template = ctx.modelRegistry.find(
PROVIDER_ID,