fix(ui): hide synthetic transcript-repair history messages (#65458)

This commit is contained in:
Vincent Koc
2026-04-12 17:35:30 +01:00
committed by GitHub
parent 079eb18bf7
commit 12d351b79c
3 changed files with 81 additions and 1 deletion

View File

@@ -21,6 +21,7 @@ Docs: https://docs.openclaw.ai
- Plugins/memory-core dreaming: keep bundled `memory-core` loaded alongside an explicit external memory slot owner only when that owner enables dreaming, while preserving `plugins.slots.memory = "none"` disable semantics. (#65411) Thanks @pradeep7127 and @vincentkoc.
- Agents/Anthropic replay: preserve immutable signed-thinking replay safety across stored and live reruns, keep non-thinking embedded `tool_result` user blocks intact, and drop conflicting preserved tool IDs before validation so retries stop degrading into omitted tool calls. (#65126) Thanks @shakkernerd.
- Telegram/direct sessions: keep commentary-only assistant fallback payloads out of visible direct delivery, so Codex planning chatter cannot leak into Telegram DMs when a run has no `final_answer` text. (#65112) Thanks @vincentkoc.
- UI/WebChat: hide synthetic transcript-repair tool results from chat history reloads so internal recovery markers do not leak into visible chat after reconnects. (#65247) Thanks @wangwllu and @vincentkoc.
- Infra/net: fix multipart FormData fields (including `model`) being silently dropped when a guarded runtime fetch body crosses a FormData implementation boundary, restoring OpenAI audio transcription requests that failed with HTTP 400. (#64349) Thanks @petr-sloup.
- Dreaming/diary: use the host local timezone for diary timestamps when `dreaming.timezone` is unset, so `DREAMS.md` and the UI stop defaulting to UTC. (#65034) Thanks @neo1027144-creator and @vincentkoc.
- Dreaming/diary: include the timezone abbreviation in diary timestamps so `DREAMS.md` and the UI make UTC or local host time explicit. (#65057) Thanks @Yanhu007 and @vincentkoc.

View File

@@ -561,6 +561,66 @@ describe("loadChatHistory", () => {
// text takes precedence — "real reply" is NOT silent, so message is kept.
expect(state.chatMessages).toHaveLength(1);
});
it("filters the synthetic transcript-repair tool result from history", async () => {
  // One ordinary user turn, the internal repair marker, and a genuine tool result.
  const userGreeting = { role: "user", content: [{ type: "text", text: "hello" }] };
  const syntheticRepair = {
    role: "toolResult",
    toolCallId: "call_1",
    toolName: "unknown",
    isError: true,
    content: [
      {
        type: "text",
        text: "[openclaw] missing tool result in session history; inserted synthetic error result for transcript repair.",
      },
    ],
  };
  const realToolResult = {
    role: "toolResult",
    toolCallId: "call_2",
    toolName: "shell",
    content: [{ type: "text", text: "real tool output" }],
  };
  const messages = [userGreeting, syntheticRepair, realToolResult];
  const mockClient = { request: vi.fn().mockResolvedValue({ messages }) };
  const state = createState({
    client: mockClient as unknown as ChatState["client"],
    connected: true,
  });
  await loadChatHistory(state);
  // Only the synthetic repair marker is dropped; everything else survives.
  expect(state.chatMessages).toEqual([userGreeting, realToolResult]);
});
it("keeps a user message even if it matches the synthetic repair text", async () => {
  // The repair filter must only target toolResult entries: an (unlikely) user
  // message containing the exact same text must not be hidden.
  const repairText =
    "[openclaw] missing tool result in session history; inserted synthetic error result for transcript repair.";
  const messages = [
    { role: "user", content: [{ type: "text", text: repairText }] },
  ];
  const mockClient = { request: vi.fn().mockResolvedValue({ messages }) };
  const state = createState({
    client: mockClient as unknown as ChatState["client"],
    connected: true,
  });
  await loadChatHistory(state);
  // Nothing filtered — the user message passes through untouched.
  expect(state.chatMessages).toEqual(messages);
});
});
describe("sendChatMessage", () => {

View File

@@ -11,6 +11,8 @@ import {
} from "./scope-errors.ts";
// An assistant reply whose entire text is "NO_REPLY" (ignoring surrounding
// whitespace) counts as deliberately silent and is hidden from history.
const SILENT_REPLY_PATTERN = /^\s*NO_REPLY\s*$/;
// Exact body text of the synthetic error tool result inserted during
// transcript repair; history entries matching it are hidden from chat.
// NOTE(review): must stay byte-identical to the producer of this marker
// (defined elsewhere in the codebase) — confirm when changing either side.
const SYNTHETIC_TRANSCRIPT_REPAIR_RESULT =
"[openclaw] missing tool result in session history; inserted synthetic error result for transcript repair.";
// Monotonic request version per state object, used to discard stale
// history responses. NOTE(review): presumably keyed by ChatState — confirm
// against beginChatHistoryRequest below.
const chatHistoryRequestVersions = new WeakMap<object, number>();
function beginChatHistoryRequest(state: ChatState): number {
@@ -53,6 +55,23 @@ function isAssistantSilentReply(message: unknown): boolean {
return typeof text === "string" && isSilentReplyStream(text);
}
/**
 * Returns true when a history entry is the synthetic "toolResult" message
 * whose text matches SYNTHETIC_TRANSCRIPT_REPAIR_RESULT — an internal
 * recovery marker that should never be rendered in chat history.
 */
function isSyntheticTranscriptRepairToolResult(message: unknown): boolean {
  if (typeof message !== "object" || message === null) {
    return false;
  }
  const record = message as Record<string, unknown>;
  // Only toolResult entries are candidates; role comparison is case-insensitive.
  if (normalizeLowercaseStringOrEmpty(record.role) !== "toolresult") {
    return false;
  }
  const body = extractText(message);
  if (typeof body !== "string") {
    return false;
  }
  return body.trim() === SYNTHETIC_TRANSCRIPT_REPAIR_RESULT;
}
/**
 * A history message is hidden from the visible chat when it is either a
 * silent assistant reply (NO_REPLY) or a synthetic transcript-repair
 * tool result.
 */
function shouldHideHistoryMessage(message: unknown): boolean {
  if (isAssistantSilentReply(message)) {
    return true;
  }
  return isSyntheticTranscriptRepairToolResult(message);
}
export type ChatState = {
client: GatewayBrowserClient | null;
connected: boolean;
@@ -109,7 +128,7 @@ export async function loadChatHistory(state: ChatState) {
return;
}
const messages = Array.isArray(res.messages) ? res.messages : [];
state.chatMessages = messages.filter((message) => !isAssistantSilentReply(message));
state.chatMessages = messages.filter((message) => !shouldHideHistoryMessage(message));
state.chatThinkingLevel = res.thinkingLevel ?? null;
// Clear all streaming state — history includes tool results and text
// inline, so keeping streaming artifacts would cause duplicates.