msteams: implement Teams AI agent UX best practices (#51808)

Migrates the Teams extension from @microsoft/agents-hosting to the official Teams SDK (@microsoft/teams.apps + @microsoft/teams.api) and implements Microsoft's AI UX best practices for Teams agents.

- AI-generated label on all bot messages (Teams native badge + thumbs up/down)
- Streaming responses in 1:1 chats via Teams streaminfo protocol
- Welcome card with configurable prompt starters on bot install
- Feedback with reflective learning (negative feedback triggers background reflection)
- Typing indicators for personal + group chats (disabled for channels)
- Informative status updates (progress bar while LLM processes)
- JWT validation via Teams SDK createServiceTokenValidator
- User-Agent: teams.ts[apps]/<sdk-version> OpenClaw/<version> on outbound requests
- Fix copy-pasted image downloads (smba.trafficmanager.net auth allowlist)
- Pre-parse auth gate (reject unauthenticated requests before body parsing)
- Reflection dispatcher lifecycle fix (prevent leaked dispatchers)
- Colon-safe session filenames (Windows compatibility)
- Cooldown cache eviction (prevent unbounded memory growth)

Closes #51806
This commit is contained in:
Sid Uppal
2026-03-23 22:03:39 -07:00
committed by GitHub
parent ea62655e19
commit cd90130877
39 changed files with 3349 additions and 690 deletions

View File

@@ -24324,7 +24324,7 @@
"network"
],
"label": "Microsoft Teams",
"help": "Bot Framework; enterprise support.",
"help": "Teams SDK; enterprise support.",
"hasChildren": true
},
{

View File

@@ -2181,7 +2181,7 @@
{"recordType":"path","path":"channels.mattermost.requireMention","kind":"channel","type":"boolean","required":false,"deprecated":false,"sensitive":false,"tags":["channels","network"],"label":"Mattermost Require Mention","help":"Require @mention in channels before responding (default: true).","hasChildren":false}
{"recordType":"path","path":"channels.mattermost.responsePrefix","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.mattermost.textChunkLimit","kind":"channel","type":"integer","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.msteams","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":["channels","network"],"label":"Microsoft Teams","help":"Bot Framework; enterprise support.","hasChildren":true}
{"recordType":"path","path":"channels.msteams","kind":"channel","type":"object","required":false,"deprecated":false,"sensitive":false,"tags":["channels","network"],"label":"Microsoft Teams","help":"Teams SDK; enterprise support.","hasChildren":true}
{"recordType":"path","path":"channels.msteams.allowFrom","kind":"channel","type":"array","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":true}
{"recordType":"path","path":"channels.msteams.allowFrom.*","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}
{"recordType":"path","path":"channels.msteams.appId","kind":"channel","type":"string","required":false,"deprecated":false,"sensitive":false,"tags":[],"hasChildren":false}

View File

@@ -4,9 +4,9 @@
"description": "OpenClaw Microsoft Teams channel plugin",
"type": "module",
"dependencies": {
"@microsoft/agents-hosting": "^1.4.1",
"express": "^5.2.1",
"uuid": "^13.0.0"
"@microsoft/teams.api": "2.0.5",
"@microsoft/teams.apps": "2.0.5",
"express": "^5.2.1"
},
"devDependencies": {
"openclaw": "workspace:*"
@@ -27,10 +27,10 @@
"channel": {
"id": "msteams",
"label": "Microsoft Teams",
"selectionLabel": "Microsoft Teams (Bot Framework)",
"selectionLabel": "Microsoft Teams (Teams SDK)",
"docsPath": "/channels/msteams",
"docsLabel": "msteams",
"blurb": "Bot Framework; enterprise support.",
"blurb": "Teams SDK; enterprise support.",
"aliases": [
"teams"
],

View File

@@ -0,0 +1,7 @@
/**
 * AI-generated content entity added to every outbound AI message.
 *
 * Shape follows Microsoft's documented "AI-generated" message-label entity
 * for Teams bots: a schema.org Message entity with additionalType
 * AIGeneratedContent. The documented format includes "@context"; it was
 * previously missing here.
 */
export const AI_GENERATED_ENTITY = {
  type: "https://schema.org/Message",
  "@type": "Message",
  // Part of the documented Teams AI-label entity format.
  "@context": "https://schema.org",
  "@id": "",
  additionalType: ["AIGeneratedContent"],
};

View File

@@ -59,6 +59,9 @@ export const DEFAULT_MEDIA_HOST_ALLOWLIST = [
export const DEFAULT_MEDIA_AUTH_HOST_ALLOWLIST = [
"api.botframework.com",
"botframework.com",
// Bot Framework Service URL (smba.trafficmanager.net) used for outbound
// replies and inbound attachment downloads (clipboard-pasted images).
"smba.trafficmanager.net",
"graph.microsoft.com",
"graph.microsoft.us",
"graph.microsoft.de",

View File

@@ -72,4 +72,54 @@ describe("msteams conversation store (fs)", () => {
"19:new@thread.tacv2",
]);
});
it("stores and retrieves timezone from conversation reference", async () => {
  // Fresh temp state dir per test so stores don't see each other's data.
  const stateDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), "openclaw-msteams-store-"));
  const store = createMSTeamsConversationStoreFs({
    env: { ...process.env, OPENCLAW_STATE_DIR: stateDir },
    ttlMs: 60_000,
  });
  const ref: StoredConversationReference = {
    conversation: { id: "19:tz-test@thread.tacv2" },
    channelId: "msteams",
    serviceUrl: "https://service.example.com",
    user: { id: "u1", aadObjectId: "aad1" },
    // IANA timezone as carried by the Teams clientInfo entity.
    timezone: "America/Los_Angeles",
  };
  await store.upsert("19:tz-test@thread.tacv2", ref);
  // Round-trip: the timezone must survive persistence and retrieval.
  const retrieved = await store.get("19:tz-test@thread.tacv2");
  expect(retrieved).not.toBeNull();
  expect(retrieved!.timezone).toBe("America/Los_Angeles");
});
it("preserves existing timezone when upsert omits timezone", async () => {
  const stateDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), "openclaw-msteams-store-"));
  const store = createMSTeamsConversationStoreFs({
    env: { ...process.env, OPENCLAW_STATE_DIR: stateDir },
    ttlMs: 60_000,
  });
  // First upsert carries a timezone.
  await store.upsert("19:tz-keep@thread.tacv2", {
    conversation: { id: "19:tz-keep@thread.tacv2" },
    channelId: "msteams",
    serviceUrl: "https://service.example.com",
    user: { id: "u1" },
    timezone: "Europe/London",
  });
  // Second upsert without timezone field
  await store.upsert("19:tz-keep@thread.tacv2", {
    conversation: { id: "19:tz-keep@thread.tacv2" },
    channelId: "msteams",
    serviceUrl: "https://service.example.com",
    user: { id: "u1" },
  });
  // The store must keep the earlier timezone rather than dropping it on
  // activities that lack the clientInfo entity.
  const retrieved = await store.get("19:tz-keep@thread.tacv2");
  expect(retrieved).not.toBeNull();
  expect(retrieved!.timezone).toBe("Europe/London");
});
});

View File

@@ -137,7 +137,11 @@ export function createMSTeamsConversationStoreFs(params?: {
const normalizedId = normalizeConversationId(conversationId);
await withFileLock(filePath, empty, async () => {
const store = await readStore();
const existing = store.conversations[normalizedId];
store.conversations[normalizedId] = {
// Preserve fields from previous entry that may not be present on every activity
// (e.g. timezone is only sent when clientInfo entity is available).
...(existing?.timezone && !reference.timezone ? { timezone: existing.timezone } : {}),
...reference,
lastSeenAt: new Date().toISOString(),
};

View File

@@ -32,6 +32,8 @@ export type StoredConversationReference = {
* Graph-native chat ID so we don't need to re-query the API on every send.
*/
graphChatId?: string;
/** IANA timezone from Teams clientInfo entity (e.g. "America/New_York") */
timezone?: string;
};
export type MSTeamsConversationStoreEntry = {

View File

@@ -0,0 +1,138 @@
import { mkdtemp, rm, writeFile } from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { afterEach, describe, expect, it } from "vitest";
import {
buildFeedbackEvent,
buildReflectionPrompt,
clearReflectionCooldowns,
isReflectionAllowed,
loadSessionLearnings,
recordReflectionTime,
} from "./feedback-reflection.js";
describe("buildFeedbackEvent", () => {
  it("builds a well-formed custom event", () => {
    const event = buildFeedbackEvent({
      messageId: "msg-123",
      value: "negative",
      comment: "too verbose",
      sessionKey: "msteams:user1",
      agentId: "default",
      conversationId: "19:abc",
    });
    // Fixed discriminators identifying this as a feedback custom event.
    expect(event.type).toBe("custom");
    expect(event.event).toBe("feedback");
    expect(event.value).toBe("negative");
    expect(event.comment).toBe("too verbose");
    expect(event.messageId).toBe("msg-123");
    // Timestamp is stamped at build time.
    expect(event.ts).toBeGreaterThan(0);
  });
  it("omits comment when not provided", () => {
    const event = buildFeedbackEvent({
      messageId: "msg-123",
      value: "positive",
      sessionKey: "msteams:user1",
      agentId: "default",
      conversationId: "19:abc",
    });
    expect(event.comment).toBeUndefined();
    expect(event.value).toBe("positive");
  });
});
describe("buildReflectionPrompt", () => {
  it("includes the thumbed-down response", () => {
    const prompt = buildReflectionPrompt({
      thumbedDownResponse: "Here is a long explanation...",
    });
    expect(prompt).toContain("previous response wasn't helpful");
    expect(prompt).toContain("Here is a long explanation...");
    expect(prompt).toContain("reflect");
  });
  it("truncates long responses", () => {
    // 600 chars exceeds the 500-char cap the prompt builder applies.
    const longResponse = "x".repeat(600);
    const prompt = buildReflectionPrompt({
      thumbedDownResponse: longResponse,
    });
    expect(prompt).toContain("...");
    expect(prompt.length).toBeLessThan(longResponse.length + 500);
  });
  it("includes user comment when provided", () => {
    const prompt = buildReflectionPrompt({
      thumbedDownResponse: "Some response",
      userComment: "Too wordy",
    });
    expect(prompt).toContain('User\'s comment: "Too wordy"');
  });
  it("works without optional params", () => {
    // Both inputs are optional; the scaffold text alone must be produced.
    const prompt = buildReflectionPrompt({});
    expect(prompt).toContain("previous response wasn't helpful");
    expect(prompt).toContain("reflect");
  });
});
describe("reflection cooldown", () => {
  // Cooldown state is module-global; reset it between tests.
  afterEach(() => {
    clearReflectionCooldowns();
  });
  it("allows first reflection", () => {
    expect(isReflectionAllowed("session-1")).toBe(true);
  });
  it("blocks reflection within cooldown", () => {
    recordReflectionTime("session-1");
    expect(isReflectionAllowed("session-1", 60_000)).toBe(false);
  });
  it("allows reflection after cooldown expires", () => {
    recordReflectionTime("session-1");
    // A zero cooldown makes any recorded entry count as expired (elapsed >= 0),
    // so this exercises the expiry comparison against an existing entry without
    // sleeping. (The previous version cleared the map first, which only
    // re-tested the "no entry" path already covered by "allows first reflection".)
    expect(isReflectionAllowed("session-1", 0)).toBe(true);
  });
  it("tracks sessions independently", () => {
    recordReflectionTime("session-1");
    expect(isReflectionAllowed("session-1", 60_000)).toBe(false);
    expect(isReflectionAllowed("session-2", 60_000)).toBe(true);
  });
});
describe("loadSessionLearnings", () => {
  let tmpDir: string;
  // Remove the per-test temp dir so repeated runs don't accumulate files.
  afterEach(async () => {
    if (tmpDir) {
      await rm(tmpDir, { recursive: true, force: true });
    }
  });
  it("returns empty array when file doesn't exist", async () => {
    tmpDir = await mkdtemp(path.join(os.tmpdir(), "learnings-test-"));
    const learnings = await loadSessionLearnings(tmpDir, "nonexistent");
    expect(learnings).toEqual([]);
  });
  it("reads existing learnings", async () => {
    tmpDir = await mkdtemp(path.join(os.tmpdir(), "learnings-test-"));
    // Colons are sanitized to underscores in filenames (Windows compat)
    const safeKey = "msteams_user1";
    const filePath = path.join(tmpDir, `${safeKey}.learnings.json`);
    await writeFile(filePath, JSON.stringify(["Be concise", "Use examples"]), "utf-8");
    // Lookup uses the raw session key; sanitization happens inside the loader.
    const learnings = await loadSessionLearnings(tmpDir, "msteams:user1");
    expect(learnings).toEqual(["Be concise", "Use examples"]);
  });
});

View File

@@ -0,0 +1,353 @@
/**
* Background reflection triggered by negative user feedback (thumbs-down).
*
* Flow:
* 1. User thumbs-down → invoke handler acks immediately
* 2. This module runs in the background (fire-and-forget)
* 3. Reads recent session context
* 4. Sends a synthetic reflection prompt to the agent
* 5. Stores the derived learning in session
* 6. Optionally sends a proactive follow-up to the user
*/
import { dispatchReplyFromConfigWithSettledDispatcher } from "openclaw/plugin-sdk/msteams";
import type { OpenClawConfig } from "../runtime-api.js";
import type { StoredConversationReference } from "./conversation-store.js";
import type { MSTeamsAdapter } from "./messenger.js";
import { buildConversationReference, sendMSTeamsMessages } from "./messenger.js";
import type { MSTeamsMonitorLogger } from "./monitor-types.js";
import { getMSTeamsRuntime } from "./runtime.js";
/** Default cooldown between reflections per session (5 minutes). */
const DEFAULT_COOLDOWN_MS = 300_000;

/** Max chars of the thumbed-down response to include in the reflection prompt. */
const MAX_RESPONSE_CHARS = 500;

/** Tracks last reflection time per session to enforce cooldown. */
const lastReflectionBySession = new Map<string, number>();

/** Maximum cooldown entries before pruning expired ones. */
const MAX_COOLDOWN_ENTRIES = 500;

/**
 * Bound the cooldown map so it cannot grow without limit.
 *
 * First drops entries whose cooldown has already expired. If the map is
 * still over capacity afterwards (every entry is fresh), evicts the
 * oldest-inserted entries until it fits — previously nothing was evicted in
 * that case and the map kept growing. Evicting a fresh entry merely ends a
 * cooldown early, which is a safe failure mode.
 *
 * @param cooldownMs cooldown window used to decide whether an entry expired
 */
function pruneExpiredCooldowns(cooldownMs: number): void {
  if (lastReflectionBySession.size <= MAX_COOLDOWN_ENTRIES) {
    return;
  }
  const now = Date.now();
  for (const [key, time] of lastReflectionBySession) {
    if (now - time >= cooldownMs) {
      lastReflectionBySession.delete(key);
    }
  }
  // Fallback: drop oldest-inserted entries (Map preserves insertion order;
  // deleting during key iteration is safe for JS Maps).
  for (const key of lastReflectionBySession.keys()) {
    if (lastReflectionBySession.size <= MAX_COOLDOWN_ENTRIES) {
      break;
    }
    lastReflectionBySession.delete(key);
  }
}
/** Custom event recorded when a user thumbs a bot message up or down. */
export type FeedbackEvent = {
  type: "custom";
  event: "feedback";
  ts: number;
  messageId: string;
  value: "positive" | "negative";
  comment?: string;
  sessionKey: string;
  agentId: string;
  conversationId: string;
  reflectionLearning?: string;
};

/**
 * Assemble a FeedbackEvent from handler inputs, stamping the current time.
 * `reflectionLearning` is intentionally left unset at build time.
 */
export function buildFeedbackEvent(params: {
  messageId: string;
  value: "positive" | "negative";
  comment?: string;
  sessionKey: string;
  agentId: string;
  conversationId: string;
}): FeedbackEvent {
  const { messageId, value, comment, sessionKey, agentId, conversationId } = params;
  return {
    type: "custom",
    event: "feedback",
    ts: Date.now(),
    messageId,
    value,
    comment,
    sessionKey,
    agentId,
    conversationId,
  };
}
/**
 * Build the synthetic prompt asking the agent to reflect on a thumbed-down
 * response. Includes a truncated excerpt of the response and the user's
 * comment when available, then fixed instructions for the reflection format.
 */
export function buildReflectionPrompt(params: {
  thumbedDownResponse?: string;
  userComment?: string;
}): string {
  const { thumbedDownResponse, userComment } = params;
  const sections: string[] = ["A user indicated your previous response wasn't helpful."];
  if (thumbedDownResponse) {
    // Cap the quoted response so the prompt stays small.
    const excerpt =
      thumbedDownResponse.length > MAX_RESPONSE_CHARS
        ? `${thumbedDownResponse.slice(0, MAX_RESPONSE_CHARS)}...`
        : thumbedDownResponse;
    sections.push(`\nYour response was:\n> ${excerpt}`);
  }
  if (userComment) {
    sections.push(`\nUser's comment: "${userComment}"`);
  }
  sections.push(
    "\nBriefly reflect: what could you improve? Consider tone, length, " +
      "accuracy, relevance, and specificity. Reply with:\n" +
      "1. A short adjustment note (1-2 sentences) for your future behavior " +
      "in this conversation.\n" +
      "2. Whether you should follow up with the user (yes if the adjustment " +
      "is non-obvious or you have a clarifying question; no if minor).\n" +
      "3. If following up, draft a brief message to the user.",
  );
  return sections.join("\n");
}
/**
 * Check if a reflection is allowed (cooldown not active).
 * Sessions with no recorded reflection are always allowed.
 */
export function isReflectionAllowed(sessionKey: string, cooldownMs?: number): boolean {
  const last = lastReflectionBySession.get(sessionKey);
  if (last === undefined) {
    return true;
  }
  const elapsed = Date.now() - last;
  return elapsed >= (cooldownMs ?? DEFAULT_COOLDOWN_MS);
}
/**
 * Record that a reflection was run for a session.
 * Also opportunistically prunes the cooldown map so it stays bounded
 * (pruning runs after the insert so the new entry is counted).
 */
export function recordReflectionTime(sessionKey: string): void {
  lastReflectionBySession.set(sessionKey, Date.now());
  pruneExpiredCooldowns(DEFAULT_COOLDOWN_MS);
}
/**
 * Clear reflection cooldown tracking (for tests).
 * Resets the module-global cooldown map.
 */
export function clearReflectionCooldowns(): void {
  lastReflectionBySession.clear();
}
export type RunFeedbackReflectionParams = {
  /** Full OpenClaw config (read for cooldown, session store, and reply options). */
  cfg: OpenClawConfig;
  /** Adapter used for the optional proactive follow-up send. */
  adapter: MSTeamsAdapter;
  /** Bot app ID passed to continueConversation for the proactive send. */
  appId: string;
  /** Stored reference for the conversation that produced the feedback. */
  conversationRef: StoredConversationReference;
  /** Session key; also serves as the cooldown key for rate limiting. */
  sessionKey: string;
  /** Agent ID already resolved by the feedback handler (not re-resolved here). */
  agentId: string;
  conversationId: string;
  /** ID of the message the user thumbed down. */
  feedbackMessageId: string;
  /** Text of the thumbed-down response, if available (truncated in the prompt). */
  thumbedDownResponse?: string;
  /** Optional free-text comment the user attached to the feedback. */
  userComment?: string;
  log: MSTeamsMonitorLogger;
};
/**
 * Run a background reflection after negative feedback.
 * This is designed to be called fire-and-forget (don't await in the invoke handler).
 *
 * Pipeline: cooldown check → dispatch a synthetic reflection prompt through the
 * normal reply machinery while capturing (not delivering) the output → record
 * the cooldown → persist the learning → optionally send a proactive follow-up.
 * Dispatch, storage, and follow-up failures are caught and logged here.
 */
export async function runFeedbackReflection(params: RunFeedbackReflectionParams): Promise<void> {
  const { cfg, log, sessionKey } = params;
  const msteamsCfg = cfg.channels?.msteams;
  // Check cooldown
  const cooldownMs = msteamsCfg?.feedbackReflectionCooldownMs ?? DEFAULT_COOLDOWN_MS;
  if (!isReflectionAllowed(sessionKey, cooldownMs)) {
    log.debug?.("skipping reflection (cooldown active)", { sessionKey });
    return;
  }
  // Record cooldown after successful dispatch (not before) so transient
  // failures don't suppress future reflection attempts.
  const core = getMSTeamsRuntime();
  const reflectionPrompt = buildReflectionPrompt({
    thumbedDownResponse: params.thumbedDownResponse,
    userComment: params.userComment,
  });
  // Use the agentId from the feedback handler (already resolved with correct routing)
  // instead of re-resolving, which could yield a different agent in peer-specific setups.
  const storePath = core.channel.session.resolveStorePath(cfg.session?.store, {
    agentId: params.agentId,
  });
  const envelopeOptions = core.channel.reply.resolveEnvelopeFormatOptions(cfg);
  // Wrap the prompt in the standard agent envelope so it flows through the
  // pipeline like any other inbound message (sender is "system").
  const body = core.channel.reply.formatAgentEnvelope({
    channel: "Teams",
    from: "system",
    body: reflectionPrompt,
    envelope: envelopeOptions,
  });
  const ctxPayload = core.channel.reply.finalizeInboundContext({
    Body: body,
    BodyForAgent: reflectionPrompt,
    RawBody: reflectionPrompt,
    CommandBody: reflectionPrompt,
    From: `msteams:system:${params.conversationId}`,
    To: `conversation:${params.conversationId}`,
    SessionKey: params.sessionKey,
    ChatType: "direct" as const,
    SenderName: "system",
    SenderId: "system",
    Provider: "msteams" as const,
    Surface: "msteams" as const,
    Timestamp: Date.now(),
    WasMentioned: true,
    CommandAuthorized: false,
    OriginatingChannel: "msteams" as const,
    OriginatingTo: `conversation:${params.conversationId}`,
  });
  // Capture the reflection response instead of sending it directly.
  // We only want to proactively message if the agent decides to follow up.
  let reflectionResponse = "";
  // All typing callbacks are no-ops: the reflection runs out-of-band.
  const noopTypingCallbacks = {
    onReplyStart: async () => {},
    onIdle: () => {},
    onCleanup: () => {},
  };
  const { dispatcher, replyOptions } = core.channel.reply.createReplyDispatcherWithTyping({
    deliver: async (payload) => {
      // Accumulate multi-part replies into one newline-joined string.
      if (payload.text) {
        reflectionResponse += (reflectionResponse ? "\n" : "") + payload.text;
      }
    },
    typingCallbacks: noopTypingCallbacks,
    humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, params.agentId),
    onError: (err) => {
      log.debug?.("reflection reply error", { error: String(err) });
    },
  });
  try {
    await dispatchReplyFromConfigWithSettledDispatcher({
      ctxPayload,
      cfg,
      dispatcher,
      onSettled: () => {},
      replyOptions,
    });
  } catch (err) {
    log.error("reflection dispatch failed", { error: String(err) });
    // Don't record cooldown — allow retry on next feedback
    return;
  }
  if (!reflectionResponse.trim()) {
    log.debug?.("reflection produced no output");
    return;
  }
  // Reflection succeeded — record cooldown now
  recordReflectionTime(sessionKey);
  log.info("reflection complete", {
    sessionKey,
    responseLength: reflectionResponse.length,
  });
  // Store the learning in the session
  try {
    await storeSessionLearning({
      storePath,
      sessionKey: params.sessionKey,
      learning: reflectionResponse.trim(),
    });
  } catch (err) {
    log.debug?.("failed to store reflection learning", { error: String(err) });
  }
  // Send proactive follow-up if the reflection suggests one.
  // Simple heuristic: if the response contains "follow up" or similar,
  // or if it's reasonably short (a direct message to the user).
  // For now, always send the reflection as a follow-up — the prompt asks
  // the agent to decide, and it will draft a user-facing message if appropriate.
  const shouldNotify =
    reflectionResponse.toLowerCase().includes("follow up") || reflectionResponse.length < 300;
  if (shouldNotify) {
    try {
      const baseRef = buildConversationReference(params.conversationRef);
      // NOTE(review): activityId is cleared, presumably so the proactive send
      // is a fresh activity rather than a reply to a stale one — confirm
      // against buildConversationReference's contract.
      const proactiveRef = { ...baseRef, activityId: undefined };
      await params.adapter.continueConversation(params.appId, proactiveRef, async (ctx) => {
        await ctx.sendActivity({
          type: "message",
          text: reflectionResponse.trim(),
        });
      });
      log.info("sent reflection follow-up", { sessionKey });
    } catch (err) {
      log.debug?.("failed to send reflection follow-up", { error: String(err) });
    }
  }
}
/**
 * Append a reflection-derived learning to the session's companion file,
 * keeping only the most recent 10 entries.
 */
async function storeSessionLearning(params: {
  storePath: string;
  sessionKey: string;
  learning: string;
}): Promise<void> {
  const [fs, path] = await Promise.all([import("node:fs/promises"), import("node:path")]);
  // Replace anything outside [a-zA-Z0-9_-] so filenames are Windows-safe
  // (session keys contain colons).
  const safeKey = params.sessionKey.replace(/[^a-zA-Z0-9_-]/g, "_");
  const filePath = path.join(params.storePath, `${safeKey}.learnings.json`);
  let entries: string[] = [];
  try {
    const raw: unknown = JSON.parse(await fs.readFile(filePath, "utf-8"));
    if (Array.isArray(raw)) {
      entries = raw;
    }
  } catch {
    // Missing or unreadable file — start fresh.
  }
  entries.push(params.learning);
  // Keep only the newest 10 learnings to avoid unbounded growth.
  const capped = entries.length > 10 ? entries.slice(-10) : entries;
  await fs.mkdir(path.dirname(filePath), { recursive: true });
  await fs.writeFile(filePath, JSON.stringify(capped, null, 2), "utf-8");
}
/**
 * Load session learnings for injection into extraSystemPrompt.
 * Returns an empty array when the companion file is missing or malformed.
 */
export async function loadSessionLearnings(
  storePath: string,
  sessionKey: string,
): Promise<string[]> {
  const [fs, path] = await Promise.all([import("node:fs/promises"), import("node:path")]);
  // Same filename sanitization as the writer (Windows-safe, colon-free).
  const safeKey = sessionKey.replace(/[^a-zA-Z0-9_-]/g, "_");
  const filePath = path.join(storePath, `${safeKey}.learnings.json`);
  try {
    const parsed: unknown = JSON.parse(await fs.readFile(filePath, "utf-8"));
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
}

View File

@@ -8,6 +8,8 @@
* - Parsing fileConsent/invoke activities
*/
import { buildUserAgent } from "./user-agent.js";
export interface FileConsentCardParams {
filename: string;
description?: string;
@@ -114,6 +116,7 @@ export async function uploadToConsentUrl(params: {
const res = await fetchFn(params.url, {
method: "PUT",
headers: {
"User-Agent": buildUserAgent(),
"Content-Type": params.contentType ?? "application/octet-stream",
"Content-Range": `bytes 0-${params.buffer.length - 1}/${params.buffer.length}`,
},

View File

@@ -10,6 +10,7 @@
*/
import type { MSTeamsAccessTokenProvider } from "./attachments/types.js";
import { buildUserAgent } from "./user-agent.js";
const GRAPH_ROOT = "https://graph.microsoft.com/v1.0";
const GRAPH_BETA = "https://graph.microsoft.com/beta";
@@ -21,53 +22,6 @@ export interface OneDriveUploadResult {
name: string;
}
function parseUploadedDriveItem(
data: { id?: string; webUrl?: string; name?: string },
label: "OneDrive" | "SharePoint",
): OneDriveUploadResult {
if (!data.id || !data.webUrl || !data.name) {
throw new Error(`${label} upload response missing required fields`);
}
return {
id: data.id,
webUrl: data.webUrl,
name: data.name,
};
}
async function uploadDriveItem(params: {
buffer: Buffer;
filename: string;
contentType?: string;
tokenProvider: MSTeamsAccessTokenProvider;
fetchFn?: typeof fetch;
url: string;
label: "OneDrive" | "SharePoint";
}): Promise<OneDriveUploadResult> {
const fetchFn = params.fetchFn ?? fetch;
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
const res = await fetchFn(params.url, {
method: "PUT",
headers: {
Authorization: `Bearer ${token}`,
"Content-Type": params.contentType ?? "application/octet-stream",
},
body: new Uint8Array(params.buffer),
});
if (!res.ok) {
const body = await res.text().catch(() => "");
throw new Error(`${params.label} upload failed: ${res.status} ${res.statusText} - ${body}`);
}
return parseUploadedDriveItem(
(await res.json()) as { id?: string; webUrl?: string; name?: string },
params.label,
);
}
/**
* Upload a file to the user's OneDrive root folder.
* For larger files, this uses the simple upload endpoint (up to 4MB).
@@ -79,13 +33,42 @@ export async function uploadToOneDrive(params: {
tokenProvider: MSTeamsAccessTokenProvider;
fetchFn?: typeof fetch;
}): Promise<OneDriveUploadResult> {
const fetchFn = params.fetchFn ?? fetch;
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
// Use "OpenClawShared" folder to organize bot-uploaded files
const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`;
return await uploadDriveItem({
...params,
url: `${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`,
label: "OneDrive",
const res = await fetchFn(`${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`, {
method: "PUT",
headers: {
"User-Agent": buildUserAgent(),
Authorization: `Bearer ${token}`,
"Content-Type": params.contentType ?? "application/octet-stream",
},
body: new Uint8Array(params.buffer),
});
if (!res.ok) {
const body = await res.text().catch(() => "");
throw new Error(`OneDrive upload failed: ${res.status} ${res.statusText} - ${body}`);
}
const data = (await res.json()) as {
id?: string;
webUrl?: string;
name?: string;
};
if (!data.id || !data.webUrl || !data.name) {
throw new Error("OneDrive upload response missing required fields");
}
return {
id: data.id,
webUrl: data.webUrl,
name: data.name,
};
}
export interface OneDriveSharingLink {
@@ -109,6 +92,7 @@ export async function createSharingLink(params: {
const res = await fetchFn(`${GRAPH_ROOT}/me/drive/items/${params.itemId}/createLink`, {
method: "POST",
headers: {
"User-Agent": buildUserAgent(),
Authorization: `Bearer ${token}`,
"Content-Type": "application/json",
},
@@ -194,13 +178,45 @@ export async function uploadToSharePoint(params: {
siteId: string;
fetchFn?: typeof fetch;
}): Promise<OneDriveUploadResult> {
const fetchFn = params.fetchFn ?? fetch;
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
// Use "OpenClawShared" folder to organize bot-uploaded files
const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`;
return await uploadDriveItem({
...params,
url: `${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`,
label: "SharePoint",
});
const res = await fetchFn(
`${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`,
{
method: "PUT",
headers: {
"User-Agent": buildUserAgent(),
Authorization: `Bearer ${token}`,
"Content-Type": params.contentType ?? "application/octet-stream",
},
body: new Uint8Array(params.buffer),
},
);
if (!res.ok) {
const body = await res.text().catch(() => "");
throw new Error(`SharePoint upload failed: ${res.status} ${res.statusText} - ${body}`);
}
const data = (await res.json()) as {
id?: string;
webUrl?: string;
name?: string;
};
if (!data.id || !data.webUrl || !data.name) {
throw new Error("SharePoint upload response missing required fields");
}
return {
id: data.id,
webUrl: data.webUrl,
name: data.name,
};
}
export interface ChatMember {
@@ -239,7 +255,7 @@ export async function getDriveItemProperties(params: {
const res = await fetchFn(
`${GRAPH_ROOT}/sites/${params.siteId}/drive/items/${params.itemId}?$select=eTag,webDavUrl,name`,
{ headers: { Authorization: `Bearer ${token}` } },
{ headers: { "User-Agent": buildUserAgent(), Authorization: `Bearer ${token}` } },
);
if (!res.ok) {
@@ -273,8 +289,6 @@ export async function getDriveItemProperties(params: {
*
* This function looks up the matching Graph chat by querying the bot's chats filtered
* by the target user's AAD object ID.
*
* Returns the Graph chat ID if found, or null if resolution fails.
*/
export async function resolveGraphChatId(params: {
/** Bot Framework conversation ID (may be in non-Graph format for personal DMs) */
@@ -353,7 +367,7 @@ export async function getChatMembers(params: {
const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE);
const res = await fetchFn(`${GRAPH_ROOT}/chats/${params.chatId}/members`, {
headers: { Authorization: `Bearer ${token}` },
headers: { "User-Agent": buildUserAgent(), Authorization: `Bearer ${token}` },
});
if (!res.ok) {
@@ -413,6 +427,7 @@ export async function createSharePointSharingLink(params: {
{
method: "POST",
headers: {
"User-Agent": buildUserAgent(),
Authorization: `Bearer ${token}`,
"Content-Type": "application/json",
},

View File

@@ -1,16 +1,22 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
const { loadMSTeamsSdkWithAuthMock, readAccessTokenMock, resolveMSTeamsCredentialsMock } =
vi.hoisted(() => {
return {
loadMSTeamsSdkWithAuthMock: vi.fn(),
readAccessTokenMock: vi.fn(),
resolveMSTeamsCredentialsMock: vi.fn(),
};
});
const {
loadMSTeamsSdkWithAuthMock,
createMSTeamsTokenProviderMock,
readAccessTokenMock,
resolveMSTeamsCredentialsMock,
} = vi.hoisted(() => {
return {
loadMSTeamsSdkWithAuthMock: vi.fn(),
createMSTeamsTokenProviderMock: vi.fn(),
readAccessTokenMock: vi.fn(),
resolveMSTeamsCredentialsMock: vi.fn(),
};
});
vi.mock("./sdk.js", () => ({
loadMSTeamsSdkWithAuth: loadMSTeamsSdkWithAuthMock,
createMSTeamsTokenProvider: createMSTeamsTokenProviderMock,
}));
vi.mock("./token-response.js", () => ({
@@ -66,10 +72,10 @@ describe("msteams graph helpers", () => {
expect(globalThis.fetch).toHaveBeenCalledWith(
"https://graph.microsoft.com/v1.0/groups?$select=id",
{
headers: {
headers: expect.objectContaining({
Authorization: "Bearer graph-token",
ConsistencyLevel: "eventual",
},
}),
},
);
@@ -86,25 +92,21 @@ describe("msteams graph helpers", () => {
});
it("resolves Graph tokens through the SDK auth provider", async () => {
const getAccessToken = vi.fn(async () => ({ accessToken: "sdk-token" }));
const MsalTokenProvider = vi.fn(function MockMsalTokenProvider() {
return { getAccessToken };
});
const getAccessToken = vi.fn(async () => "raw-graph-token");
const mockApp = { id: "mock-app" };
resolveMSTeamsCredentialsMock.mockReturnValue({
appId: "app-id",
appPassword: "app-password",
tenantId: "tenant-id",
});
loadMSTeamsSdkWithAuthMock.mockResolvedValue({
sdk: { MsalTokenProvider },
authConfig: { clientId: "app-id" },
});
loadMSTeamsSdkWithAuthMock.mockResolvedValue({ app: mockApp });
createMSTeamsTokenProviderMock.mockReturnValue({ getAccessToken });
readAccessTokenMock.mockReturnValue("resolved-token");
await expect(resolveGraphToken({ channels: { msteams: {} } })).resolves.toBe("resolved-token");
expect(MsalTokenProvider).toHaveBeenCalledWith({ clientId: "app-id" });
expect(createMSTeamsTokenProviderMock).toHaveBeenCalledWith(mockApp);
expect(getAccessToken).toHaveBeenCalledWith("https://graph.microsoft.com");
});
@@ -114,15 +116,9 @@ describe("msteams graph helpers", () => {
"MS Teams credentials missing",
);
const getAccessToken = vi.fn(async () => ({ token: null }));
loadMSTeamsSdkWithAuthMock.mockResolvedValue({
sdk: {
MsalTokenProvider: vi.fn(function MockMsalTokenProvider() {
return { getAccessToken };
}),
},
authConfig: { clientId: "app-id" },
});
const getAccessToken = vi.fn(async () => null);
loadMSTeamsSdkWithAuthMock.mockResolvedValue({ app: { id: "mock-app" } });
createMSTeamsTokenProviderMock.mockReturnValue({ getAccessToken });
resolveMSTeamsCredentialsMock.mockReturnValue({
appId: "app-id",
appPassword: "app-password",

View File

@@ -1,8 +1,9 @@
import type { MSTeamsConfig } from "../runtime-api.js";
import { GRAPH_ROOT } from "./attachments/shared.js";
import { loadMSTeamsSdkWithAuth } from "./sdk.js";
import { createMSTeamsTokenProvider, loadMSTeamsSdkWithAuth } from "./sdk.js";
import { readAccessToken } from "./token-response.js";
import { resolveMSTeamsCredentials } from "./token.js";
import { buildUserAgent } from "./user-agent.js";
export type GraphUser = {
id?: string;
@@ -38,6 +39,7 @@ export async function fetchGraphJson<T>(params: {
}): Promise<T> {
const res = await fetch(`${GRAPH_ROOT}${params.path}`, {
headers: {
"User-Agent": buildUserAgent(),
Authorization: `Bearer ${params.token}`,
...params.headers,
},
@@ -56,10 +58,10 @@ export async function resolveGraphToken(cfg: unknown): Promise<string> {
if (!creds) {
throw new Error("MS Teams credentials missing");
}
const { sdk, authConfig } = await loadMSTeamsSdkWithAuth(creds);
const tokenProvider = new sdk.MsalTokenProvider(authConfig);
const token = await tokenProvider.getAccessToken("https://graph.microsoft.com");
const accessToken = readAccessToken(token);
const { app } = await loadMSTeamsSdkWithAuth(creds);
const tokenProvider = createMSTeamsTokenProvider(app);
const graphTokenValue = await tokenProvider.getAccessToken("https://graph.microsoft.com");
const accessToken = readAccessToken(graphTokenValue);
if (!accessToken) {
throw new Error("MS Teams graph token unavailable");
}

View File

@@ -0,0 +1,52 @@
import { afterEach, describe, expect, it, vi } from "vitest";
vi.mock("./runtime.js", () => ({
getMSTeamsRuntime: vi.fn(() => ({ version: "2026.3.19" })),
}));
import { fetchGraphJson } from "./graph.js";
import { resetUserAgentCache } from "./user-agent.js";
describe("fetchGraphJson User-Agent", () => {
afterEach(() => {
resetUserAgentCache();
vi.restoreAllMocks();
});
it("sends User-Agent header with OpenClaw version", async () => {
const mockFetch = vi.fn().mockResolvedValueOnce({
ok: true,
json: async () => ({ value: [] }),
});
vi.stubGlobal("fetch", mockFetch);
await fetchGraphJson({ token: "test-token", path: "/groups" });
expect(mockFetch).toHaveBeenCalledOnce();
const [, init] = mockFetch.mock.calls[0];
expect(init.headers["User-Agent"]).toMatch(/^teams\.ts\[apps\]\/.+ OpenClaw\/2026\.3\.19$/);
expect(init.headers).toHaveProperty("Authorization", "Bearer test-token");
vi.unstubAllGlobals();
});
it("allows caller headers to override User-Agent", async () => {
const mockFetch = vi.fn().mockResolvedValueOnce({
ok: true,
json: async () => ({ value: [] }),
});
vi.stubGlobal("fetch", mockFetch);
await fetchGraphJson({
token: "test-token",
path: "/groups",
headers: { "User-Agent": "custom-agent/1.0" },
});
const [, init] = mockFetch.mock.calls[0];
// Caller headers spread after, so they override
expect(init.headers["User-Agent"]).toBe("custom-agent/1.0");
vi.unstubAllGlobals();
});
});

View File

@@ -21,6 +21,7 @@ import { resolvePreferredOpenClawTmpDir } from "../../../src/infra/tmp-openclaw-
import {
type MSTeamsAdapter,
type MSTeamsRenderedMessage,
buildActivity,
renderReplyPayloadsToMessages,
sendMSTeamsMessages,
} from "./messenger.js";
@@ -293,16 +294,21 @@ describe("msteams messenger", () => {
expect(firstSent.text).toContain(
"📎 [upload.txt](https://onedrive.example.com/share/item123)",
);
expect(firstSent.entities).toEqual([
{
type: "mention",
text: "<at>John</at>",
mentioned: {
id: "29:08q2j2o3jc09au90eucae",
name: "John",
expect(sent[0]?.entities).toEqual(
expect.arrayContaining([
{
type: "mention",
text: "<at>John</at>",
mentioned: {
id: "29:08q2j2o3jc09au90eucae",
name: "John",
},
},
},
]);
expect.objectContaining({
additionalType: ["AIGeneratedContent"],
}),
]),
);
} finally {
await rm(tmpDir, { recursive: true, force: true });
}
@@ -477,4 +483,76 @@ describe("msteams messenger", () => {
]);
});
});
describe("buildActivity AI metadata", () => {
const baseRef: StoredConversationReference = {
activityId: "activity123",
user: { id: "user123", name: "User" },
agent: { id: "bot123", name: "Bot" },
conversation: { id: "conv123", conversationType: "personal" },
channelId: "msteams",
serviceUrl: "https://service.example.com",
};
it("adds AI-generated entity to text messages", async () => {
const activity = await buildActivity({ text: "hello" }, baseRef);
const entities = activity.entities as Array<Record<string, unknown>>;
expect(entities).toEqual(
expect.arrayContaining([
expect.objectContaining({
type: "https://schema.org/Message",
"@type": "Message",
additionalType: ["AIGeneratedContent"],
}),
]),
);
});
it("adds AI-generated entity to media-only messages", async () => {
const activity = await buildActivity({ mediaUrl: "https://example.com/img.png" }, baseRef);
const entities = activity.entities as Array<Record<string, unknown>>;
expect(entities).toEqual(
expect.arrayContaining([
expect.objectContaining({
additionalType: ["AIGeneratedContent"],
}),
]),
);
});
it("preserves mention entities alongside AI entity", async () => {
const activity = await buildActivity({ text: "hi <at>@User</at>" }, baseRef);
const entities = activity.entities as Array<Record<string, unknown>>;
// Should have at least the AI entity
expect(entities.length).toBeGreaterThanOrEqual(1);
expect(entities).toEqual(
expect.arrayContaining([
expect.objectContaining({
additionalType: ["AIGeneratedContent"],
}),
]),
);
});
it("sets feedbackLoopEnabled in channelData when enabled", async () => {
const activity = await buildActivity(
{ text: "hello" },
baseRef,
undefined,
undefined,
undefined,
{
feedbackLoopEnabled: true,
},
);
const channelData = activity.channelData as Record<string, unknown>;
expect(channelData.feedbackLoopEnabled).toBe(true);
});
it("defaults feedbackLoopEnabled to false", async () => {
const activity = await buildActivity({ text: "hello" }, baseRef);
const channelData = activity.channelData as Record<string, unknown>;
expect(channelData.feedbackLoopEnabled).toBe(false);
});
});
});

View File

@@ -264,24 +264,32 @@ export function renderReplyPayloadsToMessages(
return out;
}
async function buildActivity(
import { AI_GENERATED_ENTITY } from "./ai-entity.js";
export async function buildActivity(
msg: MSTeamsRenderedMessage,
conversationRef: StoredConversationReference,
tokenProvider?: MSTeamsAccessTokenProvider,
sharePointSiteId?: string,
mediaMaxBytes?: number,
options?: { feedbackLoopEnabled?: boolean },
): Promise<Record<string, unknown>> {
const activity: Record<string, unknown> = { type: "message" };
// Mark as AI-generated so Teams renders the "AI generated" badge.
activity.channelData = {
feedbackLoopEnabled: options?.feedbackLoopEnabled ?? false,
};
if (msg.text) {
// Parse mentions from text (format: @[Name](id))
const { text: formattedText, entities } = parseMentions(msg.text);
activity.text = formattedText;
// Add mention entities if any mentions were found
if (entities.length > 0) {
activity.entities = entities;
}
// Start with mention entities (if any) + AI-generated entity
activity.entities = [...(entities.length > 0 ? entities : []), AI_GENERATED_ENTITY];
} else {
activity.entities = [AI_GENERATED_ENTITY];
}
if (msg.mediaUrl) {
@@ -401,6 +409,8 @@ export async function sendMSTeamsMessages(params: {
sharePointSiteId?: string;
/** Max media size in bytes. Default: 100MB. */
mediaMaxBytes?: number;
/** Enable the Teams feedback loop (thumbs up/down) on sent messages. */
feedbackLoopEnabled?: boolean;
}): Promise<string[]> {
const messages = params.messages.filter(
(m) => (m.text && m.text.trim().length > 0) || m.mediaUrl,
@@ -461,6 +471,7 @@ export async function sendMSTeamsMessages(params: {
params.tokenProvider,
params.sharePointSiteId,
params.mediaMaxBytes,
{ feedbackLoopEnabled: params.feedbackLoopEnabled },
),
),
{ messageIndex, messageCount: messages.length },

View File

@@ -1,5 +1,6 @@
import type { OpenClawConfig, RuntimeEnv } from "../runtime-api.js";
import type { MSTeamsConversationStore } from "./conversation-store.js";
import { buildFeedbackEvent, runFeedbackReflection } from "./feedback-reflection.js";
import { buildFileInfoCard, parseFileConsentInvoke, uploadToConsentUrl } from "./file-consent.js";
import { normalizeMSTeamsConversationId } from "./inbound.js";
import type { MSTeamsAdapter } from "./messenger.js";
@@ -8,7 +9,9 @@ import type { MSTeamsMonitorLogger } from "./monitor-types.js";
import { getPendingUpload, removePendingUpload } from "./pending-uploads.js";
import type { MSTeamsPollStore } from "./polls.js";
import { withRevokedProxyFallback } from "./revoked-context.js";
import { getMSTeamsRuntime } from "./runtime.js";
import type { MSTeamsTurnContext } from "./sdk-types.js";
import { buildGroupWelcomeText, buildWelcomeCard } from "./welcome-card.js";
export type MSTeamsAccessTokenProvider = {
getAccessToken: (scope: string) => Promise<string>;
@@ -137,6 +140,161 @@ async function handleFileConsentInvoke(
return true;
}
/**
* Parse and handle feedback invoke activities (thumbs up/down).
* Returns true if the activity was a feedback invoke, false otherwise.
*/
async function handleFeedbackInvoke(
context: MSTeamsTurnContext,
deps: MSTeamsMessageHandlerDeps,
): Promise<boolean> {
const activity = context.activity;
const value = activity.value as
| {
actionName?: string;
actionValue?: { reaction?: string; feedback?: string };
replyToId?: string;
}
| undefined;
if (!value) {
return false;
}
// Teams feedback invoke format: actionName="feedback", actionValue.reaction="like"|"dislike"
if (value.actionName !== "feedback") {
return false;
}
const reaction = value.actionValue?.reaction;
if (reaction !== "like" && reaction !== "dislike") {
deps.log.debug?.("ignoring feedback with unknown reaction", { reaction });
return false;
}
const msteamsCfg = deps.cfg.channels?.msteams;
if (msteamsCfg?.feedbackEnabled === false) {
deps.log.debug?.("feedback handling disabled");
return true; // Still consume the invoke
}
// Extract user comment from the nested JSON string
let userComment: string | undefined;
if (value.actionValue?.feedback) {
try {
const parsed = JSON.parse(value.actionValue.feedback) as { feedbackText?: string };
userComment = parsed.feedbackText || undefined;
} catch {
// Best effort — feedback text is optional
}
}
// Strip ;messageid=... suffix to match the normalized ID used by the message handler.
const conversationId = normalizeMSTeamsConversationId(activity.conversation?.id ?? "unknown");
const senderId = activity.from?.aadObjectId ?? activity.from?.id ?? "unknown";
const messageId = value.replyToId ?? activity.replyToId ?? "unknown";
const isNegative = reaction === "dislike";
// Route feedback using the same chat-type logic as normal messages
// so session keys, agent IDs, and transcript paths match.
const convType = activity.conversation?.conversationType?.toLowerCase();
const isDirectMessage = convType === "personal" || (!convType && !activity.conversation?.isGroup);
const isChannel = convType === "channel";
const core = getMSTeamsRuntime();
const route = core.channel.routing.resolveAgentRoute({
cfg: deps.cfg,
channel: "msteams",
peer: {
kind: isDirectMessage ? "direct" : isChannel ? "channel" : "group",
id: isDirectMessage ? senderId : conversationId,
},
});
// Log feedback event to session JSONL
const feedbackEvent = buildFeedbackEvent({
messageId,
value: isNegative ? "negative" : "positive",
comment: userComment,
sessionKey: route.sessionKey,
agentId: route.agentId,
conversationId,
});
deps.log.info("received feedback", {
value: feedbackEvent.value,
messageId,
conversationId,
hasComment: Boolean(userComment),
});
// Write feedback event to session transcript
try {
const storePath = core.channel.session.resolveStorePath(deps.cfg.session?.store, {
agentId: route.agentId,
});
const fs = await import("node:fs/promises");
const pathMod = await import("node:path");
const safeKey = route.sessionKey.replace(/[^a-zA-Z0-9_-]/g, "_");
const transcriptFile = pathMod.join(storePath, `${safeKey}.jsonl`);
await fs.appendFile(transcriptFile, JSON.stringify(feedbackEvent) + "\n", "utf-8").catch(() => {
// Best effort — transcript dir may not exist yet
});
} catch {
// Best effort
}
// Build conversation reference for proactive messages (ack + reflection follow-up)
const conversationRef = {
activityId: activity.id,
user: {
id: activity.from?.id,
name: activity.from?.name,
aadObjectId: activity.from?.aadObjectId,
},
agent: activity.recipient
? { id: activity.recipient.id, name: activity.recipient.name }
: undefined,
bot: activity.recipient
? { id: activity.recipient.id, name: activity.recipient.name }
: undefined,
conversation: {
id: conversationId,
conversationType: activity.conversation?.conversationType,
tenantId: activity.conversation?.tenantId,
},
channelId: activity.channelId ?? "msteams",
serviceUrl: activity.serviceUrl,
locale: activity.locale,
};
// For negative feedback, trigger background reflection (fire-and-forget).
// No ack message — the reflection follow-up serves as the acknowledgement.
// Sending anything during the invoke handler causes "unable to reach app" errors.
if (isNegative && msteamsCfg?.feedbackReflection !== false) {
// Note: thumbedDownResponse is not populated here because we don't cache
// sent message text. The agent still has full session context for reflection
// since the reflection runs in the same session. The user comment (if any)
// provides additional signal.
runFeedbackReflection({
cfg: deps.cfg,
adapter: deps.adapter,
appId: deps.appId,
conversationRef,
sessionKey: route.sessionKey,
agentId: route.agentId,
conversationId,
feedbackMessageId: messageId,
userComment,
log: deps.log,
}).catch((err) => {
deps.log.error("feedback reflection failed", { error: String(err) });
});
}
return true;
}
export function registerMSTeamsHandlers<T extends MSTeamsActivityHandler>(
handler: T,
deps: MSTeamsMessageHandlerDeps,
@@ -168,6 +326,18 @@ export function registerMSTeamsHandlers<T extends MSTeamsActivityHandler>(
}
return;
}
// Handle feedback invokes (thumbs up/down on AI-generated messages).
// Just return after handling — the process() handler sends HTTP 200 automatically.
// Do NOT call sendActivity with invokeResponse; our custom adapter would POST
// a new activity to Bot Framework instead of responding to the HTTP request.
if (ctx.activity?.type === "invoke" && ctx.activity?.name === "message/submitAction") {
const handled = await handleFeedbackInvoke(ctx, deps);
if (handled) {
return;
}
}
return originalRun.call(handler, context);
};
}
@@ -182,11 +352,51 @@ export function registerMSTeamsHandlers<T extends MSTeamsActivityHandler>(
});
handler.onMembersAdded(async (context, next) => {
const membersAdded = (context as MSTeamsTurnContext).activity?.membersAdded ?? [];
const ctx = context as MSTeamsTurnContext;
const membersAdded = ctx.activity?.membersAdded ?? [];
const botId = ctx.activity?.recipient?.id;
const msteamsCfg = deps.cfg.channels?.msteams;
for (const member of membersAdded) {
if (member.id !== (context as MSTeamsTurnContext).activity?.recipient?.id) {
if (member.id === botId) {
// Bot was added to a conversation — send welcome card if configured.
const conversationType =
ctx.activity?.conversation?.conversationType?.toLowerCase() ?? "personal";
const isPersonal = conversationType === "personal";
if (isPersonal && msteamsCfg?.welcomeCard !== false) {
const botName = ctx.activity?.recipient?.name ?? undefined;
const card = buildWelcomeCard({
botName,
promptStarters: msteamsCfg?.promptStarters,
});
try {
await ctx.sendActivity({
type: "message",
attachments: [
{
contentType: "application/vnd.microsoft.card.adaptive",
content: card,
},
],
});
deps.log.info("sent welcome card");
} catch (err) {
deps.log.debug?.("failed to send welcome card", { error: String(err) });
}
} else if (!isPersonal && msteamsCfg?.groupWelcomeCard === true) {
const botName = ctx.activity?.recipient?.name ?? undefined;
try {
await ctx.sendActivity(buildGroupWelcomeText(botName));
deps.log.info("sent group welcome message");
} catch (err) {
deps.log.debug?.("failed to send group welcome", { error: String(err) });
}
} else {
deps.log.debug?.("skipping welcome (disabled by config or conversation type)");
}
} else {
deps.log.debug?.("member added", { member: member.id });
// Don't send welcome message - let the user initiate conversation.
}
}
await next();

View File

@@ -324,6 +324,11 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) {
return;
}
// Extract clientInfo entity (Teams sends this on every activity with timezone, locale, etc.)
const clientInfo = activity.entities?.find((e) => e.type === "clientInfo") as
| { timezone?: string; locale?: string; country?: string; platform?: string }
| undefined;
// Build conversation reference for proactive replies.
const agent = activity.recipient;
const conversationRef: StoredConversationReference = {
@@ -340,6 +345,8 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) {
channelId: activity.channelId,
serviceUrl: activity.serviceUrl,
locale: activity.locale,
// Only set timezone if present (preserve previously stored value on next upsert)
...(clientInfo?.timezone ? { timezone: clientInfo.timezone } : {}),
};
conversationStore.upsert(conversationId, conversationRef).catch((err) => {
log.debug?.("failed to save conversation reference", {
@@ -575,10 +582,26 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) {
sharePointSiteId,
});
// Use Teams clientInfo timezone if no explicit userTimezone is configured.
// This ensures the agent knows the sender's timezone for time-aware responses
// and proactive sends within the same session.
// Apply Teams clientInfo timezone if no explicit userTimezone is configured.
const senderTimezone = clientInfo?.timezone || conversationRef.timezone;
const effectiveCfg =
senderTimezone && !cfg.agents?.defaults?.userTimezone
? {
...cfg,
agents: {
...cfg.agents,
defaults: { ...cfg.agents?.defaults, userTimezone: senderTimezone },
},
}
: cfg;
log.info("dispatching to agent", { sessionKey: route.sessionKey });
try {
const { queuedFinal, counts } = await dispatchReplyFromConfigWithSettledDispatcher({
cfg,
cfg: effectiveCfg,
ctxPayload,
dispatcher,
onSettled: () => markDispatchIdle(),

View File

@@ -113,6 +113,12 @@ vi.mock("./resolve-allowlist.js", () => ({
vi.mock("./sdk.js", () => ({
createMSTeamsAdapter: () => createMSTeamsAdapter(),
loadMSTeamsSdkWithAuth: () => loadMSTeamsSdkWithAuth(),
createMSTeamsTokenProvider: () => ({
getAccessToken: vi.fn().mockResolvedValue("mock-token"),
}),
createBotFrameworkJwtValidator: vi.fn().mockResolvedValue({
validate: vi.fn().mockResolvedValue(true),
}),
}));
vi.mock("./runtime.js", () => ({

View File

@@ -18,7 +18,12 @@ import {
resolveMSTeamsUserAllowlist,
} from "./resolve-allowlist.js";
import { getMSTeamsRuntime } from "./runtime.js";
import { createMSTeamsAdapter, loadMSTeamsSdkWithAuth } from "./sdk.js";
import {
createBotFrameworkJwtValidator,
createMSTeamsAdapter,
createMSTeamsTokenProvider,
loadMSTeamsSdkWithAuth,
} from "./sdk.js";
import { resolveMSTeamsCredentials } from "./token.js";
import {
applyMSTeamsWebhookTimeouts,
@@ -224,14 +229,16 @@ export async function monitorMSTeamsProvider(
// Dynamic import to avoid loading SDK when provider is disabled
const express = await import("express");
const { sdk, authConfig } = await loadMSTeamsSdkWithAuth(creds);
const { ActivityHandler, MsalTokenProvider, authorizeJWT } = sdk;
const { sdk, app } = await loadMSTeamsSdkWithAuth(creds);
// Auth configuration - create early so adapter is available for deliverReplies
const tokenProvider = new MsalTokenProvider(authConfig);
const adapter = createMSTeamsAdapter(authConfig, sdk);
// Build a token provider adapter for Graph API operations
const tokenProvider = createMSTeamsTokenProvider(app);
const handler = registerMSTeamsHandlers(new ActivityHandler() as MSTeamsActivityHandler, {
const adapter = createMSTeamsAdapter(app, sdk);
// Build a simple ActivityHandler-compatible object
const handler = buildActivityHandler();
registerMSTeamsHandlers(handler, {
cfg,
runtime,
appId,
@@ -246,7 +253,19 @@ export async function monitorMSTeamsProvider(
// Create Express server
const expressApp = express.default();
expressApp.use(authorizeJWT(authConfig));
// Cheap pre-parse auth gate: reject requests without a Bearer token before
// spending CPU/memory on JSON body parsing. This prevents unauthenticated
// request floods from forcing body parsing on internet-exposed webhooks.
expressApp.use((req: Request, res: Response, next: (err?: unknown) => void) => {
const auth = req.headers.authorization;
if (!auth || !auth.startsWith("Bearer ")) {
res.status(401).json({ error: "Unauthorized" });
return;
}
next();
});
expressApp.use(express.json({ limit: MSTEAMS_WEBHOOK_MAX_BODY_BYTES }));
expressApp.use((err: unknown, _req: Request, res: Response, next: (err?: unknown) => void) => {
if (err && typeof err === "object" && "status" in err && err.status === 413) {
@@ -256,6 +275,29 @@ export async function monitorMSTeamsProvider(
next(err);
});
// JWT validation — verify Bot Framework tokens using the Teams SDK's
// JwtValidator (validates signature via JWKS, audience, issuer, expiration).
const jwtValidator = await createBotFrameworkJwtValidator(creds);
expressApp.use((req: Request, res: Response, next: (err?: unknown) => void) => {
// Authorization header is guaranteed by the pre-parse auth gate above.
const authHeader = req.headers.authorization!;
const serviceUrl = (req.body as Record<string, unknown>)?.serviceUrl as string | undefined;
jwtValidator
.validate(authHeader, serviceUrl)
.then((valid) => {
if (!valid) {
log.debug?.("JWT validation failed");
res.status(401).json({ error: "Unauthorized" });
return;
}
next();
})
.catch((err) => {
log.debug?.(`JWT validation error: ${String(err)}`);
res.status(401).json({ error: "Unauthorized" });
});
});
// Set up the messages endpoint - use configured path and /api/messages as fallback
const configuredPath = msteamsCfg.webhook?.path ?? "/api/messages";
const messageHandler = (req: Request, res: Response) => {
@@ -320,3 +362,65 @@ export async function monitorMSTeamsProvider(
return { app: expressApp, shutdown };
}
/**
* Build a minimal ActivityHandler-compatible object that supports
* onMessage / onMembersAdded registration and a run() method.
*/
function buildActivityHandler(): MSTeamsActivityHandler {
type Handler = (context: unknown, next: () => Promise<void>) => Promise<void>;
const messageHandlers: Handler[] = [];
const membersAddedHandlers: Handler[] = [];
const reactionsAddedHandlers: Handler[] = [];
const reactionsRemovedHandlers: Handler[] = [];
const handler: MSTeamsActivityHandler = {
onMessage(cb) {
messageHandlers.push(cb);
return handler;
},
onMembersAdded(cb) {
membersAddedHandlers.push(cb);
return handler;
},
onReactionsAdded(cb) {
reactionsAddedHandlers.push(cb);
return handler;
},
onReactionsRemoved(cb) {
reactionsRemovedHandlers.push(cb);
return handler;
},
async run(context: unknown) {
const ctx = context as { activity?: { type?: string } };
const activityType = ctx?.activity?.type;
const noop = async () => {};
if (activityType === "message") {
for (const h of messageHandlers) {
await h(context, noop);
}
} else if (activityType === "conversationUpdate") {
for (const h of membersAddedHandlers) {
await h(context, noop);
}
} else if (activityType === "messageReaction") {
const activity = (
ctx as { activity?: { reactionsAdded?: unknown[]; reactionsRemoved?: unknown[] } }
)?.activity;
if (activity?.reactionsAdded?.length) {
for (const h of reactionsAddedHandlers) {
await h(context, noop);
}
}
if (activity?.reactionsRemoved?.length) {
for (const h of reactionsRemovedHandlers) {
await h(context, noop);
}
}
}
},
};
return handler;
}

View File

@@ -5,18 +5,27 @@ const hostMockState = vi.hoisted(() => ({
tokenError: null as Error | null,
}));
vi.mock("@microsoft/agents-hosting", () => ({
getAuthConfigWithDefaults: (cfg: unknown) => cfg,
MsalTokenProvider: class {
async getAccessToken() {
vi.mock("@microsoft/teams.apps", () => ({
App: class {
protected async getBotToken() {
if (hostMockState.tokenError) {
throw hostMockState.tokenError;
}
return "token";
return { value: "token" };
}
protected async getAppGraphToken() {
if (hostMockState.tokenError) {
throw hostMockState.tokenError;
}
return { value: "token" };
}
},
}));
vi.mock("@microsoft/teams.api", () => ({
Client: class {},
}));
import { probeMSTeams } from "./probe.js";
describe("msteams probe", () => {

View File

@@ -4,7 +4,7 @@ import {
type MSTeamsConfig,
} from "../runtime-api.js";
import { formatUnknownError } from "./errors.js";
import { loadMSTeamsSdkWithAuth } from "./sdk.js";
import { createMSTeamsTokenProvider, loadMSTeamsSdkWithAuth } from "./sdk.js";
import { readAccessToken } from "./token-response.js";
import { resolveMSTeamsCredentials } from "./token.js";
@@ -64,9 +64,13 @@ export async function probeMSTeams(cfg?: MSTeamsConfig): Promise<ProbeMSTeamsRes
}
try {
const { sdk, authConfig } = await loadMSTeamsSdkWithAuth(creds);
const tokenProvider = new sdk.MsalTokenProvider(authConfig);
await tokenProvider.getAccessToken("https://api.botframework.com");
const { app } = await loadMSTeamsSdkWithAuth(creds);
const tokenProvider = createMSTeamsTokenProvider(app);
const botTokenValue = await tokenProvider.getAccessToken("https://api.botframework.com");
if (!botTokenValue) {
throw new Error("Failed to acquire bot token");
}
let graph:
| {
ok: boolean;
@@ -76,8 +80,8 @@ export async function probeMSTeams(cfg?: MSTeamsConfig): Promise<ProbeMSTeamsRes
}
| undefined;
try {
const graphToken = await tokenProvider.getAccessToken("https://graph.microsoft.com");
const accessToken = readAccessToken(graphToken);
const graphTokenValue = await tokenProvider.getAccessToken("https://graph.microsoft.com");
const accessToken = readAccessToken(graphTokenValue);
const payload = accessToken ? decodeJwtPayload(accessToken) : null;
graph = {
ok: true,

View File

@@ -24,6 +24,7 @@ import type { MSTeamsMonitorLogger } from "./monitor-types.js";
import { withRevokedProxyFallback } from "./revoked-context.js";
import { getMSTeamsRuntime } from "./runtime.js";
import type { MSTeamsTurnContext } from "./sdk-types.js";
import { TeamsHttpStream } from "./streaming-message.js";
export function createMSTeamsReplyDispatcher(params: {
cfg: OpenClawConfig;
@@ -45,33 +46,46 @@ export function createMSTeamsReplyDispatcher(params: {
}) {
const core = getMSTeamsRuntime();
// Determine conversation type to decide typing vs streaming behavior:
// - personal (1:1): typing bubble + streaming (typing shows immediately,
// streaming takes over once tokens arrive)
// - groupChat: typing bubble only, no streaming
// - channel: neither (Teams doesn't support typing or streaming in channels)
const conversationType = params.conversationRef.conversation?.conversationType?.toLowerCase();
const isPersonal = conversationType === "personal";
const isGroupChat = conversationType === "groupchat";
const isChannel = conversationType === "channel";
/**
* Send a typing indicator.
*
* First tries the live turn context (cheapest path). When the context has
* been revoked (debounced messages) we fall back to proactive messaging via
* the stored conversation reference so the user still sees the "…" bubble.
* Sent for personal and group chats so users see immediate feedback.
* Channels don't support typing indicators.
*/
const sendTypingIndicator = async () => {
await withRevokedProxyFallback({
run: async () => {
await params.context.sendActivity({ type: "typing" });
},
onRevoked: async () => {
const baseRef = buildConversationReference(params.conversationRef);
await params.adapter.continueConversation(
params.appId,
{ ...baseRef, activityId: undefined },
async (ctx) => {
await ctx.sendActivity({ type: "typing" });
},
);
},
onRevokedLog: () => {
params.log.debug?.("turn context revoked, sending typing via proactive messaging");
},
});
};
const sendTypingIndicator =
isPersonal || isGroupChat
? async () => {
await withRevokedProxyFallback({
run: async () => {
await params.context.sendActivity({ type: "typing" });
},
onRevoked: async () => {
const baseRef = buildConversationReference(params.conversationRef);
await params.adapter.continueConversation(
params.appId,
{ ...baseRef, activityId: undefined },
async (ctx) => {
await ctx.sendActivity({ type: "typing" });
},
);
},
onRevokedLog: () => {
params.log.debug?.("turn context revoked, sending typing via proactive messaging");
},
});
}
: async () => {
// No-op for channels (not supported)
};
const { onModelSelected, typingCallbacks, ...replyPipeline } = createChannelReplyPipeline({
cfg: params.cfg,
@@ -99,12 +113,26 @@ export function createMSTeamsReplyDispatcher(params: {
cfg: params.cfg,
resolveChannelLimitMb: ({ cfg }) => cfg.channels?.msteams?.mediaMaxMb,
});
const feedbackLoopEnabled = params.cfg.channels?.msteams?.feedbackEnabled !== false;
// Streaming for personal (1:1) chats using the Teams streaminfo protocol.
let stream: TeamsHttpStream | undefined;
// Track whether onPartialReply was ever called — if so, the stream
// owns the text delivery and deliver should skip text payloads.
let streamReceivedTokens = false;
if (isPersonal) {
stream = new TeamsHttpStream({
sendActivity: (activity) => params.context.sendActivity(activity),
feedbackLoopEnabled,
onError: (err) => {
params.log.debug?.(`stream error: ${err instanceof Error ? err.message : String(err)}`);
},
});
}
// Accumulate rendered messages from all deliver() calls so the entire turn's
// reply is sent in a single sendMSTeamsMessages() call. This avoids Teams
// silently dropping blocks 2+ when each deliver() opened its own independent
// continueConversation() call — only the first proactive send per turn context
// window succeeds. (#29379)
// reply is sent in a single sendMSTeamsMessages() call. (#29379)
const pendingMessages: MSTeamsRenderedMessage[] = [];
const sendMessages = async (messages: MSTeamsRenderedMessage[]): Promise<string[]> => {
@@ -115,7 +143,6 @@ export function createMSTeamsReplyDispatcher(params: {
conversationRef: params.conversationRef,
context: params.context,
messages,
// Enable default retry/backoff for throttling/transient failures.
retry: {},
onRetry: (event) => {
params.log.debug?.("retrying send", {
@@ -126,6 +153,7 @@ export function createMSTeamsReplyDispatcher(params: {
tokenProvider: params.tokenProvider,
sharePointSiteId: params.sharePointSiteId,
mediaMaxBytes,
feedbackLoopEnabled,
});
};
@@ -133,16 +161,12 @@ export function createMSTeamsReplyDispatcher(params: {
if (pendingMessages.length === 0) {
return;
}
// Copy the buffer before draining so we have a reference for per-message
// retry if the batch send fails.
const toSend = pendingMessages.splice(0);
const total = toSend.length;
let ids: string[];
try {
ids = await sendMessages(toSend);
} catch {
// Batch send failed (e.g. bad attachment on one message); retry each
// message individually so trailing blocks are not silently lost.
ids = [];
let failed = 0;
for (const msg of toSend) {
@@ -175,6 +199,18 @@ export function createMSTeamsReplyDispatcher(params: {
humanDelay: core.channel.reply.resolveHumanDelayConfig(params.cfg, params.agentId),
typingCallbacks,
deliver: async (payload) => {
// When streaming received tokens AND hasn't failed, skip text delivery —
// finalize() handles the final message. If streaming failed (>4000 chars),
// fall through so deliver sends the complete response.
// For payloads with media, strip the text and send media only.
if (stream && streamReceivedTokens && stream.hasContent) {
const hasMedia = Boolean(payload.mediaUrl || payload.mediaUrls?.length);
if (!hasMedia) {
return;
}
payload = { ...payload, text: undefined };
}
// Render the payload to messages and accumulate them. All messages from
// this turn are flushed together in markDispatchIdle() so they go out
// in a single continueConversation() call.
@@ -203,8 +239,7 @@ export function createMSTeamsReplyDispatcher(params: {
},
});
// Wrap markDispatchIdle to flush all accumulated messages before signalling idle.
// Returns a promise so callers (e.g. onSettled) can await completion.
// Wrap markDispatchIdle to flush accumulated messages and finalize stream.
const markDispatchIdle = (): Promise<void> => {
return flushPendingMessages()
.catch((err) => {
@@ -218,14 +253,36 @@ export function createMSTeamsReplyDispatcher(params: {
hint,
});
})
.then(() => {
if (stream) {
return stream.finalize().catch((err) => {
params.log.debug?.("stream finalize failed", { error: String(err) });
});
}
})
.finally(() => {
baseMarkDispatchIdle();
});
};
// Build reply options with onPartialReply for streaming.
// Send the informative update on the first token (not eagerly at stream creation)
// so it only appears when the LLM is actually generating text — not when the
// agent uses a tool (e.g. sends an adaptive card) without streaming.
const streamingReplyOptions = stream
? {
onPartialReply: (payload: { text?: string }) => {
if (payload.text) {
streamReceivedTokens = true;
stream!.update(payload.text);
}
},
}
: {};
return {
dispatcher,
replyOptions: { ...replyOptions, onModelSelected },
replyOptions: { ...replyOptions, ...streamingReplyOptions, onModelSelected },
markDispatchIdle,
};
}

View File

@@ -1,5 +1,3 @@
import type { TurnContext } from "@microsoft/agents-hosting";
/**
* Minimal public surface we depend on from the Microsoft SDK types.
*
@@ -8,7 +6,47 @@ import type { TurnContext } from "@microsoft/agents-hosting";
* stricter than what the runtime accepts (e.g. it allows plain activity-like
* objects), so we model the minimal structural shape we rely on.
*/
export type MSTeamsActivity = TurnContext["activity"];
export type MSTeamsActivity = {
  // Activity type: "message", "typing", "invoke", "conversationUpdate", …
  type: string;
  id?: string;
  timestamp?: string;
  localTimestamp?: string;
  channelId?: string;
  // Sender of the activity (user, or the bot on outbound activities).
  from?: { id?: string; name?: string; aadObjectId?: string; role?: string };
  conversation?: {
    id?: string;
    // Presumably "personal" | "groupChat" | "channel" — TODO confirm the full value set.
    conversationType?: string;
    tenantId?: string;
    name?: string;
    isGroup?: boolean;
  };
  // The bot identity, for inbound activities.
  recipient?: { id?: string; name?: string };
  text?: string;
  textFormat?: string;
  locale?: string;
  // Regional Bot Framework endpoint used when replying to this activity.
  serviceUrl?: string;
  // Teams-specific context (team/channel/tenant); open-ended by design.
  channelData?: {
    team?: { id?: string; name?: string };
    channel?: { id?: string; name?: string };
    tenant?: { id?: string };
    [key: string]: unknown;
  };
  attachments?: Array<{
    contentType?: string;
    contentUrl?: string;
    content?: unknown;
    name?: string;
    thumbnailUrl?: string;
  }>;
  // Mentions, streaminfo markers, AI-generated labels, etc.
  entities?: Array<Record<string, unknown>>;
  // Payload of invoke / Action.Submit activities.
  value?: unknown;
  name?: string;
  // Roster changes carried by conversationUpdate activities.
  membersAdded?: Array<{ id?: string; name?: string }>;
  membersRemoved?: Array<{ id?: string; name?: string }>;
  replyToId?: string;
  // Keep the shape open: we only model the fields this provider reads.
  [key: string]: unknown;
};
export type MSTeamsTurnContext = {
activity: MSTeamsActivity;

View File

@@ -1,33 +1,377 @@
import type { MSTeamsAdapter } from "./messenger.js";
import type { MSTeamsCredentials } from "./token.js";
import { buildUserAgent } from "./user-agent.js";
export type MSTeamsSdk = typeof import("@microsoft/agents-hosting");
export type MSTeamsAuthConfig = ReturnType<MSTeamsSdk["getAuthConfigWithDefaults"]>;
/**
* Resolved Teams SDK modules loaded lazily to avoid importing when the
* provider is disabled.
*/
export type MSTeamsTeamsSdk = {
App: typeof import("@microsoft/teams.apps").App;
Client: typeof import("@microsoft/teams.api").Client;
};
export async function loadMSTeamsSdk(): Promise<MSTeamsSdk> {
return await import("@microsoft/agents-hosting");
/**
* A Teams SDK App instance used for token management and proactive messaging.
*/
export type MSTeamsApp = InstanceType<MSTeamsTeamsSdk["App"]>;
/**
* Token provider compatible with the existing codebase, wrapping the Teams
* SDK App's token methods.
*/
export type MSTeamsTokenProvider = {
getAccessToken: (scope: string) => Promise<string>;
};
/**
 * Dynamically import the Teams SDK modules in parallel and expose only the
 * constructors this provider uses. Loaded lazily so a disabled provider
 * never pays the import cost.
 */
export async function loadMSTeamsSdk(): Promise<MSTeamsTeamsSdk> {
  const [{ App }, { Client }] = await Promise.all([
    import("@microsoft/teams.apps"),
    import("@microsoft/teams.api"),
  ]);
  return { App, Client };
}
export function buildMSTeamsAuthConfig(
creds: MSTeamsCredentials,
sdk: MSTeamsSdk,
): MSTeamsAuthConfig {
return sdk.getAuthConfigWithDefaults({
/**
 * Create a Teams SDK App instance from credentials. The App manages token
 * acquisition, JWT validation, and the HTTP server lifecycle.
 *
 * This replaces the previous CloudAdapter + MsalTokenProvider + authorizeJWT
 * from @microsoft/agents-hosting.
 *
 * @param creds Bot registration credentials (app id, password, tenant).
 * @param sdk Lazily-loaded Teams SDK module handles.
 * @returns A configured (not yet started) Teams SDK App.
 */
export function createMSTeamsApp(creds: MSTeamsCredentials, sdk: MSTeamsTeamsSdk): MSTeamsApp {
  return new sdk.App({
    clientId: creds.appId,
    clientSecret: creds.appPassword,
    tenantId: creds.tenantId,
  });
}
export function createMSTeamsAdapter(
authConfig: MSTeamsAuthConfig,
sdk: MSTeamsSdk,
): MSTeamsAdapter {
return new sdk.CloudAdapter(authConfig) as unknown as MSTeamsAdapter;
/**
 * Build a token provider that uses the Teams SDK App for token acquisition.
 *
 * Graph scopes are served from the App's Graph token; every other scope
 * (Bot Framework) falls back to the bot token. A missing token yields "".
 */
export function createMSTeamsTokenProvider(app: MSTeamsApp): MSTeamsTokenProvider {
  // The App exposes token getters that are not part of its public typings,
  // so view it through a narrow structural type once, up front.
  const tokenSource = app as unknown as {
    getAppGraphToken(): Promise<{ toString(): string } | null>;
    getBotToken(): Promise<{ toString(): string } | null>;
  };
  return {
    async getAccessToken(scope: string): Promise<string> {
      const wantsGraph = scope.includes("graph.microsoft.com");
      const token = wantsGraph
        ? await tokenSource.getAppGraphToken()
        : await tokenSource.getBotToken();
      return token ? String(token) : "";
    },
  };
}
/**
 * Update an existing activity via the Bot Framework REST API.
 * PUT /v3/conversations/{conversationId}/activities/{activityId}
 *
 * @param params.serviceUrl Regional Bot Framework base URL; trailing slashes stripped.
 * @param params.conversationId Conversation containing the activity.
 * @param params.activityId Id of the activity to replace.
 * @param params.activity Replacement fields. Spread over a `"message"` default, so
 *   an explicit `activity.type` wins; `id` is always forced to `activityId`.
 * @param params.token Optional bearer token; when absent the request is sent
 *   without an Authorization header.
 * @returns The parsed JSON response, or `{ id: activityId }` when the body is not JSON.
 * @throws Error (augmented with `statusCode`) on any non-2xx response.
 */
async function updateActivityViaRest(params: {
  serviceUrl: string;
  conversationId: string;
  activityId: string;
  activity: Record<string, unknown>;
  token?: string;
}): Promise<unknown> {
  const { serviceUrl, conversationId, activityId, activity, token } = params;
  // Normalize to avoid "//v3" when serviceUrl carries a trailing slash.
  const baseUrl = serviceUrl.replace(/\/+$/, "");
  const url = `${baseUrl}/v3/conversations/${encodeURIComponent(conversationId)}/activities/${encodeURIComponent(activityId)}`;
  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    "User-Agent": buildUserAgent(),
  };
  if (token) {
    headers.Authorization = `Bearer ${token}`;
  }
  const response = await fetch(url, {
    method: "PUT",
    headers,
    body: JSON.stringify({
      type: "message",
      ...activity,
      id: activityId,
    }),
  });
  if (!response.ok) {
    // Best-effort body capture for the error message; never throw from here.
    const body = await response.text().catch(() => "");
    throw Object.assign(new Error(`updateActivity failed: HTTP ${response.status} ${body}`), {
      statusCode: response.status,
    });
  }
  // Some service responses have no JSON body; fall back to echoing the id.
  return await response.json().catch(() => ({ id: activityId }));
}
/**
 * Build a CloudAdapter-compatible adapter using the Teams SDK REST client.
 *
 * This replaces the previous CloudAdapter from @microsoft/agents-hosting.
 * For incoming requests: the App's HttpPlugin handles JWT validation.
 * For proactive sends: uses the Bot Framework REST API via
 * @microsoft/teams.api Client.
 */
export function createMSTeamsAdapter(app: MSTeamsApp, sdk: MSTeamsTeamsSdk): MSTeamsAdapter {
  return {
    // Proactive path: re-open a stored conversation reference and run `logic`
    // with a minimal send/update context backed by the REST API.
    async continueConversation(_appId, reference, logic) {
      const serviceUrl = reference.serviceUrl;
      if (!serviceUrl) {
        throw new Error("Missing serviceUrl in conversation reference");
      }
      const conversationId = reference.conversation?.id;
      if (!conversationId) {
        throw new Error("Missing conversation.id in conversation reference");
      }
      // Fetch a fresh token for each call via a token factory.
      // The SDK's App manages token caching/refresh internally.
      const getToken = async () => {
        const token = await (
          app as unknown as { getBotToken(): Promise<{ toString(): string } | null> }
        ).getBotToken();
        return token ? String(token) : undefined;
      };
      // Build a send context that uses the Bot Framework REST API.
      // Pass a token factory (not a cached value) so each request gets a fresh token.
      const apiClient = new sdk.Client(serviceUrl, {
        token: async () => (await getToken()) || undefined,
        headers: { "User-Agent": buildUserAgent() },
      } as Record<string, unknown>);
      const sendContext = {
        // Accepts either plain text or a full activity object.
        async sendActivity(textOrActivity: string | object): Promise<unknown> {
          const activity =
            typeof textOrActivity === "string"
              ? ({ type: "message", text: textOrActivity } as Record<string, unknown>)
              : (textOrActivity as Record<string, unknown>);
          // NOTE(review): the `as Parameters<… extends … ? never : never>[0]`
          // cast below resolves to `never` on both branches, so it disables
          // type-checking of this payload entirely — consider casting to the
          // real parameter type of `create` instead.
          const response = await apiClient.conversations.activities(conversationId).create({
            type: "message",
            ...activity,
            from: reference.agent
              ? { id: reference.agent.id, name: reference.agent.name ?? "", role: "bot" }
              : undefined,
            conversation: {
              id: conversationId,
              conversationType: reference.conversation?.conversationType ?? "personal",
            },
          } as Parameters<
            typeof apiClient.conversations.activities extends (id: string) => {
              create: (a: infer T) => unknown;
            }
              ? never
              : never
          >[0]);
          return response;
        },
        async updateActivity(
          activityUpdate: { id: string } & Record<string, unknown>,
        ): Promise<unknown> {
          const activityId = activityUpdate.id;
          if (!activityId) {
            throw new Error("updateActivity requires an activity id");
          }
          // Bot Framework REST API: PUT /v3/conversations/{conversationId}/activities/{activityId}
          return await updateActivityViaRest({
            serviceUrl,
            conversationId,
            activityId,
            activity: activityUpdate,
            token: await getToken(),
          });
        },
      };
      await logic(sendContext);
    },
    // Inbound path: wrap the already-parsed request body in a turn context
    // and manage the HTTP status handshake with Teams.
    async process(req, res, logic) {
      const request = req as { body?: Record<string, unknown> };
      const response = res as {
        status: (code: number) => { send: (body?: unknown) => void };
      };
      const activity = request.body;
      const isInvoke = (activity as Record<string, unknown>)?.type === "invoke";
      try {
        const serviceUrl = activity?.serviceUrl as string | undefined;
        // Token factory — fetches a fresh token for each API call.
        const getToken = async () => {
          const token = await (
            app as unknown as { getBotToken(): Promise<{ toString(): string } | null> }
          ).getBotToken();
          return token ? String(token) : undefined;
        };
        const context = {
          activity,
          async sendActivity(textOrActivity: string | object): Promise<unknown> {
            const msg =
              typeof textOrActivity === "string"
                ? ({ type: "message", text: textOrActivity } as Record<string, unknown>)
                : (textOrActivity as Record<string, unknown>);
            // invokeResponse is handled by the HTTP response from process(),
            // not by posting a new activity to Bot Framework.
            if (msg.type === "invokeResponse") {
              return { id: "invokeResponse" };
            }
            if (!serviceUrl) {
              return { id: "unknown" };
            }
            const convId = (activity?.conversation as Record<string, unknown>)?.id as
              | string
              | undefined;
            if (!convId) {
              return { id: "unknown" };
            }
            const apiClient = new sdk.Client(serviceUrl, {
              token: async () => (await getToken()) || undefined,
              headers: { "User-Agent": buildUserAgent() },
            } as Record<string, unknown>);
            const botId = (activity?.recipient as Record<string, unknown>)?.id as
              | string
              | undefined;
            const botName = (activity?.recipient as Record<string, unknown>)?.name as
              | string
              | undefined;
            const convType = (activity?.conversation as Record<string, unknown>)
              ?.conversationType as string | undefined;
            // Preserve replyToId for threaded replies (replyStyle: "thread")
            const inboundActivityId = (activity as Record<string, unknown>)?.id as
              | string
              | undefined;
            // NOTE(review): same `never` cast pattern as in continueConversation —
            // the payload is effectively unchecked here.
            return await apiClient.conversations.activities(convId).create({
              type: "message",
              ...msg,
              from: botId ? { id: botId, name: botName ?? "", role: "bot" } : undefined,
              conversation: { id: convId, conversationType: convType ?? "personal" },
              ...(inboundActivityId && !msg.replyToId ? { replyToId: inboundActivityId } : {}),
            } as Parameters<
              typeof apiClient.conversations.activities extends (id: string) => {
                create: (a: infer T) => unknown;
              }
                ? never
                : never
            >[0]);
          },
          // Sequential by design: preserves message ordering within a turn.
          async sendActivities(
            activities: Array<{ type: string } & Record<string, unknown>>,
          ): Promise<unknown> {
            const results = [];
            for (const act of activities) {
              results.push(await context.sendActivity(act));
            }
            return results;
          },
          async updateActivity(
            activityUpdate: { id: string } & Record<string, unknown>,
          ): Promise<unknown> {
            const activityId = activityUpdate.id;
            if (!activityId || !serviceUrl) {
              return { id: "unknown" };
            }
            const convId = (activity?.conversation as Record<string, unknown>)?.id as
              | string
              | undefined;
            if (!convId) {
              return { id: "unknown" };
            }
            return await updateActivityViaRest({
              serviceUrl,
              conversationId: convId,
              activityId,
              activity: activityUpdate,
              token: await getToken(),
            });
          },
        };
        // For invoke activities, send HTTP 200 immediately before running
        // handler logic so slow operations (file uploads, reflections) don't
        // hit Teams invoke timeouts ("unable to reach app").
        if (isInvoke) {
          response.status(200).send();
        }
        await logic(context);
        if (!isInvoke) {
          response.status(200).send();
        }
      } catch (err) {
        // NOTE(review): for invoke activities the 200 was already sent, so the
        // error is swallowed here with no logging — consider logging `err`.
        if (!isInvoke) {
          response.status(500).send({ error: String(err) });
        }
      }
    },
    async updateActivity(_context, activity) {
      // No-op: updateActivity is handled via REST in streaming-message.ts
    },
    async deleteActivity(_context, _reference) {
      // No-op: deleteActivity not yet implemented for Teams SDK adapter
    },
  };
}
export async function loadMSTeamsSdkWithAuth(creds: MSTeamsCredentials) {
const sdk = await loadMSTeamsSdk();
const authConfig = buildMSTeamsAuthConfig(creds, sdk);
return { sdk, authConfig };
const app = createMSTeamsApp(creds, sdk);
return { sdk, app };
}
/**
 * Create a Bot Framework JWT validator using the Teams SDK's built-in
 * JwtValidator pre-configured for Bot Framework signing keys.
 *
 * Validates: signature (JWKS), audience (appId), issuer (api.botframework.com),
 * and expiration (5-minute clock tolerance).
 *
 * NOTE(review): this imports from "@microsoft/teams.apps/dist/…", a deep path
 * that is not part of the package's public API and can break on any SDK patch
 * release — confirm whether the SDK exports createServiceTokenValidator publicly.
 *
 * @param creds Bot credentials; appId/tenantId configure the expected token.
 * @returns An object whose `validate(authHeader, serviceUrl?)` resolves true
 *   only when the token parses and passes validation (and, when provided,
 *   matches the expected serviceUrl).
 */
export async function createBotFrameworkJwtValidator(creds: MSTeamsCredentials): Promise<{
  validate: (authHeader: string, serviceUrl?: string) => Promise<boolean>;
}> {
  const { createServiceTokenValidator } =
    await import("@microsoft/teams.apps/dist/middleware/auth/jwt-validator.js");
  const validator = createServiceTokenValidator(creds.appId, creds.tenantId);
  return {
    async validate(authHeader: string, serviceUrl?: string): Promise<boolean> {
      // Accept both "Bearer <jwt>" and a bare token.
      const token = authHeader.startsWith("Bearer ") ? authHeader.slice(7) : authHeader;
      if (!token) {
        return false;
      }
      try {
        const result = await validator.validateAccessToken(
          token,
          serviceUrl ? { validateServiceUrl: { expectedServiceUrl: serviceUrl } } : undefined,
        );
        return result != null;
      } catch {
        // Any verification failure (bad signature, wrong audience, expired…)
        // is reported as "invalid" rather than thrown to the caller.
        return false;
      }
    },
  };
}

View File

@@ -12,7 +12,7 @@ import type {
import { resolveGraphChatId } from "./graph-upload.js";
import type { MSTeamsAdapter } from "./messenger.js";
import { getMSTeamsRuntime } from "./runtime.js";
import { createMSTeamsAdapter, loadMSTeamsSdkWithAuth } from "./sdk.js";
import { createMSTeamsAdapter, createMSTeamsTokenProvider, loadMSTeamsSdkWithAuth } from "./sdk.js";
import { resolveMSTeamsCredentials } from "./token.js";
export type MSTeamsConversationType = "personal" | "groupChat" | "channel";
@@ -131,11 +131,11 @@ export async function resolveMSTeamsSendContext(params: {
const core = getMSTeamsRuntime();
const log = core.logging.getChildLogger({ name: "msteams:send" });
const { sdk, authConfig } = await loadMSTeamsSdkWithAuth(creds);
const adapter = createMSTeamsAdapter(authConfig, sdk);
const { sdk, app } = await loadMSTeamsSdkWithAuth(creds);
const adapter = createMSTeamsAdapter(app, sdk);
// Create token provider for Graph API / OneDrive operations
const tokenProvider = new sdk.MsalTokenProvider(authConfig) as MSTeamsAccessTokenProvider;
// Create token provider adapter for Graph API / OneDrive operations
const tokenProvider: MSTeamsAccessTokenProvider = createMSTeamsTokenProvider(app);
// Determine conversation type from stored reference
const storedConversationType = ref.conversation?.conversationType?.toLowerCase() ?? "";

View File

@@ -0,0 +1,206 @@
import { describe, expect, it, vi } from "vitest";
import { TeamsHttpStream } from "./streaming-message.js";
// Unit tests for the streaminfo streaming protocol implementation.
// NOTE(review): the 700 ms sleeps assume the draft-stream loop flushes the
// first update ahead of the 1500 ms throttle interval (leading-edge send) —
// TODO confirm createDraftStreamLoop's flush behavior.
describe("TeamsHttpStream", () => {
  it("sends first chunk as typing activity with streaminfo", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "stream-1" };
      }),
    });
    // Enough text to pass MIN_INITIAL_CHARS threshold
    stream.update("Hello, this is a test response that is long enough.");
    // Wait for throttle to flush
    await new Promise((r) => setTimeout(r, 700));
    expect(sent.length).toBeGreaterThanOrEqual(1);
    const firstActivity = sent[0] as Record<string, unknown>;
    expect(firstActivity.type).toBe("typing");
    expect(typeof firstActivity.text).toBe("string");
    expect(firstActivity.text as string).toContain("Hello");
    // Should have streaminfo entity
    const entities = firstActivity.entities as Array<Record<string, unknown>>;
    expect(entities).toEqual(
      expect.arrayContaining([
        expect.objectContaining({ type: "streaminfo", streamType: "streaming" }),
      ]),
    );
  });
  it("sends final message activity on finalize", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "stream-1" };
      }),
    });
    stream.update("Hello, this is a complete response for finalization testing.");
    await new Promise((r) => setTimeout(r, 700));
    await stream.finalize();
    // Find the final message activity
    const finalActivity = sent.find((a) => (a as Record<string, unknown>).type === "message") as
      | Record<string, unknown>
      | undefined;
    expect(finalActivity).toBeDefined();
    expect(finalActivity!.text).toBe(
      "Hello, this is a complete response for finalization testing.",
    );
    // No cursor in final
    expect(finalActivity!.text as string).not.toContain("\u258D");
    // Should have AI-generated entity
    const entities = finalActivity!.entities as Array<Record<string, unknown>>;
    expect(entities).toEqual(
      expect.arrayContaining([expect.objectContaining({ additionalType: ["AIGeneratedContent"] })]),
    );
    // Should have streaminfo with final type
    expect(entities).toEqual(
      expect.arrayContaining([
        expect.objectContaining({ type: "streaminfo", streamType: "final" }),
      ]),
    );
  });
  it("does not send below MIN_INITIAL_CHARS", async () => {
    const sendActivity = vi.fn(async () => ({ id: "x" }));
    const stream = new TeamsHttpStream({ sendActivity });
    stream.update("Hi");
    await new Promise((r) => setTimeout(r, 700));
    expect(sendActivity).not.toHaveBeenCalled();
  });
  it("finalize with no content does nothing", async () => {
    const sendActivity = vi.fn(async () => ({ id: "x" }));
    const stream = new TeamsHttpStream({ sendActivity });
    await stream.finalize();
    expect(sendActivity).not.toHaveBeenCalled();
  });
  it("finalize sends content even if no chunks were streamed", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "msg-1" };
      }),
    });
    // Short text — below MIN_INITIAL_CHARS, so no streaming chunk sent
    stream.update("Short");
    await stream.finalize();
    // Should send final message even though no chunks were streamed
    expect(sent.length).toBe(1);
    const activity = sent[0] as Record<string, unknown>;
    expect(activity.type).toBe("message");
    expect(activity.text).toBe("Short");
  });
  it("sets feedbackLoopEnabled on final message", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "stream-1" };
      }),
      feedbackLoopEnabled: true,
    });
    stream.update("A response long enough to pass the minimum character threshold for streaming.");
    await new Promise((r) => setTimeout(r, 700));
    await stream.finalize();
    const finalActivity = sent.find(
      (a) => (a as Record<string, unknown>).type === "message",
    ) as Record<string, unknown>;
    const channelData = finalActivity.channelData as Record<string, unknown>;
    expect(channelData.feedbackLoopEnabled).toBe(true);
  });
  it("sends informative update with streamType informative", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "stream-1" };
      }),
    });
    await stream.sendInformativeUpdate("Thinking...");
    expect(sent.length).toBe(1);
    const activity = sent[0] as Record<string, unknown>;
    expect(activity.type).toBe("typing");
    expect(activity.text).toBe("Thinking...");
    const entities = activity.entities as Array<Record<string, unknown>>;
    expect(entities).toEqual(
      expect.arrayContaining([
        expect.objectContaining({
          type: "streaminfo",
          streamType: "informative",
          streamSequence: 1,
        }),
      ]),
    );
  });
  it("informative update establishes streamId for subsequent chunks", async () => {
    const sent: unknown[] = [];
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async (activity) => {
        sent.push(activity);
        return { id: "stream-1" };
      }),
    });
    await stream.sendInformativeUpdate("Working...");
    stream.update("Hello, this is a long enough response for streaming to begin.");
    // 1600 ms > the 1500 ms throttle, so the next chunk must have flushed.
    await new Promise((r) => setTimeout(r, 1600));
    // Second activity (streaming chunk) should have the streamId from the informative update
    expect(sent.length).toBeGreaterThanOrEqual(2);
    const chunk = sent[1] as Record<string, unknown>;
    const entities = chunk.entities as Array<Record<string, unknown>>;
    expect(entities).toEqual(
      expect.arrayContaining([
        expect.objectContaining({ type: "streaminfo", streamId: "stream-1" }),
      ]),
    );
  });
  it("hasContent is true after update", () => {
    const stream = new TeamsHttpStream({
      sendActivity: vi.fn(async () => ({ id: "x" })),
    });
    expect(stream.hasContent).toBe(false);
    stream.update("some text");
    expect(stream.hasContent).toBe(true);
  });
  it("double finalize is a no-op", async () => {
    const sendActivity = vi.fn(async () => ({ id: "x" }));
    const stream = new TeamsHttpStream({ sendActivity });
    stream.update("A response long enough to pass the minimum character threshold.");
    await stream.finalize();
    const callCount = sendActivity.mock.calls.length;
    await stream.finalize();
    expect(sendActivity.mock.calls.length).toBe(callCount);
  });
});

View File

@@ -0,0 +1,267 @@
/**
* Teams streaming message using the streaminfo entity protocol.
*
* Follows the official Teams SDK pattern:
* 1. First chunk → POST a typing activity with streaminfo entity (streamType: "streaming")
* 2. Subsequent chunks → POST typing activities with streaminfo + incrementing streamSequence
* 3. Finalize → POST a message activity with streaminfo (streamType: "final")
*
* Uses the shared draft-stream-loop for throttling (avoids rate limits).
*/
import { createDraftStreamLoop, type DraftStreamLoop } from "openclaw/plugin-sdk/channel-lifecycle";
/** Default throttle interval between stream updates (ms).
* Teams docs recommend buffering tokens for 1.5-2s; limit is 1 req/s. */
const DEFAULT_THROTTLE_MS = 1500;
/** Minimum chars before sending the first streaming message. */
const MIN_INITIAL_CHARS = 20;
/** Teams message text limit. */
const TEAMS_MAX_CHARS = 4000;
type StreamSendFn = (activity: Record<string, unknown>) => Promise<{ id?: string } | unknown>;
export type TeamsStreamOptions = {
/** Function to send an activity (POST to Bot Framework). */
sendActivity: StreamSendFn;
/** Whether to enable feedback loop on the final message. */
feedbackLoopEnabled?: boolean;
/** Throttle interval in ms. Default: 1500 (DEFAULT_THROTTLE_MS). */
throttleMs?: number;
/** Called on errors during streaming. */
onError?: (err: unknown) => void;
};
import { AI_GENERATED_ENTITY } from "./ai-entity.js";
/** Pull a string `id` out of an arbitrary send-activity response, if present. */
function extractId(response: unknown): string | undefined {
  if (typeof response !== "object" || response === null) {
    return undefined;
  }
  const candidate = (response as { id?: unknown }).id;
  return typeof candidate === "string" ? candidate : undefined;
}
/**
 * Assemble a Teams `streaminfo` entity.
 *
 * `streamId` is only included once known (the service returns it with the
 * first chunk). `streamSequence` is included for informative/streaming
 * updates but must be absent on the final message.
 */
function buildStreamInfoEntity(
  streamId: string | undefined,
  streamType: "informative" | "streaming" | "final",
  streamSequence?: number,
): Record<string, unknown> {
  return {
    type: "streaminfo",
    streamType,
    ...(streamId ? { streamId } : {}),
    ...(streamSequence != null ? { streamSequence } : {}),
  };
}
/**
 * Streams an LLM response into a Teams conversation using the streaminfo
 * protocol: throttled "typing" chunks, optional informative updates, and a
 * single closing "message" activity carrying the full text.
 */
export class TeamsHttpStream {
  // Transport used to POST activities to Bot Framework.
  private sendActivity: StreamSendFn;
  // Whether the final message enables the Teams feedback (thumbs) loop.
  private feedbackLoopEnabled: boolean;
  // Optional error sink; streaming errors are reported here, never thrown.
  private onError?: (err: unknown) => void;
  // Latest full text received from the LLM — each update() replaces it.
  private accumulatedText = "";
  // Stream id assigned by the service on the first chunk/informative update.
  private streamId: string | undefined = undefined;
  // Monotonic streamSequence counter shared by informative + streaming chunks.
  private sequenceNumber = 0;
  private stopped = false;
  private finalized = false;
  // Set when a chunk POST fails or the text exceeds TEAMS_MAX_CHARS.
  private streamFailed = false;
  // Last text successfully delivered as a chunk (used to close a failed stream).
  private lastStreamedText = "";
  private loop: DraftStreamLoop;
  constructor(options: TeamsStreamOptions) {
    this.sendActivity = options.sendActivity;
    this.feedbackLoopEnabled = options.feedbackLoopEnabled ?? false;
    this.onError = options.onError;
    this.loop = createDraftStreamLoop({
      throttleMs: options.throttleMs ?? DEFAULT_THROTTLE_MS,
      isStopped: () => this.stopped,
      sendOrEditStreamMessage: (text) => this.pushStreamChunk(text),
    });
  }
  /**
   * Send an informative status update (blue progress bar in Teams).
   * Call this immediately when a message is received, before LLM starts generating.
   * Establishes the stream so subsequent chunks continue from this stream ID.
   */
  async sendInformativeUpdate(text: string): Promise<void> {
    if (this.stopped || this.finalized) {
      return;
    }
    this.sequenceNumber++;
    const activity: Record<string, unknown> = {
      type: "typing",
      text,
      entities: [buildStreamInfoEntity(this.streamId, "informative", this.sequenceNumber)],
    };
    try {
      const response = await this.sendActivity(activity);
      // First successful send establishes the stream id for all later chunks.
      if (!this.streamId) {
        this.streamId = extractId(response);
      }
    } catch (err) {
      this.onError?.(err);
    }
  }
  /**
   * Ingest partial text from the LLM token stream.
   * Called by onPartialReply — accumulates text and throttles updates.
   */
  update(text: string): void {
    if (this.stopped || this.finalized) {
      return;
    }
    this.accumulatedText = text;
    // Wait for minimum chars before first send (avoids push notification flicker)
    if (!this.streamId && this.accumulatedText.length < MIN_INITIAL_CHARS) {
      return;
    }
    // Text exceeded Teams limit — finalize immediately with what we have
    // so the user isn't left waiting while the LLM keeps generating.
    if (this.accumulatedText.length > TEAMS_MAX_CHARS) {
      this.streamFailed = true;
      void this.finalize();
      return;
    }
    // Don't append cursor — Teams requires each chunk to be a prefix of subsequent chunks.
    // The cursor character would cause "content should contain previously streamed content" errors.
    this.loop.update(this.accumulatedText);
  }
  /**
   * Finalize the stream — send the final message activity.
   */
  async finalize(): Promise<void> {
    if (this.finalized) {
      return;
    }
    this.finalized = true;
    this.stopped = true;
    this.loop.stop();
    // Let any chunk POST that is already in flight settle before closing.
    await this.loop.waitForInFlight();
    // If no text was streamed (e.g. agent sent a card via tool instead of
    // streaming text), just return. Teams auto-clears the informative progress
    // bar after its streaming timeout. Sending an empty final message fails
    // with 403.
    if (!this.accumulatedText.trim()) {
      return;
    }
    // If streaming failed (>4000 chars or POST errors), close the stream
    // with the last successfully streamed text so Teams removes the "Stop"
    // button and replaces the partial chunks. deliver() handles the complete
    // response since hasContent returns false when streamFailed is true.
    if (this.streamFailed) {
      if (this.streamId) {
        try {
          await this.sendActivity({
            type: "message",
            text: this.lastStreamedText || "",
            channelData: { feedbackLoopEnabled: this.feedbackLoopEnabled },
            entities: [AI_GENERATED_ENTITY, buildStreamInfoEntity(this.streamId, "final")],
          });
        } catch {
          // Best effort — stream will auto-close after Teams timeout
        }
      }
      return;
    }
    // Send final message activity.
    // Per the spec: type=message, streamType=final, NO streamSequence.
    try {
      const entities: Array<Record<string, unknown>> = [AI_GENERATED_ENTITY];
      if (this.streamId) {
        entities.push(buildStreamInfoEntity(this.streamId, "final"));
      }
      const finalActivity: Record<string, unknown> = {
        type: "message",
        text: this.accumulatedText,
        channelData: {
          feedbackLoopEnabled: this.feedbackLoopEnabled,
        },
        entities,
      };
      await this.sendActivity(finalActivity);
    } catch (err) {
      this.onError?.(err);
    }
  }
  /**
   * Whether the stream holds text (via update()) and has not failed.
   * NOTE: reflects text received, not chunks actually delivered to Teams.
   */
  get hasContent(): boolean {
    return this.accumulatedText.length > 0 && !this.streamFailed;
  }
  /** Whether the stream has been finalized. */
  get isFinalized(): boolean {
    return this.finalized;
  }
  /** Whether streaming fell back (not used in this implementation). */
  get isFallback(): boolean {
    return false;
  }
  /**
   * Send a single streaming chunk as a typing activity with streaminfo.
   * Per the Teams REST API spec:
   * - First chunk: no streamId, streamSequence=1 → returns 201 with { id: streamId }
   * - Subsequent chunks: include streamId, increment streamSequence → returns 202
   */
  private async pushStreamChunk(text: string): Promise<boolean> {
    // NOTE(review): `stopped && !finalized` is never true on any current code
    // path — finalize() sets both flags together and nothing else sets
    // `stopped`. Confirm whether the intent was `this.stopped || this.finalized`
    // (which would also drop the in-flight chunk during finalize()).
    if (this.stopped && !this.finalized) {
      return false;
    }
    this.sequenceNumber++;
    const activity: Record<string, unknown> = {
      type: "typing",
      text,
      entities: [buildStreamInfoEntity(this.streamId, "streaming", this.sequenceNumber)],
    };
    try {
      const response = await this.sendActivity(activity);
      if (!this.streamId) {
        this.streamId = extractId(response);
      }
      this.lastStreamedText = text;
      return true;
    } catch (err) {
      // Surface as much HTTP context as possible (axios-style or plain).
      const axiosData = (err as { response?: { data?: unknown; status?: number } })?.response;
      const statusCode = axiosData?.status ?? (err as { statusCode?: number })?.statusCode;
      const responseBody = axiosData?.data ? JSON.stringify(axiosData.data).slice(0, 300) : "";
      const msg = err instanceof Error ? err.message : String(err);
      this.onError?.(
        new Error(
          `stream POST failed (HTTP ${statusCode ?? "?"}): ${msg}${responseBody ? ` body=${responseBody}` : ""}`,
        ),
      );
      this.streamFailed = true;
      return false;
    }
  }
}

View File

@@ -0,0 +1,45 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
// Mock the runtime before importing buildUserAgent
const mockRuntime = {
version: "2026.3.19",
};
vi.mock("./runtime.js", () => ({
getMSTeamsRuntime: vi.fn(() => mockRuntime),
}));
import { getMSTeamsRuntime } from "./runtime.js";
import { buildUserAgent, resetUserAgentCache } from "./user-agent.js";
// Unit tests for the combined "teams.ts[apps]/<sdk> OpenClaw/<version>" UA string.
describe("buildUserAgent", () => {
  beforeEach(() => {
    // buildUserAgent caches its result; reset so each test sees a fresh build.
    resetUserAgentCache();
    vi.mocked(getMSTeamsRuntime).mockReturnValue(mockRuntime as never);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("returns teams.ts[apps]/<sdk> OpenClaw/<version> format", () => {
    const ua = buildUserAgent();
    expect(ua).toMatch(/^teams\.ts\[apps\]\/.+ OpenClaw\/2026\.3\.19$/);
  });
  it("reflects the runtime version", () => {
    vi.mocked(getMSTeamsRuntime).mockReturnValue({ version: "1.2.3" } as never);
    const ua = buildUserAgent();
    expect(ua).toMatch(/OpenClaw\/1\.2\.3$/);
  });
  it("returns OpenClaw/unknown when runtime is not initialized", () => {
    // resolveOpenClawVersion swallows the throw and degrades to "unknown".
    vi.mocked(getMSTeamsRuntime).mockImplementation(() => {
      throw new Error("MSTeams runtime not initialized");
    });
    const ua = buildUserAgent();
    expect(ua).toMatch(/OpenClaw\/unknown$/);
    // SDK version should still be present
    expect(ua).toMatch(/^teams\.ts\[apps\]\//);
  });
});

View File

@@ -0,0 +1,45 @@
import { createRequire } from "node:module";
import { getMSTeamsRuntime } from "./runtime.js";
let cachedUserAgent: string | undefined;
// Read the installed @microsoft/teams.apps version from its package.json.
// createRequire lets an ESM module resolve the package; any failure (package
// missing, exports map blocking package.json) degrades to "unknown".
function resolveTeamsSdkVersion(): string {
  try {
    const require = createRequire(import.meta.url);
    const pkg = require("@microsoft/teams.apps/package.json") as { version?: string };
    return pkg.version ?? "unknown";
  } catch {
    return "unknown";
  }
}
// Read the host OpenClaw version from the provider runtime. Before the
// runtime is initialized getMSTeamsRuntime() throws; fall back to "unknown".
function resolveOpenClawVersion(): string {
  try {
    return getMSTeamsRuntime().version;
  } catch {
    return "unknown";
  }
}
/** Reset the cached User-Agent (for testing). */
export function resetUserAgentCache(): void {
  cachedUserAgent = undefined;
}
/**
 * Build a combined User-Agent string that preserves the Teams SDK identity
 * and appends the OpenClaw version.
 *
 * Format: "teams.ts[apps]/<sdk-version> OpenClaw/<openclaw-version>"
 * Example: "teams.ts[apps]/2.0.5 OpenClaw/2026.3.22"
 *
 * This lets the Teams backend track SDK usage while also identifying the
 * host application.
 */
export function buildUserAgent(): string {
  // Cached after the first call; tests clear it via resetUserAgentCache().
  if (cachedUserAgent) {
    return cachedUserAgent;
  }
  cachedUserAgent = `teams.ts[apps]/${resolveTeamsSdkVersion()} OpenClaw/${resolveOpenClawVersion()}`;
  return cachedUserAgent;
}

View File

@@ -0,0 +1,57 @@
import { describe, expect, it } from "vitest";
import { buildGroupWelcomeText, buildWelcomeCard } from "./welcome-card.js";
// Unit tests for the install-time welcome Adaptive Card.
describe("buildWelcomeCard", () => {
  it("builds card with default prompt starters", () => {
    const card = buildWelcomeCard();
    expect(card.type).toBe("AdaptiveCard");
    expect(card.version).toBe("1.5");
    const body = card.body as Array<{ text: string }>;
    expect(body[0]?.text).toContain("OpenClaw");
    const actions = card.actions as Array<{ title: string; data: unknown }>;
    expect(actions.length).toBe(3);
    expect(actions[0]?.title).toBe("What can you do?");
  });
  it("uses custom bot name", () => {
    const card = buildWelcomeCard({ botName: "TestBot" });
    const body = card.body as Array<{ text: string }>;
    expect(body[0]?.text).toContain("TestBot");
  });
  it("uses custom prompt starters", () => {
    const card = buildWelcomeCard({
      promptStarters: ["Do X", "Do Y"],
    });
    const actions = card.actions as Array<{ title: string; data: unknown }>;
    expect(actions.length).toBe(2);
    expect(actions[0]?.title).toBe("Do X");
    expect(actions[1]?.title).toBe("Do Y");
    // Verify imBack data
    const data = actions[0]?.data as { msteams: { type: string; value: string } };
    expect(data.msteams.type).toBe("imBack");
    expect(data.msteams.value).toBe("Do X");
  });
  it("falls back to defaults when promptStarters is empty", () => {
    // An empty array is falsy via ?.length, so the defaults apply.
    const card = buildWelcomeCard({ promptStarters: [] });
    const actions = card.actions as Array<{ title: string }>;
    expect(actions.length).toBe(3);
  });
});
describe("buildGroupWelcomeText", () => {
  it("includes bot name", () => {
    const text = buildGroupWelcomeText("MyBot");
    // Both the plain name and the @mention form should appear.
    expect(text).toContain("MyBot");
    expect(text).toContain("@MyBot");
  });

  it("defaults to OpenClaw", () => {
    expect(buildGroupWelcomeText()).toContain("OpenClaw");
  });
});

View File

@@ -0,0 +1,57 @@
/**
* Builds an Adaptive Card for welcoming users when the bot is added to a conversation.
*/
// Fallback prompt starters shown when the channel config supplies none.
const DEFAULT_PROMPT_STARTERS = [
  "What can you do?",
  "Summarize my last meeting",
  "Help me draft an email",
];

export type WelcomeCardOptions = {
  /** Bot display name. Falls back to "OpenClaw". */
  botName?: string;
  /** Custom prompt starters. Falls back to defaults. */
  promptStarters?: string[];
};

/**
 * Build a welcome Adaptive Card for 1:1 personal chats.
 *
 * @param options - Optional bot-name and prompt-starter overrides.
 * @returns An Adaptive Card (schema v1.5) payload with a greeting, an intro
 *   line, and one `Action.Submit` per prompt starter. Each action carries an
 *   `imBack` payload so tapping it sends the label back as a user message.
 */
export function buildWelcomeCard(options?: WelcomeCardOptions): Record<string, unknown> {
  const botName = options?.botName || "OpenClaw";
  // An omitted OR explicitly-empty starters list falls back to the defaults.
  const configured = options?.promptStarters;
  const starters = configured && configured.length > 0 ? configured : DEFAULT_PROMPT_STARTERS;

  const greeting = {
    type: "TextBlock",
    text: `Hi! I'm ${botName}.`,
    weight: "bolder",
    size: "medium",
  };
  const intro = {
    type: "TextBlock",
    text: "I can help you with questions, tasks, and more. Here are some things to try:",
    wrap: true,
  };

  const actions: Array<Record<string, unknown>> = [];
  for (const label of starters) {
    actions.push({
      type: "Action.Submit",
      title: label,
      data: { msteams: { type: "imBack", value: label } },
    });
  }

  return {
    type: "AdaptiveCard",
    version: "1.5",
    body: [greeting, intro],
    actions,
  };
}
/**
 * Build a brief welcome message for group chats (when the bot is @mentioned).
 *
 * @param botName - Display name; falsy values fall back to "OpenClaw".
 * @returns A one-line greeting telling users how to @mention the bot.
 */
export function buildGroupWelcomeText(botName?: string): string {
  const name = botName || "OpenClaw";
  return ["Hi! I'm ", name, ". Mention me with @", name, " to get started."].join("");
}

1385
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@@ -49,7 +49,7 @@ function findFatalUnresolvedImport(lines) {
}
const normalizedLine = line.replace(ANSI_ESCAPE_RE, "");
if (!normalizedLine.includes("extensions/")) {
if (!normalizedLine.includes("extensions/") && !normalizedLine.includes("node_modules/")) {
return normalizedLine;
}
}

View File

@@ -2628,6 +2628,58 @@ describe("dispatchReplyFromConfig", () => {
);
});
it("passes configOverride to replyResolver when provided", async () => {
  setNoAbort();
  const dispatcher = createDispatcher();
  const ctx = buildTestCtx({ Provider: "msteams", Surface: "msteams" });
  const overrideCfg = {
    agents: { defaults: { userTimezone: "America/New_York" } },
  } as OpenClawConfig;
  // Capture whatever config the resolver is handed so we can assert on it below.
  let seenCfg: OpenClawConfig | undefined;
  const replyResolver = async (
    _ctx: MsgContext,
    _opts?: GetReplyOptions,
    cfgArg?: OpenClawConfig,
  ) => {
    seenCfg = cfgArg;
    return { text: "hi" } satisfies ReplyPayload;
  };
  await dispatchReplyFromConfig({
    ctx,
    cfg: emptyConfig,
    dispatcher,
    replyResolver,
    configOverride: overrideCfg,
  });
  expect(seenCfg).toBe(overrideCfg);
});
it("passes base cfg to replyResolver when configOverride is not provided", async () => {
  setNoAbort();
  const baseCfg = { agents: { defaults: { userTimezone: "UTC" } } } as OpenClawConfig;
  const dispatcher = createDispatcher();
  const ctx = buildTestCtx({ Provider: "telegram", Surface: "telegram" });
  // Without an override, the resolver should receive the base config as-is.
  let seenCfg: OpenClawConfig | undefined;
  const replyResolver = async (
    _ctx: MsgContext,
    _opts?: GetReplyOptions,
    cfgArg?: OpenClawConfig,
  ) => {
    seenCfg = cfgArg;
    return { text: "hi" } satisfies ReplyPayload;
  };
  await dispatchReplyFromConfig({ ctx, cfg: baseCfg, dispatcher, replyResolver });
  expect(seenCfg).toBe(baseCfg);
});
it("suppresses isReasoning payloads from final replies (WhatsApp channel)", async () => {
setNoAbort();
const dispatcher = createDispatcher();

View File

@@ -152,6 +152,8 @@ export async function dispatchReplyFromConfig(params: {
dispatcher: ReplyDispatcher;
replyOptions?: Omit<GetReplyOptions, "onToolResult" | "onBlockReply">;
replyResolver?: typeof import("./get-reply-from-config.runtime.js").getReplyFromConfig;
/** Optional config override passed to getReplyFromConfig (e.g. per-sender timezone). */
configOverride?: OpenClawConfig;
}): Promise<DispatchFromConfigResult> {
const { ctx, cfg, dispatcher } = params;
const diagnosticsEnabled = isDiagnosticsEnabled(cfg);
@@ -641,7 +643,7 @@ export async function dispatchReplyFromConfig(params: {
return run();
},
},
cfg,
params.configOverride ?? cfg,
);
if (ctx.AcpDispatchTailAfterReset === true) {

View File

@@ -121,4 +121,16 @@ export type MSTeamsConfig = {
healthMonitor?: ChannelHealthMonitorConfig;
/** Outbound response prefix override for this channel/account. */
responsePrefix?: string;
/** Show a welcome Adaptive Card when the bot is added to a 1:1 chat. Default: true. */
welcomeCard?: boolean;
/** Custom prompt starter labels shown on the welcome card. */
promptStarters?: string[];
/** Show a welcome message when the bot is added to a group chat. Default: false. */
groupWelcomeCard?: boolean;
/** Enable the Teams feedback loop (thumbs up/down) on AI-generated messages. Default: true. */
feedbackEnabled?: boolean;
/** Enable background reflection when a user gives negative feedback. Default: true. */
feedbackReflection?: boolean;
/** Minimum interval (ms) between reflections per session. Default: 300000 (5 min). */
feedbackReflectionCooldownMs?: number;
};

View File

@@ -2091,10 +2091,10 @@ export const GENERATED_BUNDLED_PLUGIN_METADATA = [
channel: {
id: "msteams",
label: "Microsoft Teams",
selectionLabel: "Microsoft Teams (Bot Framework)",
selectionLabel: "Microsoft Teams (Teams SDK)",
docsPath: "/channels/msteams",
docsLabel: "msteams",
blurb: "Bot Framework; enterprise support.",
blurb: "Teams SDK; enterprise support.",
aliases: ["teams"],
order: 60,
},