// Mirror of https://github.com/moltbot/moltbot.git (synced 2026-04-28)
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";

import type { AssistantMessage } from "@mariozechner/pi-ai";

import type { OpenClawConfig } from "../../config/config.js";
import type { EmbeddedRunAttemptResult } from "../pi-embedded-runner/run/types.js";
/**
 * Paths for an isolated on-disk workspace used by embedded pi-runner tests.
 * Created by {@link createEmbeddedPiRunnerTestWorkspace}; removing `tempRoot`
 * recursively cleans up everything.
 */
export type EmbeddedPiRunnerTestWorkspace = {
  /** Unique temp directory (mkdtemp under os.tmpdir()) containing the rest. */
  tempRoot: string;
  /** `<tempRoot>/agent` — agent state directory. */
  agentDir: string;
  /** `<tempRoot>/workspace` — working files directory. */
  workspaceDir: string;
};
export async function createEmbeddedPiRunnerTestWorkspace(
|
|
prefix: string,
|
|
): Promise<EmbeddedPiRunnerTestWorkspace> {
|
|
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
|
const agentDir = path.join(tempRoot, "agent");
|
|
const workspaceDir = path.join(tempRoot, "workspace");
|
|
await fs.mkdir(agentDir, { recursive: true });
|
|
await fs.mkdir(workspaceDir, { recursive: true });
|
|
return { tempRoot, agentDir, workspaceDir };
|
|
}
|
|
|
|
export async function cleanupEmbeddedPiRunnerTestWorkspace(
|
|
workspace: EmbeddedPiRunnerTestWorkspace | undefined,
|
|
): Promise<void> {
|
|
if (!workspace) {
|
|
return;
|
|
}
|
|
await fs.rm(workspace.tempRoot, { recursive: true, force: true });
|
|
}
|
|
|
|
export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig {
|
|
return {
|
|
models: {
|
|
providers: {
|
|
openai: {
|
|
api: "openai-responses",
|
|
apiKey: "sk-test",
|
|
baseUrl: "https://example.com",
|
|
models: modelIds.map((id) => ({
|
|
id,
|
|
name: `Mock ${id}`,
|
|
reasoning: false,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 16_000,
|
|
maxTokens: 2048,
|
|
})),
|
|
},
|
|
},
|
|
},
|
|
};
|
|
}
|
|
|
|
export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> {
|
|
return await task();
|
|
}
|
|
|
|
export function createMockUsage(input: number, output: number) {
|
|
return {
|
|
input,
|
|
output,
|
|
cacheRead: 0,
|
|
cacheWrite: 0,
|
|
totalTokens: input + output,
|
|
cost: {
|
|
input: 0,
|
|
output: 0,
|
|
cacheRead: 0,
|
|
cacheWrite: 0,
|
|
total: 0,
|
|
},
|
|
};
|
|
}
|
|
|
|
const baseUsage = createMockUsage(0, 0);
|
|
|
|
export function buildEmbeddedRunnerAssistant(
|
|
overrides: Partial<AssistantMessage>,
|
|
): AssistantMessage {
|
|
return {
|
|
role: "assistant",
|
|
content: [],
|
|
api: "openai-responses",
|
|
provider: "openai",
|
|
model: "mock-1",
|
|
usage: baseUsage,
|
|
stopReason: "stop",
|
|
timestamp: Date.now(),
|
|
...overrides,
|
|
};
|
|
}
|
|
|
|
export function makeEmbeddedRunnerAttempt(
|
|
overrides: Partial<EmbeddedRunAttemptResult>,
|
|
): EmbeddedRunAttemptResult {
|
|
return {
|
|
aborted: false,
|
|
timedOut: false,
|
|
timedOutDuringCompaction: false,
|
|
promptError: null,
|
|
sessionIdUsed: "session:test",
|
|
systemPromptReport: undefined,
|
|
messagesSnapshot: [],
|
|
assistantTexts: [],
|
|
toolMetas: [],
|
|
lastAssistant: undefined,
|
|
didSendViaMessagingTool: false,
|
|
messagingToolSentTexts: [],
|
|
messagingToolSentMediaUrls: [],
|
|
messagingToolSentTargets: [],
|
|
cloudCodeAssistFormatError: false,
|
|
...overrides,
|
|
};
|
|
}
|
|
|
|
export function createResolvedEmbeddedRunnerModel(
|
|
provider: string,
|
|
modelId: string,
|
|
options?: { baseUrl?: string },
|
|
) {
|
|
return {
|
|
model: {
|
|
id: modelId,
|
|
name: modelId,
|
|
api: "openai-responses",
|
|
provider,
|
|
baseUrl: options?.baseUrl ?? `https://example.com/${provider}`,
|
|
reasoning: false,
|
|
input: ["text"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: 16_000,
|
|
maxTokens: 2048,
|
|
},
|
|
error: undefined,
|
|
authStorage: {
|
|
setRuntimeApiKey: () => undefined,
|
|
},
|
|
modelRegistry: {},
|
|
};
|
|
}
|