mirror of
https://github.com/moltbot/moltbot.git
synced 2026-05-06 15:18:58 +00:00
fix: route tasks json through lean cli path
This commit is contained in:
@@ -25,6 +25,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Agents/reasoning: treat orphan closing reasoning tags with following answer text as a privacy boundary across delivery, history, streaming, and Control UI sanitizers so malformed local-model output cannot leak chain-of-thought text. Fixes #67092. Thanks @AnildoSilva.
|
||||
- Memory-core: run one-shot memory CLI commands through transient builtin and QMD managers so `memory index`, `memory status --index`, and `memory search` no longer start long-lived file watchers that can hit macOS `EMFILE` limits. Fixes #59101; carries forward #49851. Thanks @mbear469210-coder and @maoyuanxue.
|
||||
- Agents/ACP: ship the Claude ACP adapter with OpenClaw and require Claude result messages before idle can complete a prompt, preventing parent agents from waking early on long-running `sessions_spawn(runtime: "acp", agentId: "claude")` children. Fixes #72080. Thanks @siavash-saki and @iannwu.
|
||||
- CLI/tasks: route `tasks --json`, `tasks list --json`, and `tasks audit --json` through a lean JSON path so read-only task inspection no longer loads unrelated plugin/runtime command graphs. Fixes #66238. Thanks @ChuckChambers.
|
||||
- Memory-core: re-resolve the active runtime config whenever `memory_search` or `memory_get` executes, so provider changes made by `config.patch` stop leaving stale embedding backends behind in existing tool instances. Fixes #61098. Thanks @BradGroux and @Linux2010.
|
||||
- WebChat: keep bare `/new` and `/reset` startup instructions out of visible chat history while preserving `/reset <note>` as user-visible transcript text. Fixes #72369. Thanks @collynes and @haishmg.
|
||||
- CLI/doctor: remove dangling channel config, heartbeat targets, and channel model overrides when stale plugin repair removes a missing channel plugin, preventing Gateway boot loops after failed plugin reinstalls. Fixes #65293. Thanks @yidecode.
|
||||
|
||||
@@ -86,6 +86,7 @@ Usage:
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --runs 12`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --preset real`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --preset real --case status --case gatewayStatus --runs 3`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --preset real --case tasksJson --case tasksListJson --case tasksAuditJson --runs 3`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --entry openclaw.mjs --entry-secondary dist/entry.js --preset all`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --preset all --output .artifacts/cli-startup-bench-all.json`
|
||||
- `pnpm tsx scripts/bench-cli-startup.ts --preset real --case gatewayStatusJson --output .artifacts/cli-startup-bench-smoke.json`
|
||||
@@ -95,7 +96,7 @@ Usage:
|
||||
Presets:
|
||||
|
||||
- `startup`: `--version`, `--help`, `health`, `health --json`, `status --json`, `status`
|
||||
- `real`: `health`, `status`, `status --json`, `sessions`, `sessions --json`, `agents list --json`, `gateway status`, `gateway status --json`, `gateway health --json`, `config get gateway.port`
|
||||
- `real`: `health`, `status`, `status --json`, `sessions`, `sessions --json`, `tasks --json`, `tasks list --json`, `tasks audit --json`, `agents list --json`, `gateway status`, `gateway status --json`, `gateway health --json`, `config get gateway.port`
|
||||
- `all`: both presets
|
||||
|
||||
Output includes `sampleCount`, avg, p50, p95, min/max, exit-code/signal distribution, and max RSS summaries for each command. Optional `--cpu-prof-dir` / `--heap-prof-dir` writes V8 profiles per run so timing and profile capture use the same harness.
|
||||
|
||||
@@ -81,6 +81,24 @@ const COMMAND_CASES: readonly CommandCase[] = [
|
||||
args: ["sessions", "--json"],
|
||||
presets: ["real"],
|
||||
},
|
||||
{
|
||||
id: "tasksJson",
|
||||
name: "tasks --json",
|
||||
args: ["tasks", "--json"],
|
||||
presets: ["real"],
|
||||
},
|
||||
{
|
||||
id: "tasksListJson",
|
||||
name: "tasks list --json",
|
||||
args: ["tasks", "list", "--json"],
|
||||
presets: ["real"],
|
||||
},
|
||||
{
|
||||
id: "tasksAuditJson",
|
||||
name: "tasks audit --json",
|
||||
args: ["tasks", "audit", "--json"],
|
||||
presets: ["real"],
|
||||
},
|
||||
{
|
||||
id: "agentsListJson",
|
||||
name: "agents list --json",
|
||||
|
||||
@@ -10,6 +10,8 @@ export type CliRoutedCommandId =
|
||||
| "config-unset"
|
||||
| "models-list"
|
||||
| "models-status"
|
||||
| "tasks-list"
|
||||
| "tasks-audit"
|
||||
| "channels-list"
|
||||
| "channels-status";
|
||||
|
||||
@@ -131,6 +133,23 @@ export const cliCommandCatalog: readonly CliCommandCatalogEntry[] = [
|
||||
policy: { ensureCliPath: false, routeConfigGuard: "always" },
|
||||
route: { id: "models-status" },
|
||||
},
|
||||
{
|
||||
commandPath: ["tasks", "list"],
|
||||
exact: true,
|
||||
policy: { ensureCliPath: false, routeConfigGuard: "when-suppressed", loadPlugins: "never" },
|
||||
route: { id: "tasks-list" },
|
||||
},
|
||||
{
|
||||
commandPath: ["tasks", "audit"],
|
||||
exact: true,
|
||||
policy: { ensureCliPath: false, routeConfigGuard: "when-suppressed", loadPlugins: "never" },
|
||||
route: { id: "tasks-audit" },
|
||||
},
|
||||
{
|
||||
commandPath: ["tasks"],
|
||||
policy: { ensureCliPath: false, routeConfigGuard: "when-suppressed", loadPlugins: "never" },
|
||||
route: { id: "tasks-list" },
|
||||
},
|
||||
{ commandPath: ["backup"], policy: { bypassConfigGuard: true } },
|
||||
{ commandPath: ["doctor"], policy: { bypassConfigGuard: true } },
|
||||
{
|
||||
|
||||
@@ -261,3 +261,69 @@ export function parseChannelsStatusRouteArgs(argv: string[]) {
|
||||
timeout: timeout.value,
|
||||
};
|
||||
}
|
||||
|
||||
function parseTasksListRouteArgsForCommandPath(argv: string[], commandPath: string[]) {
|
||||
if (!hasFlag(argv, "--json")) {
|
||||
return null;
|
||||
}
|
||||
const positionals = getCommandPositionalsWithRootOptions(argv, {
|
||||
commandPath,
|
||||
booleanFlags: ["--json"],
|
||||
valueFlags: ["--runtime", "--status"],
|
||||
});
|
||||
if (!positionals || positionals.length !== 0) {
|
||||
return null;
|
||||
}
|
||||
const runtime = parseOptionalFlagValue(argv, "--runtime");
|
||||
if (!runtime.ok) {
|
||||
return null;
|
||||
}
|
||||
const status = parseOptionalFlagValue(argv, "--status");
|
||||
if (!status.ok) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
json: true as const,
|
||||
runtime: runtime.value,
|
||||
status: status.value,
|
||||
};
|
||||
}
|
||||
|
||||
export function parseTasksListRouteArgs(argv: string[]) {
|
||||
return (
|
||||
parseTasksListRouteArgsForCommandPath(argv, ["tasks"]) ??
|
||||
parseTasksListRouteArgsForCommandPath(argv, ["tasks", "list"])
|
||||
);
|
||||
}
|
||||
|
||||
export function parseTasksAuditRouteArgs(argv: string[]) {
|
||||
if (!hasFlag(argv, "--json")) {
|
||||
return null;
|
||||
}
|
||||
const positionals = getCommandPositionalsWithRootOptions(argv, {
|
||||
commandPath: ["tasks", "audit"],
|
||||
booleanFlags: ["--json"],
|
||||
valueFlags: ["--severity", "--code", "--limit"],
|
||||
});
|
||||
if (!positionals || positionals.length !== 0) {
|
||||
return null;
|
||||
}
|
||||
const severity = parseOptionalFlagValue(argv, "--severity");
|
||||
if (!severity.ok) {
|
||||
return null;
|
||||
}
|
||||
const code = parseOptionalFlagValue(argv, "--code");
|
||||
if (!code.ok) {
|
||||
return null;
|
||||
}
|
||||
const limit = getPositiveIntFlagValue(argv, "--limit");
|
||||
if (limit === null) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
json: true as const,
|
||||
severity: severity.value,
|
||||
code: code.value,
|
||||
limit,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import {
|
||||
|
||||
export type RouteSpec = {
|
||||
matches: (path: string[]) => boolean;
|
||||
canRun?: (argv: string[]) => boolean;
|
||||
loadPlugins?: boolean | ((argv: string[]) => boolean);
|
||||
run: (argv: string[]) => Promise<boolean>;
|
||||
};
|
||||
@@ -27,6 +28,7 @@ function createParsedRoute(params: {
|
||||
return {
|
||||
matches: (path) =>
|
||||
matchesCommandPath(path, params.entry.commandPath, { exact: params.entry.exact }),
|
||||
canRun: (argv) => Boolean(params.definition.parseArgs(argv)),
|
||||
loadPlugins: params.entry.route?.preloadPlugins
|
||||
? createCommandLoadPlugins(params.entry.commandPath)
|
||||
: undefined,
|
||||
|
||||
@@ -11,6 +11,8 @@ import {
|
||||
parseModelsStatusRouteArgs,
|
||||
parseSessionsRouteArgs,
|
||||
parseStatusRouteArgs,
|
||||
parseTasksAuditRouteArgs,
|
||||
parseTasksListRouteArgs,
|
||||
} from "./route-args.js";
|
||||
|
||||
type RouteArgParser<TArgs> = (argv: string[]) => TArgs | null;
|
||||
@@ -132,6 +134,20 @@ export const routedCommandDefinitions = {
|
||||
await modelsStatusCommand(args, defaultRuntime);
|
||||
},
|
||||
}),
|
||||
"tasks-list": defineRoutedCommand({
|
||||
parseArgs: parseTasksListRouteArgs,
|
||||
runParsedArgs: async (args) => {
|
||||
const { tasksListJsonCommand } = await import("../../commands/tasks-json.js");
|
||||
await tasksListJsonCommand(args, defaultRuntime);
|
||||
},
|
||||
}),
|
||||
"tasks-audit": defineRoutedCommand({
|
||||
parseArgs: parseTasksAuditRouteArgs,
|
||||
runParsedArgs: async (args) => {
|
||||
const { tasksAuditJsonCommand } = await import("../../commands/tasks-json.js");
|
||||
await tasksAuditJsonCommand(args, defaultRuntime);
|
||||
},
|
||||
}),
|
||||
"channels-list": defineRoutedCommand({
|
||||
parseArgs: parseChannelsListRouteArgs,
|
||||
runParsedArgs: async (args) => {
|
||||
|
||||
@@ -7,6 +7,8 @@ const modelsListCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const modelsStatusCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const runDaemonStatusMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const statusJsonCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const tasksListJsonCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const tasksAuditJsonCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const channelsListCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
const channelsStatusCommandMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
|
||||
@@ -38,6 +40,15 @@ vi.mock("../../commands/status-json.js", () => ({
|
||||
statusJsonCommand: statusJsonCommandMock,
|
||||
}));
|
||||
|
||||
vi.mock("../../commands/tasks-json.js", () => ({
|
||||
tasksListJsonCommand: tasksListJsonCommandMock,
|
||||
tasksAuditJsonCommand: tasksAuditJsonCommandMock,
|
||||
}));
|
||||
|
||||
vi.mock("../../commands/tasks.js", () => {
|
||||
throw new Error("routed task JSON commands must not import the full tasks command module");
|
||||
});
|
||||
|
||||
vi.mock("../../commands/channels/list.js", () => ({
|
||||
channelsListCommand: channelsListCommandMock,
|
||||
}));
|
||||
@@ -376,4 +387,117 @@ describe("program routes", () => {
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("routes tasks list JSON through the lean task JSON command", async () => {
|
||||
const rootRoute = expectRoute(["tasks"]);
|
||||
expect(rootRoute?.loadPlugins).toBeUndefined();
|
||||
expect(rootRoute?.canRun?.(["node", "openclaw", "tasks"])).toBe(false);
|
||||
await expect(
|
||||
rootRoute?.run([
|
||||
"node",
|
||||
"openclaw",
|
||||
"tasks",
|
||||
"--json",
|
||||
"--runtime",
|
||||
"cli",
|
||||
"--status=running",
|
||||
]),
|
||||
).resolves.toBe(true);
|
||||
expect(tasksListJsonCommandMock).toHaveBeenCalledWith(
|
||||
{ json: true, runtime: "cli", status: "running" },
|
||||
expect.any(Object),
|
||||
);
|
||||
|
||||
const listRoute = expectRoute(["tasks", "list"]);
|
||||
expect(listRoute?.loadPlugins).toBeUndefined();
|
||||
await expect(
|
||||
listRoute?.run(["node", "openclaw", "tasks", "list", "--json", "--runtime=cron"]),
|
||||
).resolves.toBe(true);
|
||||
expect(tasksListJsonCommandMock).toHaveBeenLastCalledWith(
|
||||
{ json: true, runtime: "cron", status: undefined },
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("routes parent task filter values that command-path discovery sees as positionals", async () => {
|
||||
const separateValueArgv = [
|
||||
"node",
|
||||
"openclaw",
|
||||
"tasks",
|
||||
"--json",
|
||||
"--runtime",
|
||||
"cli",
|
||||
"--status",
|
||||
"running",
|
||||
];
|
||||
const separateValueRoute = findRoutedCommand(["tasks", "cli"], separateValueArgv);
|
||||
expect(separateValueRoute).not.toBeNull();
|
||||
await expect(separateValueRoute?.run(separateValueArgv)).resolves.toBe(true);
|
||||
expect(tasksListJsonCommandMock).toHaveBeenCalledWith(
|
||||
{ json: true, runtime: "cli", status: "running" },
|
||||
expect.any(Object),
|
||||
);
|
||||
|
||||
const parentOptionBeforeSubcommandArgv = [
|
||||
"node",
|
||||
"openclaw",
|
||||
"tasks",
|
||||
"--runtime",
|
||||
"cli",
|
||||
"list",
|
||||
"--json",
|
||||
];
|
||||
const parentOptionBeforeSubcommandRoute = findRoutedCommand(
|
||||
["tasks", "cli"],
|
||||
parentOptionBeforeSubcommandArgv,
|
||||
);
|
||||
expect(parentOptionBeforeSubcommandRoute).not.toBeNull();
|
||||
await expect(
|
||||
parentOptionBeforeSubcommandRoute?.run(parentOptionBeforeSubcommandArgv),
|
||||
).resolves.toBe(true);
|
||||
expect(tasksListJsonCommandMock).toHaveBeenLastCalledWith(
|
||||
{ json: true, runtime: "cli", status: undefined },
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("routes tasks audit JSON through the lean task JSON command", async () => {
|
||||
const route = expectRoute(["tasks", "audit"]);
|
||||
expect(route?.loadPlugins).toBeUndefined();
|
||||
expect(route?.canRun?.(["node", "openclaw", "tasks", "audit"])).toBe(false);
|
||||
await expect(
|
||||
route?.run([
|
||||
"node",
|
||||
"openclaw",
|
||||
"tasks",
|
||||
"audit",
|
||||
"--json",
|
||||
"--severity",
|
||||
"error",
|
||||
"--code=stale_running",
|
||||
"--limit",
|
||||
"5",
|
||||
]),
|
||||
).resolves.toBe(true);
|
||||
expect(tasksAuditJsonCommandMock).toHaveBeenCalledWith(
|
||||
{ json: true, severity: "error", code: "stale_running", limit: 5 },
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("returns false for task JSON routes when option values are missing or unknown", async () => {
|
||||
await expectRunFalse(["tasks"], ["node", "openclaw", "tasks", "--json", "--runtime"]);
|
||||
await expectRunFalse(["tasks", "list"], ["node", "openclaw", "tasks", "list"]);
|
||||
await expectRunFalse(
|
||||
["tasks", "audit"],
|
||||
["node", "openclaw", "tasks", "audit", "--json", "--limit"],
|
||||
);
|
||||
await expectRunFalse(
|
||||
["tasks", "audit"],
|
||||
["node", "openclaw", "tasks", "audit", "--json", "--unknown"],
|
||||
);
|
||||
expect(
|
||||
findRoutedCommand(["tasks", "cli"], ["node", "openclaw", "tasks", "--runtime", "cli"]),
|
||||
).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -2,9 +2,12 @@ import { routedCommands, type RouteSpec } from "./route-specs.js";
|
||||
|
||||
export type { RouteSpec } from "./route-specs.js";
|
||||
|
||||
export function findRoutedCommand(path: string[]): RouteSpec | null {
|
||||
export function findRoutedCommand(path: string[], argv?: string[]): RouteSpec | null {
|
||||
for (const route of routedCommands) {
|
||||
if (route.matches(path)) {
|
||||
if (argv && route.canRun && !route.canRun(argv)) {
|
||||
continue;
|
||||
}
|
||||
return route;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -136,7 +136,10 @@ describe("tryRouteCli", () => {
|
||||
true,
|
||||
);
|
||||
|
||||
expect(findRoutedCommandMock).toHaveBeenCalledWith(["status"]);
|
||||
expect(findRoutedCommandMock).toHaveBeenCalledWith(
|
||||
["status"],
|
||||
["node", "openclaw", "--log-level", "debug", "status"],
|
||||
);
|
||||
expect(ensureConfigReadyMock).toHaveBeenCalledWith({
|
||||
runtime: expect.any(Object),
|
||||
commandPath: ["status"],
|
||||
@@ -151,4 +154,18 @@ describe("tryRouteCli", () => {
|
||||
|
||||
expect(emitCliBannerMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("falls back before bootstrap when the route cannot parse the argv", async () => {
|
||||
findRoutedCommandMock.mockReturnValue({
|
||||
canRun: () => false,
|
||||
loadPlugins: true,
|
||||
run: runRouteMock,
|
||||
});
|
||||
|
||||
await expect(tryRouteCli(["node", "openclaw", "tasks", "list"])).resolves.toBe(false);
|
||||
|
||||
expect(ensureConfigReadyMock).not.toHaveBeenCalled();
|
||||
expect(ensurePluginRegistryLoadedMock).not.toHaveBeenCalled();
|
||||
expect(runRouteMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -49,10 +49,13 @@ export async function tryRouteCli(argv: string[]): Promise<boolean> {
|
||||
if (!invocation.commandPath[0]) {
|
||||
return false;
|
||||
}
|
||||
const route = findRoutedCommand(invocation.commandPath);
|
||||
const route = findRoutedCommand(invocation.commandPath, argv);
|
||||
if (!route) {
|
||||
return false;
|
||||
}
|
||||
if (route.canRun && !route.canRun(argv)) {
|
||||
return false;
|
||||
}
|
||||
await prepareRoutedCommand({
|
||||
argv,
|
||||
commandPath: invocation.commandPath,
|
||||
|
||||
165
src/commands/tasks-json.test.ts
Normal file
165
src/commands/tasks-json.test.ts
Normal file
@@ -0,0 +1,165 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import {
|
||||
createManagedTaskFlow,
|
||||
resetTaskFlowRegistryForTests,
|
||||
} from "../tasks/task-flow-registry.js";
|
||||
import {
|
||||
createTaskRecord,
|
||||
resetTaskRegistryDeliveryRuntimeForTests,
|
||||
resetTaskRegistryForTests,
|
||||
} from "../tasks/task-registry.js";
|
||||
import { withTempDir } from "../test-helpers/temp-dir.js";
|
||||
import { tasksAuditJsonCommand, tasksListJsonCommand } from "./tasks-json.js";
|
||||
|
||||
// Snapshot of OPENCLAW_STATE_DIR so afterEach can restore the caller's environment.
const ORIGINAL_STATE_DIR = process.env.OPENCLAW_STATE_DIR;
|
||||
|
||||
function createRuntime(): RuntimeEnv {
|
||||
return {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
function readJsonLog(runtime: RuntimeEnv): unknown {
|
||||
return JSON.parse(String(vi.mocked(runtime.log).mock.calls[0]?.[0]));
|
||||
}
|
||||
|
||||
async function withTaskJsonStateDir(run: () => Promise<void>): Promise<void> {
|
||||
await withTempDir({ prefix: "openclaw-tasks-json-command-" }, async (root) => {
|
||||
process.env.OPENCLAW_STATE_DIR = root;
|
||||
resetTaskRegistryDeliveryRuntimeForTests();
|
||||
resetTaskRegistryForTests({ persist: false });
|
||||
resetTaskFlowRegistryForTests({ persist: false });
|
||||
try {
|
||||
await run();
|
||||
} finally {
|
||||
resetTaskRegistryDeliveryRuntimeForTests();
|
||||
resetTaskRegistryForTests({ persist: false });
|
||||
resetTaskFlowRegistryForTests({ persist: false });
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
describe("tasks JSON commands", () => {
  beforeEach(() => {
    // Start every test on real timers; the audit test switches to fake timers itself.
    vi.useRealTimers();
  });

  afterEach(() => {
    vi.useRealTimers();
    // Restore OPENCLAW_STATE_DIR exactly as it was before the suite ran.
    if (ORIGINAL_STATE_DIR === undefined) {
      delete process.env.OPENCLAW_STATE_DIR;
    } else {
      process.env.OPENCLAW_STATE_DIR = ORIGINAL_STATE_DIR;
    }
    resetTaskRegistryDeliveryRuntimeForTests();
    resetTaskRegistryForTests({ persist: false });
    resetTaskFlowRegistryForTests({ persist: false });
  });

  it("lists task records with runtime and status filters", async () => {
    await withTaskJsonStateDir(async () => {
      // Two records differing in runtime and status; filters should keep only the first.
      createTaskRecord({
        runtime: "cli",
        ownerKey: "agent:main:main",
        scopeKind: "session",
        runId: "run-cli",
        status: "running",
        task: "Inspect issue backlog",
      });
      createTaskRecord({
        runtime: "cron",
        ownerKey: "agent:main:main",
        scopeKind: "session",
        runId: "run-cron",
        status: "queued",
        task: "Refresh schedule",
      });

      const runtime = createRuntime();
      await tasksListJsonCommand({ json: true, runtime: "cli", status: "running" }, runtime);

      const payload = readJsonLog(runtime) as {
        count: number;
        runtime: string | null;
        status: string | null;
        tasks: Array<{ runtime: string; status: string; runId: string }>;
      };
      expect(payload).toMatchObject({
        count: 1,
        runtime: "cli",
        status: "running",
      });
      expect(payload.tasks).toEqual([
        expect.objectContaining({ runtime: "cli", status: "running", runId: "run-cli" }),
      ]);
    });
  });

  it("keeps audit JSON shape and combined task-flow sorting", async () => {
    await withTaskJsonStateDir(async () => {
      const now = Date.now();
      // Backdate creation times with fake timers so the records register as stale.
      vi.useFakeTimers();
      vi.setSystemTime(now - 40 * 60_000);
      createTaskRecord({
        runtime: "cli",
        ownerKey: "agent:main:main",
        scopeKind: "session",
        runId: "task-stale-running",
        status: "running",
        task: "Inspect issue backlog",
      });
      vi.setSystemTime(now);
      // The older running flow should sort first among the combined findings.
      const runningFlow = createManagedTaskFlow({
        ownerKey: "agent:main:main",
        controllerId: "tests/tasks-json-command",
        goal: "Running flow",
        status: "running",
        createdAt: now - 45 * 60_000,
        updatedAt: now - 45 * 60_000,
      });
      createManagedTaskFlow({
        ownerKey: "agent:main:main",
        controllerId: "tests/tasks-json-command",
        goal: "Waiting flow",
        status: "waiting",
        createdAt: now - 40 * 60_000,
        updatedAt: now - 40 * 60_000,
      });

      const runtime = createRuntime();
      await tasksAuditJsonCommand({ json: true, limit: 1 }, runtime);

      const payload = readJsonLog(runtime) as {
        count: number;
        filteredCount: number;
        displayed: number;
        filters: { limit: number | null };
        summary: {
          byCode: Record<string, number>;
          taskFlows: { byCode: Record<string, number> };
          combined: { total: number; errors: number; warnings: number };
        };
        findings: Array<{ kind: string; code: string; token?: string }>;
      };
      // `limit: 1` trims the displayed findings but not the total/filtered counts.
      expect(payload.count).toBe(5);
      expect(payload.filteredCount).toBe(5);
      expect(payload.displayed).toBe(1);
      expect(payload.filters.limit).toBe(1);
      expect(payload.summary.byCode.stale_running).toBe(1);
      expect(payload.summary.taskFlows.byCode.stale_running).toBe(1);
      expect(payload.summary.taskFlows.byCode.stale_waiting).toBe(1);
      expect(payload.summary.taskFlows.byCode.missing_linked_tasks).toBe(2);
      expect(payload.summary.combined).toEqual({ total: 5, errors: 3, warnings: 2 });
      expect(payload.findings).toEqual([
        expect.objectContaining({
          kind: "task_flow",
          code: "stale_running",
          token: runningFlow.flowId,
        }),
      ]);
    });
  });
});
|
||||
189
src/commands/tasks-json.ts
Normal file
189
src/commands/tasks-json.ts
Normal file
@@ -0,0 +1,189 @@
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { writeRuntimeJson } from "../runtime.js";
|
||||
import { listTaskRecords } from "../tasks/runtime-internal.js";
|
||||
import {
|
||||
listTaskFlowAuditFindings,
|
||||
summarizeTaskFlowAuditFindings,
|
||||
type TaskFlowAuditCode,
|
||||
type TaskFlowAuditSeverity,
|
||||
} from "../tasks/task-flow-registry.audit.js";
|
||||
import type { TaskFlowRecord } from "../tasks/task-flow-registry.types.js";
|
||||
import { listTaskFlowRecords } from "../tasks/task-flow-runtime-internal.js";
|
||||
import {
|
||||
listTaskAuditFindings,
|
||||
summarizeTaskAuditFindings,
|
||||
type TaskAuditCode,
|
||||
type TaskAuditSeverity,
|
||||
} from "../tasks/task-registry.audit.js";
|
||||
import { compareTaskAuditFindingSortKeys } from "../tasks/task-registry.audit.shared.js";
|
||||
import type { TaskRecord } from "../tasks/task-registry.types.js";
|
||||
|
||||
// Union of audit codes / severities across plain task records and task-flow records.
type TaskSystemAuditCode = TaskAuditCode | TaskFlowAuditCode;
type TaskSystemAuditSeverity = TaskAuditSeverity | TaskFlowAuditSeverity;

// One combined audit finding; `kind` discriminates between the two sources.
// `token` is the task ID or flow ID; exactly one of `task` / `flow` is set
// (flow may be absent for flow findings that reference a missing record).
type TaskSystemAuditFinding = {
  kind: "task" | "task_flow";
  severity: TaskSystemAuditSeverity;
  code: TaskSystemAuditCode;
  detail: string;
  ageMs?: number;
  status?: string;
  token?: string;
  task?: TaskRecord;
  flow?: TaskFlowRecord;
};
|
||||
|
||||
function listTaskJsonRecords(): TaskRecord[] {
|
||||
// Keep the routed JSON path a read-only store snapshot; maintenance reconciliation imports
|
||||
// broader task runtimes and can keep JSON-only CLI processes alive.
|
||||
return listTaskRecords();
|
||||
}
|
||||
|
||||
// Parsed arguments for the lean `tasks --json` / `tasks list --json` path.
// `runtime` and `status` are optional equality filters over task records.
export type TasksListJsonArgs = {
  json?: boolean;
  runtime?: string;
  status?: string;
};

// Parsed arguments for the lean `tasks audit --json` path.
// `severity`/`code` filter findings; `limit` caps how many are displayed.
export type TasksAuditJsonArgs = {
  json?: boolean;
  severity?: string;
  code?: string;
  limit?: number;
};
|
||||
|
||||
function compareSystemAuditFindings(left: TaskSystemAuditFinding, right: TaskSystemAuditFinding) {
|
||||
return compareTaskAuditFindingSortKeys(
|
||||
{
|
||||
severity: left.severity,
|
||||
ageMs: left.ageMs,
|
||||
createdAt: left.task?.createdAt ?? left.flow?.createdAt ?? 0,
|
||||
},
|
||||
{
|
||||
severity: right.severity,
|
||||
ageMs: right.ageMs,
|
||||
createdAt: right.task?.createdAt ?? right.flow?.createdAt ?? 0,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
function toSystemAuditFindings(params: {
|
||||
severityFilter?: TaskSystemAuditSeverity;
|
||||
codeFilter?: TaskSystemAuditCode;
|
||||
}) {
|
||||
const tasks = listTaskJsonRecords();
|
||||
const flows = listTaskFlowRecords();
|
||||
const taskFindings = listTaskAuditFindings({ tasks });
|
||||
const flowFindings = listTaskFlowAuditFindings({ flows });
|
||||
const allFindings: TaskSystemAuditFinding[] = [
|
||||
...taskFindings.map((finding) => ({
|
||||
kind: "task" as const,
|
||||
severity: finding.severity,
|
||||
code: finding.code,
|
||||
detail: finding.detail,
|
||||
ageMs: finding.ageMs,
|
||||
status: finding.task.status,
|
||||
token: finding.task.taskId,
|
||||
task: finding.task,
|
||||
})),
|
||||
...flowFindings.map((finding) => ({
|
||||
kind: "task_flow" as const,
|
||||
severity: finding.severity,
|
||||
code: finding.code,
|
||||
detail: finding.detail,
|
||||
ageMs: finding.ageMs,
|
||||
status: finding.flow?.status ?? "n/a",
|
||||
token: finding.flow?.flowId,
|
||||
...(finding.flow ? { flow: finding.flow } : {}),
|
||||
})),
|
||||
];
|
||||
const filteredFindings = allFindings
|
||||
.filter((finding) => {
|
||||
if (params.severityFilter && finding.severity !== params.severityFilter) {
|
||||
return false;
|
||||
}
|
||||
if (params.codeFilter && finding.code !== params.codeFilter) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.toSorted(compareSystemAuditFindings);
|
||||
const sortedAllFindings = [...allFindings].toSorted(compareSystemAuditFindings);
|
||||
return {
|
||||
allFindings: sortedAllFindings,
|
||||
filteredFindings,
|
||||
taskFindings,
|
||||
summary: {
|
||||
total: sortedAllFindings.length,
|
||||
errors: sortedAllFindings.filter((finding) => finding.severity === "error").length,
|
||||
warnings: sortedAllFindings.filter((finding) => finding.severity !== "error").length,
|
||||
taskFlows: summarizeTaskFlowAuditFindings(flowFindings),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function buildTasksListJsonPayload(opts: TasksListJsonArgs) {
|
||||
const runtimeFilter = opts.runtime?.trim();
|
||||
const statusFilter = opts.status?.trim();
|
||||
const tasks = listTaskJsonRecords().filter((task) => {
|
||||
if (runtimeFilter && task.runtime !== runtimeFilter) {
|
||||
return false;
|
||||
}
|
||||
if (statusFilter && task.status !== statusFilter) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
return {
|
||||
count: tasks.length,
|
||||
runtime: runtimeFilter ?? null,
|
||||
status: statusFilter ?? null,
|
||||
tasks,
|
||||
};
|
||||
}
|
||||
|
||||
export function buildTasksAuditJsonPayload(opts: TasksAuditJsonArgs) {
|
||||
const severityFilter = opts.severity?.trim() as TaskSystemAuditSeverity | undefined;
|
||||
const codeFilter = opts.code?.trim() as TaskSystemAuditCode | undefined;
|
||||
const { allFindings, filteredFindings, taskFindings, summary } = toSystemAuditFindings({
|
||||
severityFilter,
|
||||
codeFilter,
|
||||
});
|
||||
const limit = typeof opts.limit === "number" && opts.limit > 0 ? opts.limit : undefined;
|
||||
const displayed = limit ? filteredFindings.slice(0, limit) : filteredFindings;
|
||||
const legacySummary = summarizeTaskAuditFindings(taskFindings);
|
||||
return {
|
||||
count: allFindings.length,
|
||||
filteredCount: filteredFindings.length,
|
||||
displayed: displayed.length,
|
||||
filters: {
|
||||
severity: severityFilter ?? null,
|
||||
code: codeFilter ?? null,
|
||||
limit: limit ?? null,
|
||||
},
|
||||
summary: {
|
||||
...legacySummary,
|
||||
taskFlows: summary.taskFlows,
|
||||
combined: {
|
||||
total: summary.total,
|
||||
errors: summary.errors,
|
||||
warnings: summary.warnings,
|
||||
},
|
||||
},
|
||||
findings: displayed,
|
||||
};
|
||||
}
|
||||
|
||||
export async function tasksListJsonCommand(
|
||||
opts: TasksListJsonArgs,
|
||||
runtime: RuntimeEnv,
|
||||
): Promise<void> {
|
||||
writeRuntimeJson(runtime, buildTasksListJsonPayload(opts));
|
||||
}
|
||||
|
||||
export async function tasksAuditJsonCommand(
|
||||
opts: TasksAuditJsonArgs,
|
||||
runtime: RuntimeEnv,
|
||||
): Promise<void> {
|
||||
writeRuntimeJson(runtime, buildTasksAuditJsonPayload(opts));
|
||||
}
|
||||
Reference in New Issue
Block a user