diff --git a/.env.example b/.env.example
index 1daf2d8..4da6711 100644
--- a/.env.example
+++ b/.env.example
@@ -1,4 +1,4 @@
-# GhostCrew Configuration
+# PentestAgent Configuration
# API Keys (set at least one for chat model)
OPENAI_API_KEY=
@@ -8,11 +8,11 @@ TAVILY_API_KEY=
# Chat Model (any LiteLLM-supported model)
# OpenAI: gpt-5, gpt-4.1, gpt-4.1-mini
# Anthropic: claude-sonnet-4-20250514, claude-opus-4-20250514
-GHOSTCREW_MODEL=gpt-5
+PENTESTAGENT_MODEL=gpt-5
# Embeddings (for RAG knowledge base)
# Options: openai, local (default: openai if OPENAI_API_KEY set, else local)
-# GHOSTCREW_EMBEDDINGS=local
+# PENTESTAGENT_EMBEDDINGS=local
# Settings
-GHOSTCREW_DEBUG=false
\ No newline at end of file
+PENTESTAGENT_DEBUG=false
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 335143f..2f0ed15 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,9 +1,9 @@
-# GhostCrew - AI Penetration Testing Agent
+# PentestAgent - AI Penetration Testing Agent
# Base image with common tools
FROM python:3.11-slim
-LABEL maintainer="GhostCrew"
+LABEL maintainer="PentestAgent"
LABEL description="AI penetration testing"
# Set environment variables
@@ -50,14 +50,14 @@ RUN pip install --no-cache-dir --upgrade pip && \
COPY . .
# Create non-root user for security
-RUN useradd -m -s /bin/bash ghostcrew && \
- chown -R ghostcrew:ghostcrew /app
+RUN useradd -m -s /bin/bash pentestagent && \
+ chown -R pentestagent:pentestagent /app
# Switch to non-root user (can switch back for privileged operations)
-USER ghostcrew
+USER pentestagent
# Expose any needed ports
EXPOSE 8080
# Default command
-CMD ["python", "-m", "ghostcrew"]
+CMD ["python", "-m", "pentestagent"]
diff --git a/Dockerfile.kali b/Dockerfile.kali
index 2787e07..228879a 100644
--- a/Dockerfile.kali
+++ b/Dockerfile.kali
@@ -1,10 +1,10 @@
-# GhostCrew Kali Linux Image
+# PentestAgent Kali Linux Image
# Full penetration testing environment
FROM kalilinux/kali-rolling
LABEL maintainer="Masic"
-LABEL description="GhostCrew with Kali Linux tools"
+LABEL description="PentestAgent with Kali Linux tools"
# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive
@@ -82,4 +82,4 @@ COPY docker-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
-CMD ["python3", "-m", "ghostcrew"]
+CMD ["python3", "-m", "pentestagent"]
diff --git a/README.md b/README.md
index 1657342..e0fb81f 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,11 @@
-

+

-# GHOSTCREW
-### AI Penetration Testing Agents
+# PentestAgent
+### AI Penetration Testing
-[](https://www.python.org/) [](LICENSE.txt) [](https://github.com/GH05TCREW/ghostcrew/releases) [](https://github.com/GH05TCREW/ghostcrew) [](https://github.com/GH05TCREW/ghostcrew)
-
-[🇺🇸 English](README.md) | [🇨🇳 中文文档](README_zh.md)
+[](https://www.python.org/) [](LICENSE.txt) [](https://github.com/GH05TCREW/pentestagent/releases) [](https://github.com/GH05TCREW/pentestagent) [](https://github.com/GH05TCREW/pentestagent)
@@ -22,8 +20,8 @@ https://github.com/user-attachments/assets/a67db2b5-672a-43df-b709-149c8eaee975
```bash
# Clone
-git clone https://github.com/GH05TCREW/ghostcrew.git
-cd ghostcrew
+git clone https://github.com/GH05TCREW/pentestagent.git
+cd pentestagent
# Setup (creates venv, installs deps)
.\scripts\setup.ps1 # Windows
@@ -43,14 +41,14 @@ Create `.env` in the project root:
```
ANTHROPIC_API_KEY=sk-ant-...
-GHOSTCREW_MODEL=claude-sonnet-4-20250514
+PENTESTAGENT_MODEL=claude-sonnet-4-20250514
```
Or for OpenAI:
```
OPENAI_API_KEY=sk-...
-GHOSTCREW_MODEL=gpt-5
+PENTESTAGENT_MODEL=gpt-5
```
Any [LiteLLM-supported model](https://docs.litellm.ai/docs/providers) works.
@@ -58,9 +56,9 @@ Any [LiteLLM-supported model](https://docs.litellm.ai/docs/providers) works.
## Run
```bash
-ghostcrew # Launch TUI
-ghostcrew -t 192.168.1.1 # Launch with target
-ghostcrew --docker # Run tools in Docker container
+pentestagent # Launch TUI
+pentestagent -t 192.168.1.1 # Launch with target
+pentestagent --docker # Run tools in Docker container
```
## Docker
@@ -73,13 +71,13 @@ Run tools inside a Docker container for isolation and pre-installed pentesting t
# Base image with nmap, netcat, curl
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
- -e GHOSTCREW_MODEL=claude-sonnet-4-20250514 \
- ghcr.io/gh05tcrew/ghostcrew:latest
+ -e PENTESTAGENT_MODEL=claude-sonnet-4-20250514 \
+ ghcr.io/gh05tcrew/pentestagent:latest
# Kali image with metasploit, sqlmap, hydra, etc.
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
- ghcr.io/gh05tcrew/ghostcrew:kali
+ ghcr.io/gh05tcrew/pentestagent:kali
```
### Option 2: Build locally
@@ -89,20 +87,20 @@ docker run -it --rm \
docker compose build
# Run
-docker compose run --rm ghostcrew
+docker compose run --rm pentestagent
# Or with Kali
docker compose --profile kali build
-docker compose --profile kali run --rm ghostcrew-kali
+docker compose --profile kali run --rm pentestagent-kali
```
-The container runs GhostCrew with access to Linux pentesting tools. The agent can use `nmap`, `msfconsole`, `sqlmap`, etc. directly via the terminal tool.
+The container runs PentestAgent with access to Linux pentesting tools. The agent can use `nmap`, `msfconsole`, `sqlmap`, etc. directly via the terminal tool.
Requires Docker to be installed and running.
## Modes
-GhostCrew has three modes, accessible via commands in the TUI:
+PentestAgent has three modes, accessible via commands in the TUI:
| Mode | Command | Description |
|------|---------|-------------|
@@ -130,25 +128,25 @@ Press `Esc` to stop a running agent. `Ctrl+Q` to quit.
## Playbooks
-GhostCrew includes prebuilt **attack playbooks** for black-box security testing. Playbooks define a structured approach to specific security assessments.
+PentestAgent includes prebuilt **attack playbooks** for black-box security testing. Playbooks define a structured approach to specific security assessments.
**Run a playbook:**
```bash
-ghostcrew run -t example.com --playbook thp3_web
+pentestagent run -t example.com --playbook thp3_web
```

## Tools
-GhostCrew includes built-in tools and supports MCP (Model Context Protocol) for extensibility.
+PentestAgent includes built-in tools and supports MCP (Model Context Protocol) for extensibility.
**Built-in tools:** `terminal`, `browser`, `notes`, `web_search` (requires `TAVILY_API_KEY`)
### MCP Integration
-Add external tools via MCP servers in `ghostcrew/mcp/mcp_servers.json`:
+Add external tools via MCP servers in `pentestagent/mcp/mcp_servers.json`:
```json
{
@@ -167,23 +165,23 @@ Add external tools via MCP servers in `ghostcrew/mcp/mcp_servers.json`:
### CLI Tool Management
```bash
-ghostcrew tools list # List all tools
-ghostcrew tools info # Show tool details
-ghostcrew mcp list # List MCP servers
-ghostcrew mcp add [args...] # Add MCP server
-ghostcrew mcp test # Test MCP connection
+pentestagent tools list # List all tools
+pentestagent tools info # Show tool details
+pentestagent mcp list # List MCP servers
+pentestagent mcp add [args...] # Add MCP server
+pentestagent mcp test # Test MCP connection
```
## Knowledge
-- **RAG:** Place methodologies, CVEs, or wordlists in `ghostcrew/knowledge/sources/` for automatic context injection.
+- **RAG:** Place methodologies, CVEs, or wordlists in `pentestagent/knowledge/sources/` for automatic context injection.
- **Notes:** Agents save findings to `loot/notes.json` with categories (`credential`, `vulnerability`, `finding`, `artifact`). Notes persist across sessions and are injected into agent context.
- **Shadow Graph:** In Crew mode, the orchestrator builds a knowledge graph from notes to derive strategic insights (e.g., "We have credentials for host X").
## Project Structure
```
-ghostcrew/
+pentestagent/
agents/ # Agent implementations
config/ # Settings and constants
interface/ # TUI and CLI
@@ -199,10 +197,10 @@ ghostcrew/
```bash
pip install -e ".[dev]"
-pytest # Run tests
-pytest --cov=ghostcrew # With coverage
-black ghostcrew # Format
-ruff check ghostcrew # Lint
+pytest # Run tests
+pytest --cov=pentestagent # With coverage
+black pentestagent # Format
+ruff check pentestagent # Lint
```
## Legal
diff --git a/README_zh.md b/README_zh.md
deleted file mode 100644
index 8703a5b..0000000
--- a/README_zh.md
+++ /dev/null
@@ -1,202 +0,0 @@
-
-
-

-
-# GHOSTCREW
-### AI 渗透测试智能体
-
-[](https://www.python.org/) [](LICENSE.txt) [](https://github.com/GH05TCREW/ghostcrew/releases) [](https://github.com/GH05TCREW/ghostcrew) [](https://github.com/GH05TCREW/ghostcrew)
-
-[🇺🇸 English](README.md) | [🇨🇳 中文文档](README_zh.md)
-
-
-
-https://github.com/user-attachments/assets/a67db2b5-672a-43df-b709-149c8eaee975
-
-## 要求
-
-- Python 3.10+
-- OpenAI, Anthropic 或其他支持 LiteLLM 的提供商的 API 密钥
-
-## 安装
-
-```bash
-# 克隆仓库
-git clone https://github.com/GH05TCREW/ghostcrew.git
-cd ghostcrew
-
-# 设置 (创建虚拟环境, 安装依赖)
-.\scripts\setup.ps1 # Windows
-./scripts/setup.sh # Linux/macOS
-
-# 或者手动安装
-python -m venv venv
-.\venv\Scripts\Activate.ps1 # Windows
-source venv/bin/activate # Linux/macOS
-pip install -e ".[all]"
-playwright install chromium # 浏览器工具需要
-```
-
-## 配置
-
-在项目根目录创建 `.env` 文件:
-
-```
-ANTHROPIC_API_KEY=sk-ant-...
-GHOSTCREW_MODEL=claude-sonnet-4-20250514
-```
-
-或者使用 OpenAI:
-
-```
-OPENAI_API_KEY=sk-...
-GHOSTCREW_MODEL=gpt-5
-```
-
-任何 [LiteLLM 支持的模型](https://docs.litellm.ai/docs/providers) 都可以使用。
-
-## 运行
-
-```bash
-ghostcrew # 启动 TUI (终端用户界面)
-ghostcrew -t 192.168.1.1 # 启动并指定目标
-ghostcrew --docker # 在 Docker 容器中运行工具
-```
-
-## Docker
-
-在 Docker 容器中运行工具,以实现隔离并使用预安装的渗透测试工具。
-
-### 选项 1: 拉取预构建镜像 (最快)
-
-```bash
-# 基础镜像 (包含 nmap, netcat, curl)
-docker run -it --rm \
- -e ANTHROPIC_API_KEY=your-key \
- -e GHOSTCREW_MODEL=claude-sonnet-4-20250514 \
- ghcr.io/gh05tcrew/ghostcrew:latest
-
-# Kali 镜像 (包含 metasploit, sqlmap, hydra 等)
-docker run -it --rm \
- -e ANTHROPIC_API_KEY=your-key \
- ghcr.io/gh05tcrew/ghostcrew:kali
-```
-
-### 选项 2: 本地构建
-
-```bash
-# 构建
-docker compose build
-
-# 运行
-docker compose run --rm ghostcrew
-
-# 或者使用 Kali
-docker compose --profile kali build
-docker compose --profile kali run --rm ghostcrew-kali
-```
-
-容器运行 GhostCrew 并可以访问 Linux 渗透测试工具。代理可以通过终端工具直接使用 `nmap`, `msfconsole`, `sqlmap` 等。
-
-需要安装并运行 Docker。
-
-## 模式
-
-GhostCrew 有三种模式,可通过 TUI 中的命令访问:
-
-| 模式 | 命令 | 描述 |
-|------|---------|-------------|
-| 辅助 (Assist) | (默认) | 与代理聊天。你控制流程。 |
-| 代理 (Agent) | `/agent <任务>` | 自主执行单个任务。 |
-| 团队 (Crew) | `/crew <任务>` | 多代理模式。协调器生成专门的工作者。 |
-
-### TUI 命令
-
-```
-/agent 运行自主代理执行任务
-/crew 运行多代理团队执行任务
-/target 设置目标
-/tools 列出可用工具
-/notes 显示保存的笔记
-/report 从会话生成报告
-/memory 显示令牌/内存使用情况
-/prompt 显示系统提示词
-/clear 清除聊天和历史记录
-/quit 退出 (也可以用 /exit, /q)
-/help 显示帮助 (也可以用 /h, /?)
-```
-
-按 `Esc` 停止正在运行的代理。按 `Ctrl+Q` 退出。
-
-## 工具
-
-GhostCrew 包含内置工具,并支持 MCP (Model Context Protocol) 进行扩展。
-
-**内置工具:** `terminal` (终端), `browser` (浏览器), `notes` (笔记), `web_search` (网络搜索, 需要 `TAVILY_API_KEY`)
-
-### MCP 集成
-
-通过 `ghostcrew/mcp/mcp_servers.json` 添加外部工具 (MCP 服务器):
-
-```json
-{
- "mcpServers": {
- "nmap": {
- "command": "npx",
- "args": ["-y", "gc-nmap-mcp"],
- "env": {
- "NMAP_PATH": "/usr/bin/nmap"
- }
- }
- }
-}
-```
-
-### CLI 工具管理
-
-```bash
-ghostcrew tools list # 列出所有工具
-ghostcrew tools info # 显示工具详情
-ghostcrew mcp list # 列出 MCP 服务器
-ghostcrew mcp add [args...] # 添加 MCP 服务器
-ghostcrew mcp test # 测试 MCP 连接
-```
-
-## 知识库
-
-- **RAG (检索增强生成):** 将方法论、CVE 或字典放在 `ghostcrew/knowledge/sources/` 中,以便自动注入上下文。
-- **笔记:** 代理将发现保存到 `loot/notes.json`,分类为 (`credential` 凭据, `vulnerability` 漏洞, `finding` 发现, `artifact` 工件)。笔记在会话之间持久保存,并注入到代理上下文中。
-- **影子图 (Shadow Graph):** 在团队模式下,协调器从笔记构建知识图谱,以得出战略见解 (例如,“我们拥有主机 X 的凭据”)。
-
-## 项目结构
-
-```
-ghostcrew/
- agents/ # 代理实现
- config/ # 设置和常量
- interface/ # TUI 和 CLI
- knowledge/ # RAG 系统和影子图
- llm/ # LiteLLM 包装器
- mcp/ # MCP 客户端和服务器配置
- playbooks/ # 攻击剧本
- runtime/ # 执行环境
- tools/ # 内置工具
-```
-
-## 开发
-
-```bash
-pip install -e ".[dev]"
-pytest # 运行测试
-pytest --cov=ghostcrew # 带覆盖率运行
-black ghostcrew # 格式化代码
-ruff check ghostcrew # 代码检查
-```
-
-## 法律声明
-
-仅用于您有明确授权进行测试的系统。未经授权的访问是非法的。
-
-## 许可证
-
-MIT
diff --git a/assets/ghostcrew-logo.png b/assets/ghostcrew-logo.png
deleted file mode 100644
index 12b1926..0000000
Binary files a/assets/ghostcrew-logo.png and /dev/null differ
diff --git a/assets/pentestagent-logo.png b/assets/pentestagent-logo.png
new file mode 100644
index 0000000..8b8d8f9
Binary files /dev/null and b/assets/pentestagent-logo.png differ
diff --git a/docker-compose.yml b/docker-compose.yml
index f341dd2..aa0861e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,26 +1,26 @@
services:
- ghostcrew:
+ pentestagent:
build:
context: .
dockerfile: Dockerfile
- container_name: ghostcrew
+ container_name: pentestagent
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- - GHOSTCREW_MODEL=${GHOSTCREW_MODEL}
- - GHOSTCREW_DEBUG=${GHOSTCREW_DEBUG:-false}
+ - PENTESTAGENT_MODEL=${PENTESTAGENT_MODEL}
+ - PENTESTAGENT_DEBUG=${PENTESTAGENT_DEBUG:-false}
volumes:
- ./loot:/app/loot
networks:
- - ghostcrew-net
+ - pentestagent-net
stdin_open: true
tty: true
- ghostcrew-kali:
+ pentestagent-kali:
build:
context: .
dockerfile: Dockerfile.kali
- container_name: ghostcrew-kali
+ container_name: pentestagent-kali
privileged: true # Required for VPN and some tools
cap_add:
- NET_ADMIN
@@ -28,18 +28,18 @@ services:
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- - GHOSTCREW_MODEL=${GHOSTCREW_MODEL}
+ - PENTESTAGENT_MODEL=${PENTESTAGENT_MODEL}
- ENABLE_TOR=${ENABLE_TOR:-false}
- INIT_METASPLOIT=${INIT_METASPLOIT:-false}
volumes:
- ./loot:/app/loot
networks:
- - ghostcrew-net
+ - pentestagent-net
stdin_open: true
tty: true
profiles:
- kali
networks:
- ghostcrew-net:
+ pentestagent-net:
driver: bridge
diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh
index 733b363..1373637 100644
--- a/docker-entrypoint.sh
+++ b/docker-entrypoint.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# GhostCrew Docker Entrypoint
+# PentestAgent Docker Entrypoint
set -e
@@ -9,7 +9,7 @@ GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
-echo -e "${GREEN}🔧 GhostCrew Container Starting...${NC}"
+echo -e "${GREEN}🔧 PentestAgent Container Starting...${NC}"
# Start VPN if config provided
if [ -f "/vpn/config.ovpn" ]; then
@@ -41,10 +41,10 @@ fi
# Create output directory with timestamp
OUTPUT_DIR="/output/$(date +%Y%m%d_%H%M%S)"
mkdir -p "$OUTPUT_DIR"
-export GHOSTCREW_OUTPUT_DIR="$OUTPUT_DIR"
+export PENTESTAGENT_OUTPUT_DIR="$OUTPUT_DIR"
echo -e "${GREEN}📁 Output directory: $OUTPUT_DIR${NC}"
-echo -e "${GREEN}🚀 Starting GhostCrew...${NC}"
+echo -e "${GREEN}🚀 Starting PentestAgent...${NC}"
# Execute the main command
exec "$@"
diff --git a/ghostcrew/__init__.py b/ghostcrew/__init__.py
deleted file mode 100644
index 012d4d5..0000000
--- a/ghostcrew/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""GhostCrew - AI penetration testing."""
-
-__version__ = "0.2.0"
-__author__ = "Masic"
diff --git a/ghostcrew/__main__.py b/ghostcrew/__main__.py
deleted file mode 100644
index 6a796f8..0000000
--- a/ghostcrew/__main__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""GhostCrew entry point for `python -m ghostcrew`."""
-
-from ghostcrew.interface.main import main
-
-if __name__ == "__main__":
- main()
diff --git a/ghostcrew/agents/crew/worker_pool.py b/ghostcrew/agents/crew/worker_pool.py
index 5e7632e..059b624 100644
--- a/ghostcrew/agents/crew/worker_pool.py
+++ b/ghostcrew/agents/crew/worker_pool.py
@@ -44,7 +44,7 @@ class WorkerPool:
def _generate_id(self) -> str:
"""Generate unique worker ID."""
- worker_id = f"ghost-{self._next_id}"
+ worker_id = f"agent-{self._next_id}"
self._next_id += 1
return worker_id
@@ -93,7 +93,7 @@ class WorkerPool:
async def _run_worker(self, worker: AgentWorker) -> None:
"""Run a single worker agent."""
- from ..ghostcrew_agent import GhostCrewAgent
+ from ..pa_agent import PentestAgentAgent
# Wait for dependencies
if worker.depends_on:
@@ -111,7 +111,7 @@ class WorkerPool:
from ...config.constants import WORKER_MAX_ITERATIONS
- agent = GhostCrewAgent(
+ agent = PentestAgentAgent(
llm=self.llm,
tools=self.tools,
runtime=worker_runtime, # Use isolated runtime
diff --git a/ghostcrew/agents/ghostcrew_agent/__init__.py b/ghostcrew/agents/ghostcrew_agent/__init__.py
deleted file mode 100644
index 560e24d..0000000
--- a/ghostcrew/agents/ghostcrew_agent/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""GhostCrew main agent implementation."""
-
-from .ghostcrew_agent import GhostCrewAgent
-
-__all__ = ["GhostCrewAgent"]
diff --git a/ghostcrew/interface/cli.py b/ghostcrew/interface/cli.py
index 0ce35c3..29f9929 100644
--- a/ghostcrew/interface/cli.py
+++ b/ghostcrew/interface/cli.py
@@ -1,4 +1,4 @@
-"""Non-interactive CLI mode for GhostCrew."""
+"""Non-interactive CLI mode for PentestAgent."""
import asyncio
import time
@@ -12,12 +12,12 @@ from rich.text import Text
console = Console()
-# Ghost theme colors (matching TUI)
-GHOST_PRIMARY = "#d4d4d4" # light gray - primary text
-GHOST_SECONDARY = "#9a9a9a" # medium gray - secondary text
-GHOST_DIM = "#6b6b6b" # dim gray - muted text
-GHOST_BORDER = "#3a3a3a" # dark gray - borders
-GHOST_ACCENT = "#7a7a7a" # accent gray
+# PA theme colors (matching TUI)
+PA_PRIMARY = "#d4d4d4" # light gray - primary text
+PA_SECONDARY = "#9a9a9a" # medium gray - secondary text
+PA_DIM = "#6b6b6b" # dim gray - muted text
+PA_BORDER = "#3a3a3a" # dark gray - borders
+PA_ACCENT = "#7a7a7a" # accent gray
async def run_cli(
@@ -30,7 +30,7 @@ async def run_cli(
mode: str = "agent",
):
"""
- Run GhostCrew in non-interactive mode.
+ Run PentestAgent in non-interactive mode.
Args:
target: Target to test
@@ -41,7 +41,7 @@ async def run_cli(
use_docker: Run tools in Docker container
mode: Execution mode ("agent" or "crew")
"""
- from ..agents.ghostcrew_agent import GhostCrewAgent
+ from ..agents.pa_agent import PentestAgentAgent
from ..knowledge import RAGEngine
from ..llm import LLM
from ..runtime.docker_runtime import DockerRuntime
@@ -50,27 +50,27 @@ async def run_cli(
# Startup panel
start_text = Text()
- start_text.append("GHOSTCREW", style=f"bold {GHOST_PRIMARY}")
- start_text.append(" - Non-interactive Mode\n\n", style=GHOST_DIM)
- start_text.append("Target: ", style=GHOST_SECONDARY)
- start_text.append(f"{target}\n", style=GHOST_PRIMARY)
- start_text.append("Model: ", style=GHOST_SECONDARY)
- start_text.append(f"{model}\n", style=GHOST_PRIMARY)
- start_text.append("Mode: ", style=GHOST_SECONDARY)
- start_text.append(f"{mode.title()}\n", style=GHOST_PRIMARY)
- start_text.append("Runtime: ", style=GHOST_SECONDARY)
- start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=GHOST_PRIMARY)
- start_text.append("Max loops: ", style=GHOST_SECONDARY)
- start_text.append(f"{max_loops}\n", style=GHOST_PRIMARY)
+ start_text.append("PENTESTAGENT", style=f"bold {PA_PRIMARY}")
+ start_text.append(" - Non-interactive Mode\n\n", style=PA_DIM)
+ start_text.append("Target: ", style=PA_SECONDARY)
+ start_text.append(f"{target}\n", style=PA_PRIMARY)
+ start_text.append("Model: ", style=PA_SECONDARY)
+ start_text.append(f"{model}\n", style=PA_PRIMARY)
+ start_text.append("Mode: ", style=PA_SECONDARY)
+ start_text.append(f"{mode.title()}\n", style=PA_PRIMARY)
+ start_text.append("Runtime: ", style=PA_SECONDARY)
+ start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=PA_PRIMARY)
+ start_text.append("Max loops: ", style=PA_SECONDARY)
+ start_text.append(f"{max_loops}\n", style=PA_PRIMARY)
task_msg = task or f"Perform a penetration test on {target}"
- start_text.append("Task: ", style=GHOST_SECONDARY)
- start_text.append(task_msg, style=GHOST_PRIMARY)
+ start_text.append("Task: ", style=PA_SECONDARY)
+ start_text.append(task_msg, style=PA_PRIMARY)
console.print()
console.print(
Panel(
- start_text, title=f"[{GHOST_SECONDARY}]Starting", border_style=GHOST_BORDER
+ start_text, title=f"[{PA_SECONDARY}]Starting", border_style=PA_BORDER
)
)
console.print()
@@ -99,13 +99,13 @@ async def run_cli(
register_tool_instance(tool)
mcp_count = len(mcp_tools)
if mcp_count > 0:
- console.print(f"[{GHOST_DIM}]Loaded {mcp_count} MCP tools[/]")
+ console.print(f"[{PA_DIM}]Loaded {mcp_count} MCP tools[/]")
except Exception:
pass # MCP is optional, continue without it
# Initialize runtime - Docker or Local
if use_docker:
- console.print(f"[{GHOST_DIM}]Starting Docker container...[/]")
+ console.print(f"[{PA_DIM}]Starting Docker container...[/]")
runtime = DockerRuntime(mcp_manager=mcp_manager)
else:
runtime = LocalRuntime(mcp_manager=mcp_manager)
@@ -127,11 +127,11 @@ async def run_cli(
last_msg_intermediate = False # Track if previous message was intermediate (to avoid double counting tokens)
stopped_reason = None
- def print_status(msg: str, style: str = GHOST_DIM):
+ def print_status(msg: str, style: str = PA_DIM):
elapsed = int(time.time() - start_time)
mins, secs = divmod(elapsed, 60)
timestamp = f"[{mins:02d}:{secs:02d}]"
- console.print(f"[{GHOST_DIM}]{timestamp}[/] [{style}]{msg}[/]")
+ console.print(f"[{PA_DIM}]{timestamp}[/] [{style}]{msg}[/]")
def display_message(content: str, title: str) -> bool:
"""Display a message panel if it hasn't been shown yet."""
@@ -141,8 +141,8 @@ async def run_cli(
console.print(
Panel(
Markdown(content),
- title=f"[{GHOST_PRIMARY}]{title}",
- border_style=GHOST_BORDER,
+ title=f"[{PA_PRIMARY}]{title}",
+ border_style=PA_BORDER,
)
)
console.print()
@@ -160,7 +160,7 @@ async def run_cli(
status_text = f"Interrupted ({stopped_reason})"
lines = [
- "# GhostCrew Penetration Test Report",
+ "# PentestAgent Penetration Test Report",
"",
"## Executive Summary",
"",
@@ -267,7 +267,7 @@ async def run_cli(
[
"---",
"",
- f"*Report generated by GhostCrew on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
+ f"*Report generated by PentestAgent on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
]
)
@@ -291,14 +291,14 @@ async def run_cli(
content = generate_report()
report_path.write_text(content, encoding="utf-8")
- console.print(f"[{GHOST_SECONDARY}]Report saved: {report_path}[/]")
+ console.print(f"[{PA_SECONDARY}]Report saved: {report_path}[/]")
async def generate_summary():
"""Ask the LLM to summarize findings when stopped early."""
if not tool_log:
return None
- print_status("Generating summary...", GHOST_SECONDARY)
+ print_status("Generating summary...", PA_SECONDARY)
# Build context from tool results (use full results, not truncated)
context_lines = ["Summarize the penetration test findings so far:\n"]
@@ -345,28 +345,28 @@ async def run_cli(
status = f"STOPPED ({stopped_reason})"
final_text = Text()
- final_text.append(f"{status}\n\n", style=f"bold {GHOST_PRIMARY}")
- final_text.append("Duration: ", style=GHOST_DIM)
- final_text.append(f"{mins}m {secs}s\n", style=GHOST_SECONDARY)
- final_text.append("Loops: ", style=GHOST_DIM)
- final_text.append(f"{iteration}/{max_loops}\n", style=GHOST_SECONDARY)
- final_text.append("Tools: ", style=GHOST_DIM)
- final_text.append(f"{tool_count}\n", style=GHOST_SECONDARY)
+ final_text.append(f"{status}\n\n", style=f"bold {PA_PRIMARY}")
+ final_text.append("Duration: ", style=PA_DIM)
+ final_text.append(f"{mins}m {secs}s\n", style=PA_SECONDARY)
+ final_text.append("Loops: ", style=PA_DIM)
+ final_text.append(f"{iteration}/{max_loops}\n", style=PA_SECONDARY)
+ final_text.append("Tools: ", style=PA_DIM)
+ final_text.append(f"{tool_count}\n", style=PA_SECONDARY)
if total_tokens > 0:
- final_text.append("Tokens: ", style=GHOST_DIM)
- final_text.append(f"{total_tokens:,}\n", style=GHOST_SECONDARY)
+ final_text.append("Tokens: ", style=PA_DIM)
+ final_text.append(f"{total_tokens:,}\n", style=PA_SECONDARY)
if findings_count > 0:
- final_text.append("Findings: ", style=GHOST_DIM)
- final_text.append(f"{findings_count}", style=GHOST_SECONDARY)
+ final_text.append("Findings: ", style=PA_DIM)
+ final_text.append(f"{findings_count}", style=PA_SECONDARY)
console.print()
console.print(
Panel(
final_text,
- title=f"[{GHOST_SECONDARY}]{title}",
- border_style=GHOST_BORDER,
+ title=f"[{PA_SECONDARY}]{title}",
+ border_style=PA_BORDER,
)
)
@@ -388,13 +388,13 @@ async def run_cli(
if event_type == "spawn":
task = data.get("task", "")
- print_status(f"Spawned worker {worker_id}: {task}", GHOST_ACCENT)
+ print_status(f"Spawned worker {worker_id}: {task}", PA_ACCENT)
elif event_type == "tool":
tool_name = data.get("tool", "unknown")
tool_count += 1
print_status(
- f"Worker {worker_id} using tool: {tool_name}", GHOST_DIM
+ f"Worker {worker_id} using tool: {tool_name}", PA_DIM
)
# Log tool usage (limited info available from event)
@@ -429,7 +429,7 @@ async def run_cli(
elif event_type == "status":
status = data.get("status", "")
- print_status(f"Worker {worker_id} status: {status}", GHOST_DIM)
+ print_status(f"Worker {worker_id} status: {status}", PA_DIM)
elif event_type == "warning":
reason = data.get("reason", "unknown")
@@ -456,17 +456,17 @@ async def run_cli(
phase = update.get("phase", "")
if phase == "starting":
- print_status("Crew orchestrator starting...", GHOST_PRIMARY)
+ print_status("Crew orchestrator starting...", PA_PRIMARY)
elif phase == "thinking":
content = update.get("content", "")
if content:
- display_message(content, "GhostCrew Plan")
+ display_message(content, "PentestAgent Plan")
elif phase == "tool_call":
tool = update.get("tool", "")
args = update.get("args", {})
- print_status(f"Orchestrator calling: {tool}", GHOST_ACCENT)
+ print_status(f"Orchestrator calling: {tool}", PA_ACCENT)
elif phase == "complete":
report_content = update.get("report", "")
@@ -487,7 +487,7 @@ async def run_cli(
else:
# Default Agent Mode
- agent = GhostCrewAgent(
+ agent = PentestAgentAgent(
llm=llm,
tools=tools,
runtime=runtime,
@@ -611,39 +611,39 @@ async def run_cli(
# Metasploit-style output with better spacing
console.print() # Blank line before each tool
- print_status(f"$ {name} ({tool_count})", GHOST_ACCENT)
+ print_status(f"$ {name} ({tool_count})", PA_ACCENT)
# Show command/args on separate indented line (truncated for display)
if command_text:
display_cmd = command_text[:80]
if len(command_text) > 80:
display_cmd += "..."
- console.print(f" [{GHOST_DIM}]{display_cmd}[/]")
+ console.print(f" [{PA_DIM}]{display_cmd}[/]")
# Show result on separate line with status indicator
if response.tool_results and i < len(response.tool_results):
tr = response.tool_results[i]
if tr.error:
console.print(
- f" [{GHOST_DIM}][!] {tr.error[:100]}[/]"
+ f" [{PA_DIM}][!] {tr.error[:100]}[/]"
)
elif tr.result:
# Show exit code or brief result
result_line = tr.result[:100].replace("\n", " ")
if exit_code == 0 or "success" in result_line.lower():
- console.print(f" [{GHOST_DIM}][+] OK[/]")
+ console.print(f" [{PA_DIM}][+] OK[/]")
elif exit_code is not None and exit_code != 0:
console.print(
- f" [{GHOST_DIM}][-] Exit {exit_code}[/]"
+ f" [{PA_DIM}][-] Exit {exit_code}[/]"
)
else:
console.print(
- f" [{GHOST_DIM}][*] {result_line[:60]}...[/]"
+ f" [{PA_DIM}][*] {result_line[:60]}...[/]"
)
# Print assistant content immediately (analysis/findings)
if response.content:
- if display_message(response.content, "GhostCrew"):
+ if display_message(response.content, "PentestAgent"):
messages.append(response.content)
# Check max loops limit
diff --git a/ghostcrew/interface/tui.py b/ghostcrew/interface/tui.py
index eedb4d2..f9897b1 100644
--- a/ghostcrew/interface/tui.py
+++ b/ghostcrew/interface/tui.py
@@ -1,5 +1,5 @@
"""
-GhostCrew TUI - Terminal User Interface
+PentestAgent TUI - Terminal User Interface
"""
import asyncio
@@ -51,7 +51,7 @@ class CrewTree(Tree):
if TYPE_CHECKING:
- from ..agents.ghostcrew_agent import GhostCrewAgent
+ from ..agents.pa_agent import PentestAgentAgent
def wrap_text_lines(text: str, width: int = 80) -> List[str]:
@@ -142,7 +142,7 @@ class HelpScreen(ModalScreen):
def compose(self) -> ComposeResult:
yield Container(
- Static("GhostCrew Help", id="help-title"),
+ Static("PentestAgent Help", id="help-title"),
Static(self._get_help_text(), id="help-content"),
Center(Button("Close", id="help-close")),
id="help-container",
@@ -200,7 +200,7 @@ class ThinkingMessage(Static):
class ToolMessage(Static):
"""Tool execution message"""
- # Standard tool icon and color (ghost theme)
+    # Standard tool icon and color (PA theme)
TOOL_ICON = "$"
TOOL_COLOR = "#9a9a9a" # spirit gray
@@ -262,7 +262,7 @@ class AssistantMessage(Static):
text = Text()
text.append("| ", style="#525252")
text.append(">> ", style="#9a9a9a")
- text.append("Ghost\n", style="bold #d4d4d4")
+ text.append("PentestAgent\n", style="bold #d4d4d4")
# Wrap content - use 70 chars to account for sidebar + prefix
for line in wrap_text_lines(self.message_content, width=70):
@@ -331,7 +331,7 @@ class StatusBar(Static):
# Use fixed-width labels (pad dots to 4 chars so text doesn't jump)
dots_padded = dots.ljust(4)
- # Ghost theme status colors (muted, ethereal)
+ # PA theme status colors (muted, ethereal)
status_map = {
"idle": ("Ready", "#6b6b6b"),
"initializing": (f"Initializing{dots_padded}", "#9a9a9a"),
@@ -366,11 +366,11 @@ class StatusBar(Static):
# ----- Main TUI App -----
-class GhostCrewTUI(App):
- """Main GhostCrew TUI Application"""
+class PentestAgentTUI(App):
+ """Main PentestAgent TUI Application"""
# ═══════════════════════════════════════════════════════════
- # GHOST THEME - Ethereal grays emerging from darkness
+ # PA THEME - Ethereal grays
# ═══════════════════════════════════════════════════════════
# Void: #0a0a0a (terminal black - the darkness)
# Shadow: #121212 (subtle surface)
@@ -559,7 +559,7 @@ class GhostCrewTUI(App):
Binding("tab", "focus_next", "Next", show=False),
]
- TITLE = "GhostCrew"
+ TITLE = "PentestAgent"
SUB_TITLE = "AI Penetration Testing"
def __init__(
@@ -575,7 +575,7 @@ class GhostCrewTUI(App):
self.use_docker = use_docker
# Agent components
- self.agent: Optional["GhostCrewAgent"] = None
+ self.agent: Optional["PentestAgentAgent"] = None
self.runtime = None
self.mcp_manager = None
self.all_tools = []
@@ -641,7 +641,7 @@ class GhostCrewTUI(App):
try:
import os
- from ..agents.ghostcrew_agent import GhostCrewAgent
+ from ..agents.pa_agent import PentestAgentAgent
from ..knowledge import RAGEngine
from ..llm import LLM, ModelConfig
from ..mcp import MCPManager
@@ -665,7 +665,7 @@ class GhostCrewTUI(App):
if knowledge_path:
try:
# Determine embedding method: env var > auto-detect
- embeddings_setting = os.getenv("GHOSTCREW_EMBEDDINGS", "").lower()
+ embeddings_setting = os.getenv("PENTESTAGENT_EMBEDDINGS", "").lower()
if embeddings_setting == "local":
use_local = True
elif embeddings_setting == "openai":
@@ -714,7 +714,7 @@ class GhostCrewTUI(App):
self.all_tools = get_all_tools()
# Agent
- self.agent = GhostCrewAgent(
+ self.agent = PentestAgentAgent(
llm=llm,
tools=self.all_tools,
runtime=self.runtime,
@@ -732,7 +732,7 @@ class GhostCrewTUI(App):
runtime_str = "Docker" if self.use_docker else "Local"
self._add_system(
- f"+ GhostCrew ready\n"
+ f"+ PentestAgent ready\n"
f" Model: {self.model} | Tools: {len(self.all_tools)} | MCP: {mcp_server_count} | RAG: {rag_doc_count}\n"
f" Runtime: {runtime_str} | Mode: Assist (use /agent or /crew for autonomous modes)"
)
@@ -940,7 +940,7 @@ class GhostCrewTUI(App):
notes = await get_all_notes()
if not notes:
self._add_system(
- "No notes found. Ghost saves findings using the notes tool during testing."
+ "No notes found. PentestAgent saves findings using the notes tool during testing."
)
return
@@ -1791,8 +1791,8 @@ def run_tui(
model: str = None,
use_docker: bool = False,
):
- """Run the GhostCrew TUI"""
- app = GhostCrewTUI(
+ """Run the PentestAgent TUI"""
+ app = PentestAgentTUI(
target=target,
model=model,
use_docker=use_docker,
diff --git a/pentestagent/__init__.py b/pentestagent/__init__.py
new file mode 100644
index 0000000..062af62
--- /dev/null
+++ b/pentestagent/__init__.py
@@ -0,0 +1,4 @@
+"""PentestAgent - AI penetration testing."""
+
+__version__ = "0.2.0"
+__author__ = "Masic"
diff --git a/pentestagent/__main__.py b/pentestagent/__main__.py
new file mode 100644
index 0000000..f15cea4
--- /dev/null
+++ b/pentestagent/__main__.py
@@ -0,0 +1,6 @@
+"""PentestAgent entry point for `python -m pentestagent`."""
+
+from pentestagent.interface.main import main
+
+if __name__ == "__main__":
+ main()
diff --git a/ghostcrew/agents/__init__.py b/pentestagent/agents/__init__.py
similarity index 89%
rename from ghostcrew/agents/__init__.py
rename to pentestagent/agents/__init__.py
index f1b84cd..ab58e89 100644
--- a/ghostcrew/agents/__init__.py
+++ b/pentestagent/agents/__init__.py
@@ -1,4 +1,4 @@
-"""Agent system for GhostCrew."""
+"""Agent system for PentestAgent."""
from .base_agent import AgentMessage, BaseAgent
from .crew import AgentStatus, AgentWorker, CrewOrchestrator, CrewState
diff --git a/ghostcrew/agents/base_agent.py b/pentestagent/agents/base_agent.py
similarity index 99%
rename from ghostcrew/agents/base_agent.py
rename to pentestagent/agents/base_agent.py
index a4cbb8a..32d1b1d 100644
--- a/ghostcrew/agents/base_agent.py
+++ b/pentestagent/agents/base_agent.py
@@ -1,4 +1,4 @@
-"""Base agent class for GhostCrew."""
+"""Base agent class for PentestAgent."""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
diff --git a/ghostcrew/agents/crew/__init__.py b/pentestagent/agents/crew/__init__.py
similarity index 100%
rename from ghostcrew/agents/crew/__init__.py
rename to pentestagent/agents/crew/__init__.py
diff --git a/ghostcrew/agents/crew/models.py b/pentestagent/agents/crew/models.py
similarity index 100%
rename from ghostcrew/agents/crew/models.py
rename to pentestagent/agents/crew/models.py
diff --git a/ghostcrew/agents/crew/orchestrator.py b/pentestagent/agents/crew/orchestrator.py
similarity index 99%
rename from ghostcrew/agents/crew/orchestrator.py
rename to pentestagent/agents/crew/orchestrator.py
index 848e429..ce7e7cf 100644
--- a/ghostcrew/agents/crew/orchestrator.py
+++ b/pentestagent/agents/crew/orchestrator.py
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional
from ...config.constants import DEFAULT_MAX_ITERATIONS
from ...knowledge.graph import ShadowGraph
-from ..prompts import ghost_crew
+from ..prompts import pa_crew
from .models import CrewState, WorkerCallback
from .tools import create_crew_tools
from .worker_pool import WorkerPool
@@ -122,7 +122,7 @@ class CrewOrchestrator:
f"- {i}" for i in graph_insights
)
- return ghost_crew.render(
+ return pa_crew.render(
target=self.target or "Not specified",
prior_context=self.prior_context or "None - starting fresh",
notes_context=notes_context + insights_text,
diff --git a/ghostcrew/agents/crew/tools.py b/pentestagent/agents/crew/tools.py
similarity index 100%
rename from ghostcrew/agents/crew/tools.py
rename to pentestagent/agents/crew/tools.py
diff --git a/pentestagent/agents/crew/worker_pool.py b/pentestagent/agents/crew/worker_pool.py
new file mode 100644
index 0000000..059b624
--- /dev/null
+++ b/pentestagent/agents/crew/worker_pool.py
@@ -0,0 +1,336 @@
+"""Worker pool for managing concurrent agent execution."""
+
+import asyncio
+import time
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from .models import AgentStatus, AgentWorker, WorkerCallback
+
+if TYPE_CHECKING:
+ from ...llm import LLM
+ from ...runtime import Runtime
+ from ...tools import Tool
+
+
+class WorkerPool:
+ """Manages concurrent execution of worker agents."""
+
+ def __init__(
+ self,
+ llm: "LLM",
+ tools: List["Tool"],
+ runtime: "Runtime",
+ target: str = "",
+ rag_engine: Any = None,
+ on_worker_event: Optional[WorkerCallback] = None,
+ ):
+ self.llm = llm
+ self.tools = tools
+ self.runtime = runtime
+ self.target = target
+ self.rag_engine = rag_engine
+ self.on_worker_event = on_worker_event
+
+ self._workers: Dict[str, AgentWorker] = {}
+ self._tasks: Dict[str, asyncio.Task] = {}
+ self._results: Dict[str, str] = {}
+ self._next_id = 0
+ self._lock = asyncio.Lock()
+
+ def _emit(self, worker_id: str, event: str, data: Dict[str, Any]) -> None:
+ """Emit event to callback if registered."""
+ if self.on_worker_event:
+ self.on_worker_event(worker_id, event, data)
+
+ def _generate_id(self) -> str:
+ """Generate unique worker ID."""
+ worker_id = f"agent-{self._next_id}"
+ self._next_id += 1
+ return worker_id
+
+ async def spawn(
+ self,
+ task: str,
+ priority: int = 1,
+ depends_on: Optional[List[str]] = None,
+ ) -> str:
+ """
+ Spawn a new worker agent.
+
+ Args:
+ task: The task description for the agent
+ priority: Higher priority runs first (for future use)
+ depends_on: List of agent IDs that must complete first
+
+ Returns:
+ The worker ID
+ """
+ async with self._lock:
+ worker_id = self._generate_id()
+
+ worker = AgentWorker(
+ id=worker_id,
+ task=task,
+ priority=priority,
+ depends_on=depends_on or [],
+ )
+ self._workers[worker_id] = worker
+
+ # Emit spawn event for UI
+ self._emit(
+ worker_id,
+ "spawn",
+ {
+ "worker_type": worker_id,
+ "task": task,
+ },
+ )
+
+ # Start the agent task
+ self._tasks[worker_id] = asyncio.create_task(self._run_worker(worker))
+
+ return worker_id
+
+ async def _run_worker(self, worker: AgentWorker) -> None:
+ """Run a single worker agent."""
+ from ..pa_agent import PentestAgentAgent
+
+ # Wait for dependencies
+ if worker.depends_on:
+ await self._wait_for_dependencies(worker.depends_on)
+
+ worker.status = AgentStatus.RUNNING
+ worker.started_at = time.time()
+ self._emit(worker.id, "status", {"status": "running"})
+
+ # Create isolated runtime for this worker (prevents browser state conflicts)
+ from ...runtime.runtime import LocalRuntime
+
+ worker_runtime = LocalRuntime()
+ await worker_runtime.start()
+
+ from ...config.constants import WORKER_MAX_ITERATIONS
+
+ agent = PentestAgentAgent(
+ llm=self.llm,
+ tools=self.tools,
+ runtime=worker_runtime, # Use isolated runtime
+ target=self.target,
+ rag_engine=self.rag_engine,
+ max_iterations=WORKER_MAX_ITERATIONS,
+ )
+
+ try:
+ final_response = ""
+ hit_max_iterations = False
+ is_infeasible = False
+
+ async for response in agent.agent_loop(worker.task):
+ # Track tool calls
+ if response.tool_calls:
+ for tc in response.tool_calls:
+ if tc.name not in worker.tools_used:
+ worker.tools_used.append(tc.name)
+ self._emit(worker.id, "tool", {"tool": tc.name})
+
+ # Track tokens (avoid double counting)
+ if response.usage:
+ total = response.usage.get("total_tokens", 0)
+ is_intermediate = response.metadata.get("intermediate", False)
+ has_tools = bool(response.tool_calls)
+
+ # Same logic as CLI to avoid double counting
+ should_count = False
+ if is_intermediate:
+ should_count = True
+ worker.last_msg_intermediate = True
+ elif has_tools:
+ if not getattr(worker, "last_msg_intermediate", False):
+ should_count = True
+ worker.last_msg_intermediate = False
+ else:
+ should_count = True
+ worker.last_msg_intermediate = False
+
+ if should_count and total > 0:
+ self._emit(worker.id, "tokens", {"tokens": total})
+
+ # Capture final response (text without tool calls)
+ if response.content and not response.tool_calls:
+ final_response = response.content
+
+ # Check metadata flags
+ if response.metadata:
+ if response.metadata.get("max_iterations_reached"):
+ hit_max_iterations = True
+ if response.metadata.get("replan_impossible"):
+ is_infeasible = True
+
+ # Prioritize structured results from the plan over chatty summaries
+ plan_summary = ""
+ plan = getattr(worker_runtime, "plan", None)
+ if plan and plan.steps:
+ from ...tools.finish import StepStatus
+
+        # Consider steps of every status - skips and failures are valuable context -
+        # but keep only those with details; PlanStep stores failure/skip reasons in 'result'
+ steps_with_info = [s for s in plan.steps if s.result]
+ if steps_with_info:
+ summary_lines = []
+ for s in steps_with_info:
+ status_marker = {
+ StepStatus.COMPLETE: "✓",
+ StepStatus.SKIP: "⊘",
+ StepStatus.FAIL: "✗",
+ }.get(s.status, "·")
+ info = s.result or "No details"
+ summary_lines.append(f"{status_marker} {s.description}: {info}")
+ plan_summary = "\n".join(summary_lines)
+
+ # Use plan summary if available, otherwise fallback to chat response
+ worker.result = plan_summary or final_response or "No findings."
+
+ worker.completed_at = time.time()
+ self._results[worker.id] = worker.result
+
+ if is_infeasible:
+ worker.status = AgentStatus.FAILED
+ self._emit(
+ worker.id,
+ "failed",
+ {
+ "summary": worker.result[:200],
+ "reason": "Task determined infeasible",
+ },
+ )
+ elif hit_max_iterations:
+ worker.status = AgentStatus.WARNING
+ self._emit(
+ worker.id,
+ "warning",
+ {
+ "summary": worker.result[:200],
+ "reason": "Max iterations reached",
+ },
+ )
+ else:
+ worker.status = AgentStatus.COMPLETE
+ self._emit(
+ worker.id,
+ "complete",
+ {
+ "summary": worker.result[:200],
+ },
+ )
+
+ except asyncio.CancelledError:
+ worker.status = AgentStatus.CANCELLED
+ worker.completed_at = time.time()
+ self._emit(worker.id, "cancelled", {})
+ raise
+
+ except Exception as e:
+ worker.error = str(e)
+ worker.status = AgentStatus.ERROR
+ worker.completed_at = time.time()
+ self._emit(worker.id, "error", {"error": str(e)})
+
+ finally:
+ # Cleanup worker's isolated runtime
+ try:
+ await worker_runtime.stop()
+ except Exception:
+ pass # Best effort cleanup
+
+ async def _wait_for_dependencies(self, depends_on: List[str]) -> None:
+ """Wait for dependent workers to complete."""
+ for dep_id in depends_on:
+ if dep_id in self._tasks:
+ try:
+ await self._tasks[dep_id]
+ except (asyncio.CancelledError, Exception):
+ pass # Dependency failed, but we continue
+
+ async def wait_for(self, agent_ids: Optional[List[str]] = None) -> Dict[str, Any]:
+ """
+ Wait for specified agents (or all) to complete.
+
+ Args:
+ agent_ids: List of agent IDs to wait for. None = wait for all.
+
+ Returns:
+ Dict mapping agent_id to result/error
+ """
+ if agent_ids is None:
+ agent_ids = list(self._tasks.keys())
+
+ results = {}
+ for agent_id in agent_ids:
+ if agent_id in self._tasks:
+ try:
+ await self._tasks[agent_id]
+ except (asyncio.CancelledError, Exception):
+ pass
+
+ worker = self._workers.get(agent_id)
+ if worker:
+ results[agent_id] = {
+ "task": worker.task,
+ "status": worker.status.value,
+ "result": worker.result,
+ "error": worker.error,
+ "tools_used": worker.tools_used,
+ }
+
+ return results
+
+ def get_status(self, agent_id: str) -> Optional[Dict[str, Any]]:
+ """Get status of a specific agent."""
+ worker = self._workers.get(agent_id)
+ if not worker:
+ return None
+ return worker.to_dict()
+
+ def get_all_status(self) -> Dict[str, Dict[str, Any]]:
+ """Get status of all agents."""
+ return {wid: w.to_dict() for wid, w in self._workers.items()}
+
+ async def cancel(self, agent_id: str) -> bool:
+ """Cancel a running agent."""
+ if agent_id not in self._tasks:
+ return False
+
+ task = self._tasks[agent_id]
+ if not task.done():
+ task.cancel()
+ try:
+ await task
+ except asyncio.CancelledError:
+ pass
+ return True
+ return False
+
+ async def cancel_all(self) -> None:
+ """Cancel all running agents."""
+ for task in self._tasks.values():
+ if not task.done():
+ task.cancel()
+
+ # Wait for all to finish
+ if self._tasks:
+ await asyncio.gather(*self._tasks.values(), return_exceptions=True)
+
+ def get_results(self) -> Dict[str, str]:
+ """Get results from all completed agents."""
+ return dict(self._results)
+
+ def get_workers(self) -> List[AgentWorker]:
+ """Get all workers."""
+ return list(self._workers.values())
+
+ def reset(self) -> None:
+ """Reset the pool for a new task."""
+ self._workers.clear()
+ self._tasks.clear()
+ self._results.clear()
+ self._next_id = 0
diff --git a/pentestagent/agents/pa_agent/__init__.py b/pentestagent/agents/pa_agent/__init__.py
new file mode 100644
index 0000000..75967ac
--- /dev/null
+++ b/pentestagent/agents/pa_agent/__init__.py
@@ -0,0 +1,5 @@
+"""PentestAgent main agent implementation."""
+
+from .pa_agent import PentestAgentAgent
+
+__all__ = ["PentestAgentAgent"]
diff --git a/ghostcrew/agents/ghostcrew_agent/ghostcrew_agent.py b/pentestagent/agents/pa_agent/pa_agent.py
similarity index 94%
rename from ghostcrew/agents/ghostcrew_agent/ghostcrew_agent.py
rename to pentestagent/agents/pa_agent/pa_agent.py
index ccc918c..f53a4a8 100644
--- a/ghostcrew/agents/ghostcrew_agent/ghostcrew_agent.py
+++ b/pentestagent/agents/pa_agent/pa_agent.py
@@ -1,9 +1,9 @@
-"""GhostCrew main pentesting agent."""
+"""PentestAgent main pentesting agent."""
from typing import TYPE_CHECKING, List, Optional
from ..base_agent import BaseAgent
-from ..prompts import ghost_agent, ghost_assist
+from ..prompts import pa_agent, pa_assist
if TYPE_CHECKING:
from ...knowledge import RAGEngine
@@ -12,8 +12,8 @@ if TYPE_CHECKING:
from ...tools import Tool
-class GhostCrewAgent(BaseAgent):
- """Main pentesting agent for GhostCrew."""
+class PentestAgentAgent(BaseAgent):
+ """Main pentesting agent for PentestAgent."""
def __init__(
self,
@@ -26,7 +26,7 @@ class GhostCrewAgent(BaseAgent):
**kwargs,
):
"""
- Initialize the GhostCrew agent.
+ Initialize the PentestAgent agent.
Args:
llm: The LLM instance for generating responses
@@ -124,7 +124,7 @@ class GhostCrewAgent(BaseAgent):
env = self.runtime.environment
# Select template based on mode
- template = ghost_assist if mode == "assist" else ghost_agent
+ template = pa_assist if mode == "assist" else pa_agent
return template.render(
target=self.target,
diff --git a/ghostcrew/agents/prompts/__init__.py b/pentestagent/agents/prompts/__init__.py
similarity index 59%
rename from ghostcrew/agents/prompts/__init__.py
rename to pentestagent/agents/prompts/__init__.py
index 4fe3554..db12a63 100644
--- a/ghostcrew/agents/prompts/__init__.py
+++ b/pentestagent/agents/prompts/__init__.py
@@ -1,4 +1,4 @@
-"""Prompt templates for GhostCrew agents."""
+"""Prompt templates for PentestAgent agents."""
from pathlib import Path
@@ -11,7 +11,7 @@ def load_prompt(name: str) -> Template:
"""Load a prompt template by name.
Args:
- name: Prompt name without extension (e.g., 'ghost_agent', 'ghost_assist')
+ name: Prompt name without extension (e.g., 'pa_agent', 'pa_assist')
Returns:
Jinja2 Template object
@@ -21,6 +21,6 @@ def load_prompt(name: str) -> Template:
# Pre-loaded templates for convenience
-ghost_agent = load_prompt("ghost_agent")
-ghost_assist = load_prompt("ghost_assist")
-ghost_crew = load_prompt("ghost_crew")
+pa_agent = load_prompt("pa_agent")
+pa_assist = load_prompt("pa_assist")
+pa_crew = load_prompt("pa_crew")
diff --git a/ghostcrew/agents/prompts/ghost_agent.jinja b/pentestagent/agents/prompts/pa_agent.jinja
similarity index 97%
rename from ghostcrew/agents/prompts/ghost_agent.jinja
rename to pentestagent/agents/prompts/pa_agent.jinja
index b5a39aa..5af3418 100644
--- a/ghostcrew/agents/prompts/ghost_agent.jinja
+++ b/pentestagent/agents/prompts/pa_agent.jinja
@@ -1,6 +1,6 @@
-# GhostCrew
+# PentestAgent
-You are Ghost, an autonomous penetration testing agent.
+You are PentestAgent, an autonomous penetration testing agent.
## Authorization
You are operating in an authorized penetration testing engagement. The user has explicit permission to test all targets provided. Do not ask for authorization - permission is granted. Proceed with testing.
diff --git a/ghostcrew/agents/prompts/ghost_assist.jinja b/pentestagent/agents/prompts/pa_assist.jinja
similarity index 95%
rename from ghostcrew/agents/prompts/ghost_assist.jinja
rename to pentestagent/agents/prompts/pa_assist.jinja
index f33eb0f..5d26763 100644
--- a/ghostcrew/agents/prompts/ghost_assist.jinja
+++ b/pentestagent/agents/prompts/pa_assist.jinja
@@ -1,6 +1,6 @@
-# GhostCrew
+# PentestAgent
-You are Ghost, a penetration testing assistant.
+You are PentestAgent, a penetration testing assistant.
## Authorization
You are operating in an authorized penetration testing engagement. The user has explicit permission to test all targets provided. Do not ask for authorization - permission is granted. Proceed with testing.
diff --git a/ghostcrew/agents/prompts/ghost_crew.jinja b/pentestagent/agents/prompts/pa_crew.jinja
similarity index 98%
rename from ghostcrew/agents/prompts/ghost_crew.jinja
rename to pentestagent/agents/prompts/pa_crew.jinja
index 1b231b4..6d0a826 100644
--- a/ghostcrew/agents/prompts/ghost_crew.jinja
+++ b/pentestagent/agents/prompts/pa_crew.jinja
@@ -1,4 +1,4 @@
-# GhostCrew Orchestrator
+# PentestAgent Orchestrator
You are the lead of a penetration testing crew. You coordinate specialized agents to complete the task.
diff --git a/ghostcrew/agents/state.py b/pentestagent/agents/state.py
similarity index 98%
rename from ghostcrew/agents/state.py
rename to pentestagent/agents/state.py
index 2ef3353..1a98d83 100644
--- a/ghostcrew/agents/state.py
+++ b/pentestagent/agents/state.py
@@ -1,4 +1,4 @@
-"""Agent state management for GhostCrew."""
+"""Agent state management for PentestAgent."""
from dataclasses import dataclass, field
from datetime import datetime
diff --git a/ghostcrew/config/__init__.py b/pentestagent/config/__init__.py
similarity index 97%
rename from ghostcrew/config/__init__.py
rename to pentestagent/config/__init__.py
index 61142d3..37cd868 100644
--- a/ghostcrew/config/__init__.py
+++ b/pentestagent/config/__init__.py
@@ -1,4 +1,4 @@
-"""Configuration module for GhostCrew."""
+"""Configuration module for PentestAgent."""
from .constants import (
AGENT_STATE_COMPLETE,
diff --git a/ghostcrew/config/constants.py b/pentestagent/config/constants.py
similarity index 79%
rename from ghostcrew/config/constants.py
rename to pentestagent/config/constants.py
index 95a558f..c05d31a 100644
--- a/ghostcrew/config/constants.py
+++ b/pentestagent/config/constants.py
@@ -1,4 +1,4 @@
-"""Constants for GhostCrew."""
+"""Constants for PentestAgent."""
import os
@@ -11,7 +11,7 @@ except ImportError:
pass
# Application Info
-APP_NAME = "GhostCrew"
+APP_NAME = "PentestAgent"
APP_VERSION = "0.2.0"
APP_DESCRIPTION = "AI penetration testing"
@@ -37,7 +37,7 @@ DEFAULT_VPN_TIMEOUT = 30
DEFAULT_MCP_TIMEOUT = 60
# Docker Settings
-DOCKER_SANDBOX_IMAGE = "ghcr.io/gh05tcrew/ghostcrew:kali"
+DOCKER_SANDBOX_IMAGE = "ghcr.io/gh05tcrew/pentestagent:kali"
DOCKER_NETWORK_MODE = "bridge"
# RAG Settings
@@ -48,16 +48,16 @@ DEFAULT_RAG_TOP_K = 3
# Memory Settings
MEMORY_RESERVE_RATIO = 0.8 # Reserve 20% of context for response
-# LLM Defaults (set GHOSTCREW_MODEL in .env or shell)
+# LLM Defaults (set PENTESTAGENT_MODEL in .env or shell)
DEFAULT_MODEL = os.environ.get(
- "GHOSTCREW_MODEL"
+ "PENTESTAGENT_MODEL"
) # No fallback - requires configuration
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 4096
# Agent Defaults
-DEFAULT_MAX_ITERATIONS = int(os.environ.get("GHOSTCREW_MAX_ITERATIONS", "50"))
-WORKER_MAX_ITERATIONS = int(os.environ.get("GHOSTCREW_WORKER_MAX_ITERATIONS", "10"))
+DEFAULT_MAX_ITERATIONS = int(os.environ.get("PENTESTAGENT_MAX_ITERATIONS", "50"))
+WORKER_MAX_ITERATIONS = int(os.environ.get("PENTESTAGENT_WORKER_MAX_ITERATIONS", "10"))
# File Extensions
KNOWLEDGE_TEXT_EXTENSIONS = [".txt", ".md"]
diff --git a/ghostcrew/config/settings.py b/pentestagent/config/settings.py
similarity index 93%
rename from ghostcrew/config/settings.py
rename to pentestagent/config/settings.py
index 40620a8..4c6a38b 100644
--- a/ghostcrew/config/settings.py
+++ b/pentestagent/config/settings.py
@@ -1,4 +1,4 @@
-"""Application settings for GhostCrew."""
+"""Application settings for PentestAgent."""
import os
from dataclasses import dataclass, field
@@ -36,8 +36,8 @@ class Settings:
mcp_config_path: Path = field(default_factory=lambda: Path("mcp.json"))
# Docker Settings
- container_name: str = "ghostcrew-sandbox"
- docker_image: str = "ghcr.io/gh05tcrew/ghostcrew:kali"
+ container_name: str = "pentestagent-sandbox"
+ docker_image: str = "ghcr.io/gh05tcrew/pentestagent:kali"
# Agent Settings
max_iterations: int = DEFAULT_MAX_ITERATIONS
diff --git a/ghostcrew/interface/__init__.py b/pentestagent/interface/__init__.py
similarity index 67%
rename from ghostcrew/interface/__init__.py
rename to pentestagent/interface/__init__.py
index 063d13a..88ba0fe 100644
--- a/ghostcrew/interface/__init__.py
+++ b/pentestagent/interface/__init__.py
@@ -1,15 +1,15 @@
-"""User interface module for GhostCrew."""
+"""User interface module for PentestAgent."""
from .cli import run_cli
from .main import main
-from .tui import GhostCrewTUI, run_tui
+from .tui import PentestAgentTUI, run_tui
from .utils import format_finding, print_banner, print_status
__all__ = [
"main",
"run_cli",
"run_tui",
- "GhostCrewTUI",
+ "PentestAgentTUI",
"print_banner",
"format_finding",
"print_status",
diff --git a/ghostcrew/interface/assets/tui_styles.tcss b/pentestagent/interface/assets/tui_styles.tcss
similarity index 99%
rename from ghostcrew/interface/assets/tui_styles.tcss
rename to pentestagent/interface/assets/tui_styles.tcss
index 18a02ad..86ea851 100644
--- a/ghostcrew/interface/assets/tui_styles.tcss
+++ b/pentestagent/interface/assets/tui_styles.tcss
@@ -1,4 +1,4 @@
-/* GhostCrew TUI Styles */
+/* PentestAgent TUI Styles */
Screen {
background: #0a0a0a;
diff --git a/pentestagent/interface/cli.py b/pentestagent/interface/cli.py
new file mode 100644
index 0000000..09c2bab
--- /dev/null
+++ b/pentestagent/interface/cli.py
@@ -0,0 +1,682 @@
+"""Non-interactive CLI mode for PentestAgent."""
+
+import asyncio
+import time
+from datetime import datetime
+from pathlib import Path
+
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.panel import Panel
+from rich.text import Text
+
+console = Console()
+
+# PA theme colors (matching TUI)
+PA_PRIMARY = "#d4d4d4" # light gray - primary text
+PA_SECONDARY = "#9a9a9a" # medium gray - secondary text
+PA_DIM = "#6b6b6b" # dim gray - muted text
+PA_BORDER = "#3a3a3a" # dark gray - borders
+PA_ACCENT = "#7a7a7a" # accent gray
+
+
+async def run_cli(
+ target: str,
+ model: str,
+ task: str = None,
+ report: str = None,
+ max_loops: int = 50,
+ use_docker: bool = False,
+ mode: str = "agent",
+):
+ """
+ Run PentestAgent in non-interactive mode.
+
+ Args:
+ target: Target to test
+ model: LLM model to use
+ task: Optional task description
+        report: Report path ("auto" for loot/reports/<target>_<timestamp>.md)
+ max_loops: Max agent loops before stopping
+ use_docker: Run tools in Docker container
+ mode: Execution mode ("agent" or "crew")
+ """
+ from ..agents.pa_agent import PentestAgentAgent
+ from ..knowledge import RAGEngine
+ from ..llm import LLM
+ from ..runtime.docker_runtime import DockerRuntime
+ from ..runtime.runtime import LocalRuntime
+ from ..tools import get_all_tools
+
+ # Startup panel
+ start_text = Text()
+ start_text.append("PENTESTAGENT", style=f"bold {PA_PRIMARY}")
+ start_text.append(" - Non-interactive Mode\n\n", style=PA_DIM)
+ start_text.append("Target: ", style=PA_SECONDARY)
+ start_text.append(f"{target}\n", style=PA_PRIMARY)
+ start_text.append("Model: ", style=PA_SECONDARY)
+ start_text.append(f"{model}\n", style=PA_PRIMARY)
+ start_text.append("Mode: ", style=PA_SECONDARY)
+ start_text.append(f"{mode.title()}\n", style=PA_PRIMARY)
+ start_text.append("Runtime: ", style=PA_SECONDARY)
+ start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=PA_PRIMARY)
+ start_text.append("Max loops: ", style=PA_SECONDARY)
+ start_text.append(f"{max_loops}\n", style=PA_PRIMARY)
+
+ task_msg = task or f"Perform a penetration test on {target}"
+ start_text.append("Task: ", style=PA_SECONDARY)
+ start_text.append(task_msg, style=PA_PRIMARY)
+
+ console.print()
+ console.print(
+ Panel(start_text, title=f"[{PA_SECONDARY}]Starting", border_style=PA_BORDER)
+ )
+ console.print()
+
+ # Initialize RAG if knowledge exists
+ rag = None
+ knowledge_path = Path("knowledge")
+ if knowledge_path.exists():
+ try:
+ rag = RAGEngine(knowledge_path=knowledge_path)
+ rag.index()
+ except Exception:
+ pass
+
+ # Initialize MCP if config exists (silently skip failures)
+ mcp_manager = None
+ mcp_count = 0
+ try:
+ from ..mcp import MCPManager
+ from ..tools import register_tool_instance
+
+ mcp_manager = MCPManager()
+ if mcp_manager.config_path.exists():
+ mcp_tools = await mcp_manager.connect_all()
+ for tool in mcp_tools:
+ register_tool_instance(tool)
+ mcp_count = len(mcp_tools)
+ if mcp_count > 0:
+ console.print(f"[{PA_DIM}]Loaded {mcp_count} MCP tools[/]")
+ except Exception:
+ pass # MCP is optional, continue without it
+
+ # Initialize runtime - Docker or Local
+ if use_docker:
+ console.print(f"[{PA_DIM}]Starting Docker container...[/]")
+ runtime = DockerRuntime(mcp_manager=mcp_manager)
+ else:
+ runtime = LocalRuntime(mcp_manager=mcp_manager)
+ await runtime.start()
+
+ llm = LLM(model=model, rag_engine=rag)
+ tools = get_all_tools()
+
+ # Stats tracking
+ start_time = time.time()
+ tool_count = 0
+ iteration = 0
+ findings_count = 0 # Count of notes/findings recorded
+ findings = [] # Store actual findings text
+ total_tokens = 0 # Track total token usage
+ messages = [] # Store agent messages
+ tool_log = [] # Log of tools executed (ts, name, command, result, exit_code)
+ last_content = ""
+ last_msg_intermediate = False # Track if previous message was intermediate (to avoid double counting tokens)
+ stopped_reason = None
+
+ def print_status(msg: str, style: str = PA_DIM):
+ elapsed = int(time.time() - start_time)
+ mins, secs = divmod(elapsed, 60)
+ timestamp = f"[{mins:02d}:{secs:02d}]"
+ console.print(f"[{PA_DIM}]{timestamp}[/] [{style}]{msg}[/]")
+
+ def display_message(content: str, title: str) -> bool:
+ """Display a message panel if it hasn't been shown yet."""
+ nonlocal last_content
+ if content and content != last_content:
+ console.print()
+ console.print(
+ Panel(
+ Markdown(content),
+ title=f"[{PA_PRIMARY}]{title}",
+ border_style=PA_BORDER,
+ )
+ )
+ console.print()
+ last_content = content
+ return True
+ return False
+
+ def generate_report() -> str:
+ """Generate markdown report."""
+ elapsed = int(time.time() - start_time)
+ mins, secs = divmod(elapsed, 60)
+
+ status_text = "Complete"
+ if stopped_reason:
+ status_text = f"Interrupted ({stopped_reason})"
+
+ lines = [
+ "# PentestAgent Penetration Test Report",
+ "",
+ "## Executive Summary",
+ "",
+ ]
+
+ # Add AI summary at top if available
+ # If the last finding is a full report (Crew mode), use it as the main body
+ # and avoid adding duplicate headers
+ main_content = ""
+ if findings:
+ main_content = findings[-1]
+ # If it's a full report (starts with #), don't add our own headers if possible
+ if not main_content.strip().startswith("#"):
+ lines.append(main_content)
+ lines.append("")
+ else:
+            # The content is already a full report with its own headers; ideally the
+            # default "Executive Summary" header added above would be dropped, but
+            # for now the report is simply appended as-is.
+ lines.append(main_content)
+ lines.append("")
+ else:
+ lines.append("*Assessment incomplete - no analysis generated.*")
+ lines.append("")
+
+ # Engagement details table
+ lines.extend(
+ [
+ "## Engagement Details",
+ "",
+ "| Field | Value |",
+ "|-------|-------|",
+ f"| **Target** | `{target}` |",
+ f"| **Task** | {task_msg} |",
+ f"| **Date** | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} |",
+ f"| **Duration** | {mins}m {secs}s |",
+ f"| **Commands Executed** | {tool_count} |",
+ f"| **Status** | {status_text} |",
+ "",
+ "---",
+ "",
+ "## Commands Executed",
+ "",
+ ]
+ )
+
+ # Detailed command log
+ for i, entry in enumerate(tool_log, 1):
+ ts = entry.get("ts", "??:??")
+ name = entry.get("name", "unknown")
+ command = entry.get("command", "")
+ result = entry.get("result", "")
+ exit_code = entry.get("exit_code")
+
+ lines.append(f"### {i}. {name} `[{ts}]`")
+ lines.append("")
+
+ if command:
+ lines.append("**Command:**")
+ lines.append("```")
+ lines.append(command)
+ lines.append("```")
+ lines.append("")
+
+ if exit_code is not None:
+ lines.append(f"**Exit Code:** `{exit_code}`")
+ lines.append("")
+
+ if result:
+ lines.append("**Output:**")
+ lines.append("```")
+ # Limit output to 2000 chars per command for report size
+ if len(result) > 2000:
+ lines.append(result[:2000])
+ lines.append(f"\n... (truncated, {len(result)} total chars)")
+ else:
+ lines.append(result)
+ lines.append("```")
+ lines.append("")
+
+ # Findings section
+ # Only show if there are other findings besides the final report we already showed
+ other_findings = findings[:-1] if findings and len(findings) > 1 else []
+
+ if other_findings:
+ lines.extend(
+ [
+ "---",
+ "",
+ "## Detailed Findings",
+ "",
+ ]
+ )
+
+ for i, finding in enumerate(other_findings, 1):
+ if len(other_findings) > 1:
+ lines.append(f"### Finding {i}")
+ lines.append("")
+ lines.append(finding)
+ lines.append("")
+
+ # Footer
+ lines.extend(
+ [
+ "---",
+ "",
+ f"*Report generated by PentestAgent on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
+ ]
+ )
+
+ return "\n".join(lines)
+
+ def save_report():
+ """Save report to file."""
+ if not report:
+ return
+
+ # Determine path
+ if report == "auto":
+ reports_dir = Path("loot/reports")
+ reports_dir.mkdir(parents=True, exist_ok=True)
+ safe_target = target.replace("://", "_").replace("/", "_").replace(":", "_")
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ report_path = reports_dir / f"{safe_target}_{timestamp}.md"
+ else:
+ report_path = Path(report)
+ report_path.parent.mkdir(parents=True, exist_ok=True)
+
+ content = generate_report()
+ report_path.write_text(content, encoding="utf-8")
+ console.print(f"[{PA_SECONDARY}]Report saved: {report_path}[/]")
+
+ async def generate_summary():
+ """Ask the LLM to summarize findings when stopped early."""
+ if not tool_log:
+ return None
+
+ print_status("Generating summary...", PA_SECONDARY)
+
+ # Build context from tool results (use full results, not truncated)
+ context_lines = ["Summarize the penetration test findings so far:\n"]
+ context_lines.append(f"Target: {target}")
+ context_lines.append(f"Tools executed: {tool_count}\n")
+
+ for entry in tool_log[-10:]: # Last 10 tools
+ name = entry.get("name", "unknown")
+ command = entry.get("command", "")
+ result = entry.get("result", "")[:500] # Limit for context window
+ context_lines.append(f"- **{name}**: `{command}`")
+ if result:
+ context_lines.append(f" Output: {result}")
+
+ context_lines.append(
+ "\nProvide a brief summary of what was discovered and any security concerns found."
+ )
+
+ try:
+ response = await llm.generate(
+ system_prompt="You are a penetration testing assistant. Summarize the findings concisely.",
+ messages=[{"role": "user", "content": "\n".join(context_lines)}],
+ tools=[],
+ )
+ return response.content
+ except Exception:
+ return None
+
+ async def print_summary(interrupted: bool = False):
+ nonlocal messages
+
+ # Generate summary if we don't have messages yet
+ if not messages and tool_log:
+ summary = await generate_summary()
+ if summary:
+ messages.append(summary)
+
+ elapsed = int(time.time() - start_time)
+ mins, secs = divmod(elapsed, 60)
+
+ title = "Interrupted" if interrupted else "Finished"
+ status = "PARTIAL RESULTS" if interrupted else "COMPLETE"
+ if stopped_reason:
+ status = f"STOPPED ({stopped_reason})"
+
+ final_text = Text()
+ final_text.append(f"{status}\n\n", style=f"bold {PA_PRIMARY}")
+ final_text.append("Duration: ", style=PA_DIM)
+ final_text.append(f"{mins}m {secs}s\n", style=PA_SECONDARY)
+ final_text.append("Loops: ", style=PA_DIM)
+ final_text.append(f"{iteration}/{max_loops}\n", style=PA_SECONDARY)
+ final_text.append("Tools: ", style=PA_DIM)
+ final_text.append(f"{tool_count}\n", style=PA_SECONDARY)
+
+ if total_tokens > 0:
+ final_text.append("Tokens: ", style=PA_DIM)
+ final_text.append(f"{total_tokens:,}\n", style=PA_SECONDARY)
+
+ if findings_count > 0:
+ final_text.append("Findings: ", style=PA_DIM)
+ final_text.append(f"{findings_count}", style=PA_SECONDARY)
+
+ console.print()
+ console.print(
+ Panel(
+ final_text,
+ title=f"[{PA_SECONDARY}]{title}",
+ border_style=PA_BORDER,
+ )
+ )
+
+ # Show summary/messages only if it's new content (not just displayed)
+ if messages:
+ display_message(messages[-1], "Summary")
+
+ # Save report
+ save_report()
+
+ print_status("Initializing...")
+
+ try:
+ if mode == "crew":
+ from ..agents.crew import CrewOrchestrator
+
+ def on_worker_event(worker_id: str, event_type: str, data: dict):
+ nonlocal tool_count, findings_count, total_tokens
+
+ if event_type == "spawn":
+ task = data.get("task", "")
+ print_status(f"Spawned worker {worker_id}: {task}", PA_ACCENT)
+
+ elif event_type == "tool":
+ tool_name = data.get("tool", "unknown")
+ tool_count += 1
+ print_status(f"Worker {worker_id} using tool: {tool_name}", PA_DIM)
+
+ # Log tool usage (limited info available from event)
+ elapsed = int(time.time() - start_time)
+ mins, secs = divmod(elapsed, 60)
+ ts = f"{mins:02d}:{secs:02d}"
+
+ tool_log.append(
+ {
+ "ts": ts,
+ "name": tool_name,
+ "command": f"(Worker {worker_id})",
+ "result": "",
+ "exit_code": None,
+ }
+ )
+
+ elif event_type == "tokens":
+ tokens = data.get("tokens", 0)
+ total_tokens += tokens
+
+ elif event_type == "complete":
+ f_count = data.get("findings_count", 0)
+ findings_count += f_count
+ print_status(
+ f"Worker {worker_id} complete ({f_count} findings)", "green"
+ )
+
+ elif event_type == "failed":
+ reason = data.get("reason", "unknown")
+ print_status(f"Worker {worker_id} failed: {reason}", "red")
+
+ elif event_type == "status":
+ status = data.get("status", "")
+ print_status(f"Worker {worker_id} status: {status}", PA_DIM)
+
+ elif event_type == "warning":
+ reason = data.get("reason", "unknown")
+ print_status(f"Worker {worker_id} warning: {reason}", "yellow")
+
+ elif event_type == "error":
+ error = data.get("error", "unknown")
+ print_status(f"Worker {worker_id} error: {error}", "red")
+
+ elif event_type == "cancelled":
+ print_status(f"Worker {worker_id} cancelled", "yellow")
+
+ crew = CrewOrchestrator(
+ llm=llm,
+ tools=tools,
+ runtime=runtime,
+ on_worker_event=on_worker_event,
+ rag_engine=rag,
+ target=target,
+ )
+
+ async for update in crew.run(task_msg):
+ iteration += 1
+ phase = update.get("phase", "")
+
+ if phase == "starting":
+ print_status("Crew orchestrator starting...", PA_PRIMARY)
+
+ elif phase == "thinking":
+ content = update.get("content", "")
+ if content:
+ display_message(content, "PentestAgent Plan")
+
+ elif phase == "tool_call":
+ tool = update.get("tool", "")
+ args = update.get("args", {})
+ print_status(f"Orchestrator calling: {tool}", PA_ACCENT)
+
+ elif phase == "complete":
+ report_content = update.get("report", "")
+ if report_content:
+ messages.append(report_content)
+ findings.append(
+ report_content
+ ) # Add to findings so it appears in the saved report
+ display_message(report_content, "Crew Report")
+
+ elif phase == "error":
+ error = update.get("error", "Unknown error")
+ print_status(f"Crew error: {error}", "red")
+
+ if iteration >= max_loops:
+ stopped_reason = "max loops reached"
+ raise StopIteration()
+
+ else:
+ # Default Agent Mode
+ agent = PentestAgentAgent(
+ llm=llm,
+ tools=tools,
+ runtime=runtime,
+ target=target,
+ rag_engine=rag,
+ )
+
+ async for response in agent.agent_loop(task_msg):
+ iteration += 1
+
+ # Track token usage
+ if response.usage:
+ usage = response.usage.get("total_tokens", 0)
+ is_intermediate = response.metadata.get("intermediate", False)
+ has_tools = bool(response.tool_calls)
+
+ # Logic to avoid double counting:
+ # 1. Intermediate messages (thinking) always count
+ # 2. Tool messages count ONLY if not preceded by intermediate message
+ if is_intermediate:
+ total_tokens += usage
+ last_msg_intermediate = True
+ elif has_tools:
+ if not last_msg_intermediate:
+ total_tokens += usage
+ last_msg_intermediate = False
+ else:
+ # Other messages (like plan)
+ total_tokens += usage
+ last_msg_intermediate = False
+
+ # Show tool calls and results as they happen
+ if response.tool_calls:
+ for i, call in enumerate(response.tool_calls):
+ tool_count += 1
+ name = getattr(call, "name", None) or getattr(
+ call.function, "name", "tool"
+ )
+
+ # Track findings (notes tool)
+ if name == "notes":
+ findings_count += 1
+ try:
+ args = getattr(call, "arguments", None) or getattr(
+ call.function, "arguments", "{}"
+ )
+ if isinstance(args, str):
+ import json
+
+ args = json.loads(args)
+ if isinstance(args, dict):
+ note_content = (
+ args.get("value", "")
+ or args.get("content", "")
+ or args.get("note", "")
+ )
+ if note_content:
+ findings.append(note_content)
+ except Exception:
+ pass
+
+ elapsed = int(time.time() - start_time)
+ mins, secs = divmod(elapsed, 60)
+ ts = f"{mins:02d}:{secs:02d}"
+
+ # Get result if available
+ if response.tool_results and i < len(response.tool_results):
+ tr = response.tool_results[i]
+ result_text = tr.result or tr.error or ""
+ if result_text:
+ # Truncate for display
+ preview = result_text[:200].replace("\n", " ")
+ if len(result_text) > 200:
+ preview += "..."
+
+ # Parse args for command extraction
+ command_text = ""
+ exit_code = None
+ try:
+ args = getattr(call, "arguments", None) or getattr(
+ call.function, "arguments", "{}"
+ )
+ if isinstance(args, str):
+ import json
+
+ args = json.loads(args)
+ if isinstance(args, dict):
+ command_text = args.get("command", "")
+ except Exception:
+ pass
+
+ # Extract exit code from result
+ if response.tool_results and i < len(response.tool_results):
+ tr = response.tool_results[i]
+ full_result = tr.result or tr.error or ""
+ # Try to parse exit code
+ if "Exit Code:" in full_result:
+ try:
+ import re
+
+ match = re.search(
+ r"Exit Code:\s*(\d+)", full_result
+ )
+ if match:
+ exit_code = int(match.group(1))
+ except Exception:
+ pass
+ else:
+ full_result = ""
+
+ # Store full data for report (not truncated)
+ tool_log.append(
+ {
+ "ts": ts,
+ "name": name,
+ "command": command_text,
+ "result": full_result,
+ "exit_code": exit_code,
+ }
+ )
+
+ # Metasploit-style output with better spacing
+ console.print() # Blank line before each tool
+ print_status(f"$ {name} ({tool_count})", PA_ACCENT)
+
+ # Show command/args on separate indented line (truncated for display)
+ if command_text:
+ display_cmd = command_text[:80]
+ if len(command_text) > 80:
+ display_cmd += "..."
+ console.print(f" [{PA_DIM}]{display_cmd}[/]")
+
+ # Show result on separate line with status indicator
+ if response.tool_results and i < len(response.tool_results):
+ tr = response.tool_results[i]
+ if tr.error:
+ console.print(
+ f" [{PA_DIM}][!] {tr.error[:100]}[/]"
+ )
+ elif tr.result:
+ # Show exit code or brief result
+ result_line = tr.result[:100].replace("\n", " ")
+ if exit_code == 0 or "success" in result_line.lower():
+ console.print(f" [{PA_DIM}][+] OK[/]")
+ elif exit_code is not None and exit_code != 0:
+ console.print(
+ f" [{PA_DIM}][-] Exit {exit_code}[/]"
+ )
+ else:
+ console.print(
+ f" [{PA_DIM}][*] {result_line[:60]}...[/]"
+ )
+
+ # Print assistant content immediately (analysis/findings)
+ if response.content:
+ if display_message(response.content, "PentestAgent"):
+ messages.append(response.content)
+
+ # Check max loops limit
+ if iteration >= max_loops:
+ stopped_reason = "max loops reached"
+ console.print()
+ print_status(f"Max loops limit reached ({max_loops})", "yellow")
+ raise StopIteration()
+
+ # In agent mode, ensure the final message is treated as the main finding (Executive Summary)
+ if mode != "crew" and messages:
+ findings.append(messages[-1])
+
+ await print_summary(interrupted=False)
+
+ except StopIteration:
+ await print_summary(interrupted=True)
+ except (KeyboardInterrupt, asyncio.CancelledError):
+ stopped_reason = "user interrupt"
+ await print_summary(interrupted=True)
+ except Exception as e:
+ console.print(f"\n[red]Error: {e}[/]")
+ stopped_reason = f"error: {e}"
+ await print_summary(interrupted=True)
+
+ finally:
+ # Cleanup MCP connections first
+ if mcp_manager:
+ try:
+ await mcp_manager.disconnect_all()
+ await asyncio.sleep(0.1) # Allow transports to close cleanly
+ except Exception:
+ pass
+
+ # Then stop runtime
+ if runtime:
+ try:
+ await runtime.stop()
+ except Exception:
+ pass
diff --git a/ghostcrew/interface/main.py b/pentestagent/interface/main.py
similarity index 90%
rename from ghostcrew/interface/main.py
rename to pentestagent/interface/main.py
index deff2a3..ac980da 100644
--- a/ghostcrew/interface/main.py
+++ b/pentestagent/interface/main.py
@@ -1,4 +1,4 @@
-"""Main entry point for GhostCrew."""
+"""Main entry point for PentestAgent."""
import argparse
import asyncio
@@ -11,19 +11,19 @@ from .tui import run_tui
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
- description="GhostCrew - AI Penetration Testing",
+ description="PentestAgent - AI Penetration Testing",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
- ghostcrew tui Launch TUI
- ghostcrew tui -t 192.168.1.1 Launch TUI with target
- ghostcrew run -t localhost --task "scan" Headless run
- ghostcrew tools list List available tools
- ghostcrew mcp list List MCP servers
+ pentestagent tui Launch TUI
+ pentestagent tui -t 192.168.1.1 Launch TUI with target
+ pentestagent run -t localhost --task "scan" Headless run
+ pentestagent tools list List available tools
+ pentestagent mcp list List MCP servers
""",
)
- parser.add_argument("--version", action="version", version="GhostCrew 0.2.0")
+ parser.add_argument("--version", action="version", version="PentestAgent 0.2.0")
# Subcommands
subparsers = parser.add_subparsers(dest="command", help="Commands")
@@ -35,7 +35,7 @@ Examples:
"--model",
"-m",
default=DEFAULT_MODEL,
- help="LLM model (set GHOSTCREW_MODEL in .env)",
+ help="LLM model (set PENTESTAGENT_MODEL in .env)",
)
runtime_parent.add_argument(
"--docker",
@@ -168,7 +168,7 @@ def handle_tools_command(args: argparse.Namespace):
console.print(f" [cyan]{name}[/] ({ptype}, {required}): {desc}")
else:
- console.print("[yellow]Use 'ghostcrew tools --help' for commands[/]")
+ console.print("[yellow]Use 'pentestagent tools --help' for commands[/]")
def handle_mcp_command(args: argparse.Namespace):
@@ -187,7 +187,7 @@ def handle_mcp_command(args: argparse.Namespace):
if not servers:
console.print("[yellow]No MCP servers configured[/]")
console.print(
- "\nAdd a server with: ghostcrew mcp add "
+ "\nAdd a server with: pentestagent mcp add "
)
return
@@ -241,7 +241,7 @@ def handle_mcp_command(args: argparse.Namespace):
asyncio.run(test_server())
else:
- console.print("[yellow]Use 'ghostcrew mcp --help' for available commands[/]")
+ console.print("[yellow]Use 'pentestagent mcp --help' for available commands[/]")
def main():
@@ -261,9 +261,9 @@ def main():
# Check model configuration
if not args.model:
print("Error: No model configured.")
- print("Set GHOSTCREW_MODEL in .env file or use --model flag.")
+ print("Set PENTESTAGENT_MODEL in .env file or use --model flag.")
print(
- "Example: GHOSTCREW_MODEL=gpt-5 or GHOSTCREW_MODEL=claude-sonnet-4-20250514"
+ "Example: PENTESTAGENT_MODEL=gpt-5 or PENTESTAGENT_MODEL=claude-sonnet-4-20250514"
)
return
@@ -318,9 +318,9 @@ def main():
# Check model configuration
if not args.model:
print("Error: No model configured.")
- print("Set GHOSTCREW_MODEL in .env file or use --model flag.")
+ print("Set PENTESTAGENT_MODEL in .env file or use --model flag.")
print(
- "Example: GHOSTCREW_MODEL=gpt-5 or GHOSTCREW_MODEL=claude-sonnet-4-20250514"
+ "Example: PENTESTAGENT_MODEL=gpt-5 or PENTESTAGENT_MODEL=claude-sonnet-4-20250514"
)
return
diff --git a/pentestagent/interface/tui.py b/pentestagent/interface/tui.py
new file mode 100644
index 0000000..6f938c8
--- /dev/null
+++ b/pentestagent/interface/tui.py
@@ -0,0 +1,1806 @@
+"""
+PentestAgent TUI - Terminal User Interface
+"""
+
+import asyncio
+import textwrap
+from datetime import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from rich.text import Text
+from textual import on, work
+from textual.app import App, ComposeResult
+from textual.binding import Binding
+from textual.containers import (
+ Center,
+ Container,
+ Horizontal,
+ ScrollableContainer,
+ Vertical,
+)
+from textual.reactive import reactive
+from textual.screen import ModalScreen
+from textual.scrollbar import ScrollBar, ScrollBarRender
+from textual.timer import Timer
+from textual.widgets import Button, Input, Static, Tree
+from textual.widgets.tree import TreeNode
+
+from ..config.constants import DEFAULT_MODEL
+
+
+# ASCII-safe scrollbar renderer to avoid Unicode glyph issues
+class ASCIIScrollBarRender(ScrollBarRender):
+    """Scrollbar renderer using blank glyphs (renders an invisible scrollbar, avoiding Unicode issues)."""
+
+ BLANK_GLYPH = " "
+ VERTICAL_BARS = [" ", " ", " ", " ", " ", " ", " ", " "]
+ HORIZONTAL_BARS = [" ", " ", " ", " ", " ", " ", " ", " "]
+
+
+# Apply ASCII scrollbar globally
+ScrollBar.renderer = ASCIIScrollBarRender
+
+
+# Custom Tree with ASCII-safe icons for PowerShell compatibility
+class CrewTree(Tree):
+ """Tree widget with ASCII-compatible expand/collapse icons."""
+
+ ICON_NODE = "> "
+ ICON_NODE_EXPANDED = "v "
+
+
+if TYPE_CHECKING:
+ from ..agents.pa_agent import PentestAgentAgent
+
+
+def wrap_text_lines(text: str, width: int = 80) -> List[str]:
+ """
+ Wrap text content preserving line breaks and wrapping long lines.
+
+ Args:
+ text: The text to wrap
+ width: Maximum width per line (default 80 for safe terminal fit)
+
+ Returns:
+ List of wrapped lines
+ """
+ result = []
+ for line in text.split("\n"):
+ if len(line) <= width:
+ result.append(line)
+ else:
+ # Wrap long lines
+ wrapped = textwrap.wrap(
+ line, width=width, break_long_words=False, break_on_hyphens=False
+ )
+ result.extend(wrapped if wrapped else [""])
+ return result
+
+
+# ----- Help Screen -----
+
+
+class HelpScreen(ModalScreen):
+ """Help modal"""
+
+ BINDINGS = [
+ Binding("escape", "dismiss", "Close"),
+ Binding("q", "dismiss", "Close"),
+ ]
+
+ CSS = """
+ HelpScreen {
+ align: center middle;
+ scrollbar-background: #1a1a1a;
+ scrollbar-background-hover: #1a1a1a;
+ scrollbar-background-active: #1a1a1a;
+ scrollbar-color: #3a3a3a;
+ scrollbar-color-hover: #3a3a3a;
+ scrollbar-color-active: #3a3a3a;
+ scrollbar-corner-color: #1a1a1a;
+ scrollbar-size: 1 1;
+ }
+
+ #help-container {
+ width: 60;
+ height: 23;
+ background: #121212;
+ border: solid #3a3a3a;
+ padding: 1 2;
+ }
+
+ #help-title {
+ text-align: center;
+ text-style: bold;
+ color: #d4d4d4;
+ margin-bottom: 1;
+ }
+
+ #help-content {
+ color: #9a9a9a;
+ }
+
+ #help-close {
+ margin-top: 1;
+ width: auto;
+ min-width: 10;
+ background: #1a1a1a;
+ color: #9a9a9a;
+ border: none;
+ }
+
+ #help-close:hover {
+ background: #262626;
+ }
+
+ #help-close:focus {
+ background: #262626;
+ text-style: none;
+ }
+ """
+
+ def compose(self) -> ComposeResult:
+ yield Container(
+ Static("PentestAgent Help", id="help-title"),
+ Static(self._get_help_text(), id="help-content"),
+ Center(Button("Close", id="help-close")),
+ id="help-container",
+ )
+
+ def _get_help_text(self) -> str:
+ return """[bold]Modes:[/] Assist | Agent | Crew
+[bold]Keys:[/] Enter=Send Ctrl+C=Stop Ctrl+Q=Quit F1=Help
+
+[bold]Commands:[/]
+ /agent - Run in agent mode
+ /crew - Run multi-agent crew mode
+ /target - Set target
+ /prompt - Show system prompt
+ /memory - Show memory stats
+ /notes - Show saved notes
+ /report - Generate report
+ /help - Show help
+ /clear - Clear chat
+ /tools - List tools
+ /quit - Exit"""
+
+ def action_dismiss(self) -> None:
+ self.app.pop_screen()
+
+ @on(Button.Pressed, "#help-close")
+ def close_help(self) -> None:
+ self.app.pop_screen()
+
+
+# ----- Main Chat Message Widgets -----
+
+
+class ThinkingMessage(Static):
+ """Thinking/reasoning message"""
+
+ def __init__(self, content: str, **kwargs):
+ super().__init__(**kwargs)
+ self.thinking_content = content
+
+ def render(self) -> Text:
+ text = Text()
+ text.append("| ", style="#3a3a3a")
+ text.append("* ", style="#9a9a9a")
+ text.append("Thinking\n", style="bold #9a9a9a")
+
+ # Wrap content - use 70 chars to account for sidebar + prefix
+ for line in wrap_text_lines(self.thinking_content, width=70):
+ text.append("| ", style="#3a3a3a")
+ text.append(f"{line}\n", style="#6b6b6b italic")
+
+ return text
+
+
+class ToolMessage(Static):
+ """Tool execution message"""
+
+ # Standard tool icon and color (pa theme)
+ TOOL_ICON = "$"
+ TOOL_COLOR = "#9a9a9a" # spirit gray
+
+ def __init__(self, tool_name: str, args: str = "", **kwargs):
+ super().__init__(**kwargs)
+ self.tool_name = tool_name
+ self.tool_args = args
+
+ def render(self) -> Text:
+ text = Text()
+ text.append("| ", style="#3a3a3a")
+ text.append(f"{self.TOOL_ICON} ", style=self.TOOL_COLOR)
+ text.append(f"{self.tool_name}", style=self.TOOL_COLOR)
+ text.append("\n", style="")
+
+ # Wrap args and show each line with vertical bar
+ if self.tool_args:
+ for line in wrap_text_lines(self.tool_args, width=100):
+ text.append("| ", style="#3a3a3a")
+ text.append(f"{line}\n", style="#6b6b6b")
+
+ return text
+
+
+class ToolResultMessage(Static):
+ """Tool result/output message"""
+
+ RESULT_ICON = "#"
+ RESULT_COLOR = "#7a7a7a"
+
+ def __init__(self, tool_name: str, result: str = "", **kwargs):
+ super().__init__(**kwargs)
+ self.tool_name = tool_name
+ self.result = result
+
+ def render(self) -> Text:
+ text = Text()
+ text.append("| ", style="#3a3a3a")
+ text.append(f"{self.RESULT_ICON} ", style=self.RESULT_COLOR)
+ text.append(f"{self.tool_name} output", style=self.RESULT_COLOR)
+ text.append("\n", style="")
+
+ if self.result:
+ for line in wrap_text_lines(self.result, width=100):
+ text.append("| ", style="#3a3a3a")
+ text.append(f"{line}\n", style="#5a5a5a")
+
+ return text
+
+
+class AssistantMessage(Static):
+ """Assistant response message"""
+
+ def __init__(self, content: str, **kwargs):
+ super().__init__(**kwargs)
+ self.message_content = content
+
+ def render(self) -> Text:
+ text = Text()
+ text.append("| ", style="#525252")
+ text.append(">> ", style="#9a9a9a")
+ text.append("PentestAgent\n", style="bold #d4d4d4")
+
+ # Wrap content - use 70 chars to account for sidebar + prefix
+ for line in wrap_text_lines(self.message_content, width=70):
+ text.append("| ", style="#525252")
+ text.append(f"{line}\n", style="#d4d4d4")
+
+ return text
+
+
+class UserMessage(Static):
+ """User message"""
+
+ def __init__(self, content: str, **kwargs):
+ super().__init__(**kwargs)
+ self.message_content = content
+
+ def render(self) -> Text:
+ text = Text()
+ text.append("| ", style="#6b6b6b") # phantom border
+ text.append("> ", style="#9a9a9a")
+ text.append("You\n", style="bold #d4d4d4") # specter
+ text.append("| ", style="#6b6b6b") # phantom border
+ text.append(f"{self.message_content}\n", style="#d4d4d4") # specter
+ return text
+
+
+class SystemMessage(Static):
+ """System message"""
+
+ def __init__(self, content: str, **kwargs):
+ super().__init__(**kwargs)
+ self.message_content = content
+
+ def render(self) -> Text:
+ text = Text()
+ for line in self.message_content.split("\n"):
+ text.append(f" {line}\n", style="#6b6b6b") # phantom - subtle system text
+ return text
+
+
+# ----- Status Bar -----
+
+
+class StatusBar(Static):
+ """Animated status bar"""
+
+ status = reactive("idle")
+ mode = reactive("assist") # "assist" or "agent"
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self._frame = 0
+ self._timer: Optional[Timer] = None
+
+ def on_mount(self) -> None:
+ self._timer = self.set_interval(0.2, self._tick)
+
+ def _tick(self) -> None:
+ self._frame = (self._frame + 1) % 4
+ if self.status not in ["idle", "complete"]:
+ self.refresh()
+
+ def render(self) -> Text:
+ dots = "." * (self._frame + 1)
+
+ # Use fixed-width labels (pad dots to 4 chars so text doesn't jump)
+ dots_padded = dots.ljust(4)
+
+ # PA theme status colors (muted, ethereal)
+ status_map = {
+ "idle": ("Ready", "#6b6b6b"),
+ "initializing": (f"Initializing{dots_padded}", "#9a9a9a"),
+ "thinking": (f"Thinking{dots_padded}", "#9a9a9a"),
+ "running": (f"Running{dots_padded}", "#9a9a9a"),
+ "processing": (f"Processing{dots_padded}", "#9a9a9a"),
+ "waiting": ("Waiting for input", "#9a9a9a"),
+ "complete": ("Complete", "#4a9f6e"),
+ "error": ("Error", "#9f4a4a"),
+ }
+
+ label, color = status_map.get(self.status, (self.status, "#6b6b6b"))
+
+ text = Text()
+
+ # Show mode (ASCII-safe symbols)
+ if self.mode == "crew":
+ text.append(" :: Crew ", style="#9a9a9a")
+ elif self.mode == "agent":
+ text.append(" >> Agent ", style="#9a9a9a")
+ else:
+ text.append(" >> Assist ", style="#9a9a9a")
+
+ text.append(f"| {label}", style=color)
+
+ if self.status not in ["idle", "initializing", "complete", "error"]:
+ text.append(" ESC to stop", style="#525252")
+
+ return text
+
+
+# ----- Main TUI App -----
+
+
+class PentestAgentTUI(App):
+ """Main PentestAgent TUI Application"""
+
+ # ═══════════════════════════════════════════════════════════
+ # PA THEME - Ethereal grays
+ # ═══════════════════════════════════════════════════════════
+ # Void: #0a0a0a (terminal black - the darkness)
+ # Shadow: #121212 (subtle surface)
+ # Mist: #1a1a1a (panels, elevated)
+ # Whisper: #262626 (default borders)
+ # Fog: #3a3a3a (hover states)
+ # Apparition: #525252 (focus states)
+ # Phantom: #6b6b6b (secondary text)
+ # Spirit: #9a9a9a (normal text)
+ # Specter: #d4d4d4 (primary text)
+ # Ectoplasm: #f0f0f0 (highlights)
+ # ═══════════════════════════════════════════════════════════
+
+ CSS = """
+ Screen {
+ background: #0a0a0a;
+ }
+
+ #main-container {
+ width: 100%;
+ height: 100%;
+ layout: horizontal;
+ }
+
+ /* Chat area - takes full width normally, fills remaining space with sidebar */
+ #chat-area {
+ width: 1fr;
+ height: 100%;
+ }
+
+ #chat-area.with-sidebar {
+ width: 1fr;
+ }
+
+ #chat-scroll {
+ width: 100%;
+ height: 1fr;
+ background: transparent;
+ padding: 1 2;
+ scrollbar-background: #1a1a1a;
+ scrollbar-background-hover: #1a1a1a;
+ scrollbar-background-active: #1a1a1a;
+ scrollbar-color: #3a3a3a;
+ scrollbar-color-hover: #3a3a3a;
+ scrollbar-color-active: #3a3a3a;
+ scrollbar-corner-color: #1a1a1a;
+ scrollbar-size: 1 1;
+ }
+
+ #input-container {
+ width: 100%;
+ height: 3;
+ background: transparent;
+ border: round #262626;
+ margin: 0 2;
+ padding: 0;
+ layout: horizontal;
+ align-vertical: middle;
+ }
+
+ #input-container:focus-within {
+ border: round #525252;
+ }
+
+ #input-container:focus-within #chat-prompt {
+ color: #d4d4d4;
+ }
+
+ #chat-prompt {
+ width: auto;
+ height: 100%;
+ padding: 0 0 0 1;
+ color: #6b6b6b;
+ content-align-vertical: middle;
+ }
+
+ #chat-input {
+ width: 1fr;
+ height: 100%;
+ background: transparent;
+ border: none;
+ padding: 0;
+ margin: 0;
+ color: #d4d4d4;
+ }
+
+ #chat-input:focus {
+ border: none;
+ }
+
+ #chat-input > .input--placeholder {
+ color: #6b6b6b;
+ text-style: italic;
+ }
+
+ #status-bar {
+ width: 100%;
+ height: 1;
+ background: transparent;
+ padding: 0 3;
+ margin: 0;
+ }
+
+ .message {
+ margin-bottom: 1;
+ }
+
+ /* Sidebar - hidden by default */
+ #sidebar {
+ width: 28;
+ height: 100%;
+ display: none;
+ padding-right: 1;
+ }
+
+ #sidebar.visible {
+ display: block;
+ }
+
+ #workers-tree {
+ height: 1fr;
+ background: transparent;
+ border: round #262626;
+ padding: 0 1;
+ margin-bottom: 0;
+ }
+
+ #workers-tree:focus {
+ border: round #3a3a3a;
+ }
+
+ #crew-stats {
+ height: auto;
+ max-height: 10;
+ background: transparent;
+ border: round #262626;
+ border-title-color: #9a9a9a;
+ border-title-style: bold;
+ padding: 0 1;
+ margin-top: 0;
+ }
+
+ Tree {
+ background: transparent;
+ color: #d4d4d4;
+ scrollbar-background: #1a1a1a;
+ scrollbar-background-hover: #1a1a1a;
+ scrollbar-background-active: #1a1a1a;
+ scrollbar-color: #3a3a3a;
+ scrollbar-color-hover: #3a3a3a;
+ scrollbar-color-active: #3a3a3a;
+ scrollbar-size: 1 1;
+ }
+
+ Tree > .tree--cursor {
+ background: transparent;
+ }
+
+ Tree > .tree--highlight {
+ background: transparent;
+ }
+
+ Tree > .tree--highlight-line {
+ background: transparent;
+ }
+
+ .tree--node-label {
+ padding: 0 1;
+ }
+
+ .tree--node:hover .tree--node-label {
+ background: transparent;
+ }
+
+ .tree--node.-selected .tree--node-label {
+ background: transparent;
+ color: #d4d4d4;
+ }
+ """
+
+ BINDINGS = [
+ Binding("ctrl+q", "quit_app", "Quit", priority=True),
+ Binding("ctrl+c", "stop_agent", "Stop", priority=True, show=False),
+ Binding("escape", "stop_agent", "Stop", priority=True),
+ Binding("f1", "show_help", "Help"),
+ Binding("tab", "focus_next", "Next", show=False),
+ ]
+
+ TITLE = "PentestAgent"
+ SUB_TITLE = "AI Penetration Testing"
+
+ def __init__(
+ self,
+ target: Optional[str] = None,
+        model: Optional[str] = None,
+ use_docker: bool = False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.target = target
+ self.model = model or DEFAULT_MODEL
+ self.use_docker = use_docker
+
+ # Agent components
+ self.agent: Optional["PentestAgentAgent"] = None
+ self.runtime = None
+ self.mcp_manager = None
+ self.all_tools = []
+ self.rag_engine = None # RAG engine
+
+ # State
+ self._mode = "assist" # "assist", "agent", or "crew"
+ self._is_running = False
+ self._is_initializing = True # Block input during init
+ self._should_stop = False
+ self._current_worker = None # Track running worker for cancellation
+ self._current_crew = None # Track crew orchestrator for cancellation
+
+ # Crew mode state
+ self._crew_workers: Dict[str, Dict[str, Any]] = {}
+ self._crew_worker_nodes: Dict[str, TreeNode] = {}
+ self._crew_orchestrator_node: Optional[TreeNode] = None
+ self._crew_findings_count = 0
+ self._viewing_worker_id: Optional[str] = None
+ self._worker_events: Dict[str, List[Dict]] = {}
+ self._crew_start_time: Optional[float] = None
+ self._crew_tokens_used: int = 0
+ self._crew_stats_timer: Optional[Timer] = None
+ self._spinner_timer: Optional[Timer] = None
+ self._spinner_frame: int = 0
+ self._spinner_frames = [
+ "⠋",
+ "⠙",
+ "⠹",
+ "⠸",
+ "⠼",
+ "⠴",
+ "⠦",
+ "⠧",
+ "⠇",
+ "⠏",
+ ] # Braille dots spinner
+
+ def compose(self) -> ComposeResult:
+ with Horizontal(id="main-container"):
+ # Chat area (left side)
+ with Vertical(id="chat-area"):
+ yield ScrollableContainer(id="chat-scroll")
+ yield StatusBar(id="status-bar")
+ with Horizontal(id="input-container"):
+ yield Static("> ", id="chat-prompt")
+ yield Input(placeholder="Enter task or type /help", id="chat-input")
+
+ # Sidebar (right side, hidden by default)
+ with Vertical(id="sidebar"):
+ yield CrewTree("CREW", id="workers-tree")
+ yield Static("", id="crew-stats")
+
+ async def on_mount(self) -> None:
+ """Initialize on mount"""
+ self._initialize_agent()
+
+ @work(thread=False)
+ async def _initialize_agent(self) -> None:
+ """Initialize agent"""
+ self._set_status("initializing")
+
+ try:
+ import os
+
+ from ..agents.pa_agent import PentestAgentAgent
+ from ..knowledge import RAGEngine
+ from ..llm import LLM, ModelConfig
+ from ..mcp import MCPManager
+ from ..runtime.docker_runtime import DockerRuntime
+ from ..runtime.runtime import LocalRuntime
+ from ..tools import get_all_tools, register_tool_instance
+
+ # RAG Engine - auto-load knowledge sources
+ rag_doc_count = 0
+ knowledge_path = None
+
+ # Check local knowledge dir first (must have files, not just exist)
+ local_knowledge = Path("knowledge")
+ bundled_path = Path(__file__).parent.parent / "knowledge" / "sources"
+
+ if local_knowledge.exists() and any(local_knowledge.rglob("*.*")):
+ knowledge_path = local_knowledge
+ elif bundled_path.exists():
+ knowledge_path = bundled_path
+
+ if knowledge_path:
+ try:
+ # Determine embedding method: env var > auto-detect
+ embeddings_setting = os.getenv(
+ "PENTESTAGENT_EMBEDDINGS", ""
+ ).lower()
+ if embeddings_setting == "local":
+ use_local = True
+ elif embeddings_setting == "openai":
+ use_local = False
+ else:
+ # Auto: use OpenAI if key available, else local
+ use_local = not os.getenv("OPENAI_API_KEY")
+
+ self.rag_engine = RAGEngine(
+ knowledge_path=knowledge_path, use_local_embeddings=use_local
+ )
+ await asyncio.to_thread(self.rag_engine.index)
+ rag_doc_count = self.rag_engine.get_document_count()
+ except Exception as e:
+ self._add_system(f"[!] RAG: {e}")
+ self.rag_engine = None
+
+ # MCP - auto-load if config exists
+ mcp_server_count = 0
+ try:
+ self.mcp_manager = MCPManager()
+ if self.mcp_manager.config_path.exists():
+ mcp_tools = await self.mcp_manager.connect_all()
+ for tool in mcp_tools:
+ register_tool_instance(tool)
+ mcp_server_count = len(self.mcp_manager.servers)
+ except Exception as e:
+ self._add_system(f"[!] MCP: {e}")
+
+ # Runtime - Docker or Local
+ if self.use_docker:
+ self._add_system("+ Starting Docker container...")
+ self.runtime = DockerRuntime(mcp_manager=self.mcp_manager)
+ else:
+ self.runtime = LocalRuntime(mcp_manager=self.mcp_manager)
+ await self.runtime.start()
+
+ # LLM
+ llm = LLM(
+ model=self.model,
+ config=ModelConfig(temperature=0.7),
+ rag_engine=self.rag_engine,
+ )
+
+ # Tools
+ self.all_tools = get_all_tools()
+
+ # Agent
+ self.agent = PentestAgentAgent(
+ llm=llm,
+ tools=self.all_tools,
+ runtime=self.runtime,
+ target=self.target,
+ rag_engine=self.rag_engine,
+ )
+
+ self._set_status("idle", "assist")
+ self._is_initializing = False # Allow input now
+
+ # Show ready message
+ tools_str = ", ".join(t.name for t in self.all_tools[:5])
+ if len(self.all_tools) > 5:
+ tools_str += f", +{len(self.all_tools) - 5} more"
+
+ runtime_str = "Docker" if self.use_docker else "Local"
+ self._add_system(
+ f"+ PentestAgent ready\n"
+ f" Model: {self.model} | Tools: {len(self.all_tools)} | MCP: {mcp_server_count} | RAG: {rag_doc_count}\n"
+ f" Runtime: {runtime_str} | Mode: Assist (use /agent or /crew for autonomous modes)"
+ )
+
+ # Show target if provided (but don't auto-start)
+ if self.target:
+ self._add_system(f" Target: {self.target}")
+
+ except Exception as e:
+ import traceback
+
+ self._add_system(f"[!] Init failed: {e}\n{traceback.format_exc()}")
+ self._set_status("error")
+ self._is_initializing = False # Allow input even on error
+
+ def _set_status(self, status: str, mode: Optional[str] = None) -> None:
+ """Update status bar"""
+ try:
+ bar = self.query_one("#status-bar", StatusBar)
+ bar.status = status
+ if mode:
+ bar.mode = mode
+ self._mode = mode
+ except Exception:
+ pass
+
+ def _add_message(self, widget: Static) -> None:
+ """Add a message widget to chat"""
+ try:
+ scroll = self.query_one("#chat-scroll", ScrollableContainer)
+ widget.add_class("message")
+ scroll.mount(widget)
+ scroll.scroll_end(animate=False)
+ except Exception:
+ pass
+
+ def _add_system(self, content: str) -> None:
+ self._add_message(SystemMessage(content))
+
+ def _add_user(self, content: str) -> None:
+ self._add_message(UserMessage(content))
+
+ def _add_assistant(self, content: str) -> None:
+ self._add_message(AssistantMessage(content))
+
+ def _add_thinking(self, content: str) -> None:
+ self._add_message(ThinkingMessage(content))
+
+ def _add_tool(self, name: str, action: str = "") -> None:
+ self._add_message(ToolMessage(name, action))
+
+ def _add_tool_result(self, name: str, result: str) -> None:
+ """Display tool execution result"""
+ # Hide tool output - LLM will synthesize it in its response
+ # This prevents duplication and keeps the chat clean
+ pass
+
+ def _show_system_prompt(self) -> None:
+ """Display the current system prompt"""
+ if self.agent:
+ prompt = self.agent.get_system_prompt()
+ self._add_system(f"=== System Prompt ===\n{prompt}")
+ else:
+ self._add_system("Agent not initialized")
+
+ def _show_memory_stats(self) -> None:
+ """Display memory usage statistics"""
+ if self.agent and self.agent.llm:
+ stats = self.agent.llm.get_memory_stats()
+ messages_count = len(self.agent.conversation_history)
+
+ # Format messages for token counting
+ llm_messages = self.agent._format_messages_for_llm()
+ current_tokens = self.agent.llm.memory.get_total_tokens(llm_messages)
+
+ info = (
+ f"=== Memory Stats ===\n"
+ f"Messages: {messages_count}\n"
+ f"Current tokens: {current_tokens:,}\n"
+ f"Token budget: {stats['token_budget']:,}\n"
+ f"Summarize at: {stats['summarize_threshold']:,} tokens\n"
+ f"Recent to keep: {stats['recent_to_keep']} messages\n"
+ f"Has summary: {stats['has_summary']}\n"
+ f"Summarized: {stats['summarized_message_count']} messages"
+ )
+ self._add_system(info)
+ else:
+ self._add_system("Agent not initialized")
+
+ async def _show_notes(self) -> None:
+ """Display saved notes"""
+ from ..tools.notes import get_all_notes
+
+ notes = await get_all_notes()
+ if not notes:
+ self._add_system(
+ "=== Notes ===\nNo notes saved.\n\nThe AI can save key findings using the notes tool."
+ )
+ return
+
+ lines = [f"=== Notes ({len(notes)} entries) ==="]
+ for key, value in notes.items():
+ # Show full value, indent multi-line content
+ if "\n" in value:
+ indented = value.replace("\n", "\n ")
+ lines.append(f"\n[{key}]\n {indented}")
+ else:
+ lines.append(f"[{key}] {value}")
+ lines.append("\nFile: loot/notes.json")
+ lines.append("Reports: loot/reports/")
+
+ self._add_system("\n".join(lines))
+
+ def _build_prior_context(self) -> str:
+ """Build a summary of prior findings for crew mode.
+
+ Extracts:
+ - Tool results (nmap scans, etc.) - the actual findings
+ - Assistant analyses - interpretations and summaries
+ - Last user task - what they were working on
+
+ Excludes:
+ - Raw user messages (noise)
+ - Tool call declarations (just names/args, not results)
+ - Very short responses
+ """
+ if not self.agent or not self.agent.conversation_history:
+ return ""
+
+ findings = []
+ last_user_task = ""
+
+ for msg in self.agent.conversation_history:
+ # Track user tasks/questions
+ if msg.role == "user" and msg.content:
+ last_user_task = msg.content[:200]
+
+ # Extract tool results (the actual findings)
+ elif msg.tool_results:
+ for result in msg.tool_results:
+ if result.success and result.result:
+ content = (
+ result.result[:1500]
+ if len(result.result) > 1500
+ else result.result
+ )
+ findings.append(f"[{result.tool_name}]\n{content}")
+
+ # Include assistant analyses (but not tool call messages)
+ elif msg.role == "assistant" and msg.content and not msg.tool_calls:
+ if len(msg.content) > 50:
+ findings.append(f"[Analysis]\n{msg.content[:1000]}")
+
+ if not findings and not last_user_task:
+ return ""
+
+ # Build context with last user task + recent findings
+ parts = []
+ if last_user_task:
+ parts.append(f"Last task: {last_user_task}")
+ if findings:
+ parts.append("Findings:\n" + "\n\n".join(findings[-5:]))
+
+ context = "\n\n".join(parts)
+ if len(context) > 4000:
+ context = context[:4000] + "\n... (truncated)"
+
+ return context
+
+ def _set_target(self, cmd: str) -> None:
+ """Set the target for the engagement"""
+ # Remove /target prefix
+ target = cmd[7:].strip()
+
+ if not target:
+ if self.target:
+ self._add_system(
+ f"Current target: {self.target}\nUsage: /target <target>"
+ )
+ else:
+ self._add_system(
+ "No target set.\nUsage: /target <target>\nExample: /target 192.168.1.1"
+ )
+ return
+
+ self.target = target
+
+ # Update agent's target if agent exists
+ if self.agent:
+ self.agent.target = target
+
+ self._add_system(f"@ Target set: {target}")
+
+ @work(exclusive=True)
+ async def _run_report_generation(self) -> None:
+ """Generate a pentest report from notes and conversation"""
+ from pathlib import Path
+
+ from ..tools.notes import get_all_notes
+
+ if not self.agent or not self.agent.llm:
+ self._add_system("[!] Agent not initialized")
+ return
+
+ notes = await get_all_notes()
+ if not notes:
+ self._add_system(
+ "No notes found. PentestAgent saves findings using the notes tool during testing."
+ )
+ return
+
+ self._add_system("Generating report...")
+
+ # Format notes
+ notes_text = "\n".join(f"### {k}\n{v}\n" for k, v in notes.items())
+
+ # Build conversation summary from full history
+ conversation_summary = ""
+ if self.agent.conversation_history:
+ # Summarize key actions from conversation
+ actions = []
+ for msg in self.agent.conversation_history:
+ if msg.role == "assistant" and msg.tool_calls:
+ for tc in msg.tool_calls:
+ actions.append(f"- Tool: {tc.name}")
+ elif msg.role == "tool_result" and msg.tool_results:
+ for tr in msg.tool_results:
+ # Include truncated result
+ result = tr.result or ""
+ output = result[:200] + "..." if len(result) > 200 else result
+ actions.append(f" Result: {output}")
+ if actions:
+ conversation_summary = "\n".join(actions[-30:]) # Last 30 actions
+
+ report_prompt = f"""Generate a penetration test report in Markdown from the notes below.
+
+# Notes
+{notes_text}
+
+# Activity Log
+{conversation_summary if conversation_summary else "N/A"}
+
+# Target
+{self.target or "Not specified"}
+
+Output a report with:
+1. Executive Summary (2-3 sentences)
+2. Findings (use notes, include severity: Critical/High/Medium/Low/Info)
+3. Recommendations
+
+Be concise. Use the actual data from notes."""
+
+ try:
+ report_content = await self.agent.llm.simple_completion(
+ prompt=report_prompt,
+ system="You are a penetration tester writing a security report. Be concise and factual.",
+ )
+
+ if not report_content or not report_content.strip():
+ self._add_system(
+ "[!] Report generation returned empty. Check LLM connection."
+ )
+ return
+
+ # Save to loot/reports/
+ reports_dir = Path("loot/reports")
+ reports_dir.mkdir(parents=True, exist_ok=True)
+
+ # Append Shadow Graph if available
+ try:
+ from ..knowledge.graph import ShadowGraph
+ from ..tools.notes import get_all_notes_sync
+
+ # Rehydrate graph from notes
+ graph = ShadowGraph()
+ notes = get_all_notes_sync()
+ if notes:
+ graph.update_from_notes(notes)
+ mermaid_code = graph.to_mermaid()
+
+ if mermaid_code:
+ report_content += (
+ "\n\n## Attack Graph (Visual)\n\n```mermaid\n"
+ + mermaid_code
+ + "\n```\n"
+ )
+ except Exception as e:
+ self._add_system(f"[!] Graph generation error: {e}")
+
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H%M%S")
+ report_path = reports_dir / f"report_{timestamp}.md"
+ report_path.write_text(report_content, encoding="utf-8")
+
+ self._add_system(f"+ Report saved: {report_path}")
+
+ except Exception as e:
+ self._add_system(f"[!] Report error: {e}")
+
+ @on(Input.Submitted, "#chat-input")
+ async def handle_submit(self, event: Input.Submitted) -> None:
+ """Handle input submission"""
+ # Block input while initializing or AI is processing
+ if self._is_initializing or self._is_running:
+ return
+
+ message = event.value.strip()
+ if not message:
+ return
+
+ event.input.value = ""
+
+ # Commands
+ if message.startswith("/"):
+ await self._handle_command(message)
+ return
+
+ self._add_user(message)
+
+ # Hide crew sidebar when entering assist mode
+ self._hide_sidebar()
+
+ # Use assist mode by default
+ if self.agent and not self._is_running:
+ self._current_worker = self._run_assist(message)
+
+ async def _handle_command(self, cmd: str) -> None:
+ """Handle slash commands"""
+ cmd_lower = cmd.lower().strip()
+ cmd_original = cmd.strip()
+
+ if cmd_lower in ["/help", "/h", "/?"]:
+ await self.push_screen(HelpScreen())
+ elif cmd_lower == "/clear":
+ scroll = self.query_one("#chat-scroll", ScrollableContainer)
+ await scroll.remove_children()
+ self._hide_sidebar()
+ # Clear agent conversation history for fresh start
+ if self.agent:
+ self.agent.conversation_history.clear()
+ self._add_system("Chat cleared")
+ elif cmd_lower == "/tools":
+ names = [t.name for t in self.all_tools]
+ self._add_system(f"Tools ({len(names)}): " + ", ".join(names))
+ elif cmd_lower in ["/quit", "/exit", "/q"]:
+ self.exit()
+ elif cmd_lower == "/prompt":
+ self._show_system_prompt()
+ elif cmd_lower == "/memory":
+ self._show_memory_stats()
+ elif cmd_lower == "/notes":
+ await self._show_notes()
+ elif cmd_lower == "/report":
+ self._run_report_generation()
+ elif cmd_original.startswith("/target"):
+ self._set_target(cmd_original)
+ elif cmd_original.startswith("/agent"):
+ await self._parse_agent_command(cmd_original)
+ elif cmd_original.startswith("/crew"):
+ await self._parse_crew_command(cmd_original)
+ else:
+ self._add_system(f"Unknown command: {cmd}\nType /help for commands.")
+
+ async def _parse_agent_command(self, cmd: str) -> None:
+ """Parse and execute /agent command"""
+
+ # Remove /agent prefix
+ rest = cmd[6:].strip()
+
+ if not rest:
+ self._add_system(
+ "Usage: /agent <task>\n"
+ "Example: /agent scan 192.168.1.1\n"
+ " /agent enumerate SSH on target"
+ )
+ return
+
+ task = rest
+
+ if not task:
+ self._add_system("Error: No task provided. Usage: /agent <task>")
+ return
+
+ self._add_user(f"/agent {task}")
+ self._add_system(">> Agent Mode")
+
+ # Hide crew sidebar when entering agent mode
+ self._hide_sidebar()
+
+ if self.agent and not self._is_running:
+ self._current_worker = self._run_agent_mode(task)
+
+ async def _parse_crew_command(self, cmd: str) -> None:
+ """Parse and execute /crew command"""
+ # Remove /crew prefix
+ rest = cmd[5:].strip()
+
+ if not rest:
+ self._add_system(
+ "Usage: /crew <target>\n"
+ "Example: /crew https://example.com\n"
+ " /crew 192.168.1.100\n\n"
+ "Crew mode spawns specialized workers in parallel:\n"
+ " - recon: Reconnaissance and mapping\n"
+ " - sqli: SQL injection testing\n"
+ " - xss: Cross-site scripting testing\n"
+ " - ssrf: Server-side request forgery\n"
+ " - auth: Authentication testing\n"
+ " - idor: Insecure direct object references\n"
+ " - info: Information disclosure"
+ )
+ return
+
+ target = rest
+
+ if not self._is_running:
+ self._add_user(f"/crew {target}")
+ self._show_sidebar()
+ self._current_worker = self._run_crew_mode(target)
+
+ def _show_sidebar(self) -> None:
+ """Show the sidebar for crew mode."""
+ try:
+ import time
+
+ sidebar = self.query_one("#sidebar")
+ sidebar.add_class("visible")
+
+ chat_area = self.query_one("#chat-area")
+ chat_area.add_class("with-sidebar")
+
+ # Setup tree
+ tree = self.query_one("#workers-tree", CrewTree)
+ tree.root.expand()
+ tree.show_root = False
+
+ # Clear old nodes
+ tree.root.remove_children()
+ self._crew_worker_nodes.clear()
+ self._crew_workers.clear()
+ self._worker_events.clear()
+ self._crew_findings_count = 0
+
+ # Start tracking time and tokens
+ self._crew_start_time = time.time()
+ self._crew_tokens_used = 0
+
+ # Start stats timer (update every second)
+ if self._crew_stats_timer:
+ self._crew_stats_timer.stop()
+ self._crew_stats_timer = self.set_interval(1.0, self._update_crew_stats)
+
+ # Start spinner timer for running workers (faster interval for smooth animation)
+ if self._spinner_timer:
+ self._spinner_timer.stop()
+ self._spinner_timer = self.set_interval(0.15, self._update_spinner)
+
+ # Add crew root node (no orchestrator - just "CREW" header)
+ self._crew_orchestrator_node = tree.root.add(
+ "CREW", data={"type": "crew", "id": "crew"}
+ )
+ self._crew_orchestrator_node.expand()
+ tree.select_node(self._crew_orchestrator_node)
+ self._viewing_worker_id = None
+
+ # Update stats
+ self._update_crew_stats()
+ except Exception as e:
+ self._add_system(f"[!] Sidebar error: {e}")
+
+ def _hide_sidebar(self) -> None:
+ """Hide the sidebar."""
+ try:
+ # Stop stats timer
+ if self._crew_stats_timer:
+ self._crew_stats_timer.stop()
+ self._crew_stats_timer = None
+
+ sidebar = self.query_one("#sidebar")
+ sidebar.remove_class("visible")
+
+ chat_area = self.query_one("#chat-area")
+ chat_area.remove_class("with-sidebar")
+ except Exception:
+ pass
+
+ def _update_crew_stats(self) -> None:
+ """Update crew stats panel."""
+ try:
+ import time
+
+ text = Text()
+
+ # Elapsed time
+ text.append("Time: ", style="bold #d4d4d4")
+ if self._crew_start_time:
+ elapsed = time.time() - self._crew_start_time
+ if elapsed < 60:
+ time_str = f"{int(elapsed)}s"
+ elif elapsed < 3600:
+ mins = int(elapsed // 60)
+ secs = int(elapsed % 60)
+ time_str = f"{mins}m {secs}s"
+ else:
+ hrs = int(elapsed // 3600)
+ mins = int((elapsed % 3600) // 60)
+ time_str = f"{hrs}h {mins}m"
+ text.append(time_str, style="#9a9a9a")
+ else:
+ text.append("--", style="#525252")
+
+ text.append("\n")
+
+ # Tokens used
+ text.append("Tokens: ", style="bold #d4d4d4")
+ if self._crew_tokens_used > 0:
+ if self._crew_tokens_used >= 1000:
+ token_str = f"{self._crew_tokens_used / 1000:.1f}k"
+ else:
+ token_str = str(self._crew_tokens_used)
+ text.append(token_str, style="#9a9a9a")
+ else:
+ text.append("--", style="#525252")
+
+ stats = self.query_one("#crew-stats", Static)
+ stats.update(text)
+ stats.border_title = "# Stats"
+ except Exception:
+ pass
+
+ def _update_spinner(self) -> None:
+ """Update spinner animation for running workers."""
+ try:
+ # Advance spinner frame
+ self._spinner_frame += 1
+
+ # Only update labels for running workers (efficient)
+ has_running = False
+ for worker_id, worker in self._crew_workers.items():
+ if worker.get("status") == "running":
+ has_running = True
+ # Update the tree node label
+ if worker_id in self._crew_worker_nodes:
+ node = self._crew_worker_nodes[worker_id]
+ node.set_label(self._format_worker_label(worker_id))
+
+ # Stop spinner if no workers are running (save resources)
+ if not has_running and self._spinner_timer:
+ self._spinner_timer.stop()
+ self._spinner_timer = None
+ except Exception:
+ pass
+
+ def _add_crew_worker(self, worker_id: str, worker_type: str, task: str) -> None:
+ """Add a worker to the sidebar tree."""
+ self._crew_workers[worker_id] = {
+ "worker_type": worker_type,
+ "task": task,
+ "status": "pending",
+ "findings": 0,
+ }
+
+ try:
+ label = self._format_worker_label(worker_id)
+ node = self._crew_orchestrator_node.add(
+ label, data={"type": "worker", "id": worker_id}
+ )
+ self._crew_worker_nodes[worker_id] = node
+ self._crew_orchestrator_node.expand()
+ self._update_crew_stats()
+ except Exception:
+ pass
+
+ def _update_crew_worker(self, worker_id: str, **updates) -> None:
+ """Update a worker's state."""
+ if worker_id not in self._crew_workers:
+ return
+
+ self._crew_workers[worker_id].update(updates)
+
+ # Restart spinner if a worker started running
+ if updates.get("status") == "running" and not self._spinner_timer:
+ self._spinner_timer = self.set_interval(0.15, self._update_spinner)
+
+ try:
+ if worker_id in self._crew_worker_nodes:
+ label = self._format_worker_label(worker_id)
+ self._crew_worker_nodes[worker_id].set_label(label)
+ self._update_crew_stats()
+ except Exception:
+ pass
+
+ def _format_worker_label(self, worker_id: str) -> Text:
+ """Format worker label for tree."""
+ worker = self._crew_workers.get(worker_id, {})
+ status = worker.get("status", "pending")
+ wtype = worker.get("worker_type", "worker")
+ findings = worker.get("findings", 0)
+
+ # 4-state icons: working (braille), done (checkmark), warning (!), error (X)
+ if status in ("running", "pending"):
+ # Animated braille spinner for all in-progress states
+ icon = self._spinner_frames[self._spinner_frame % len(self._spinner_frames)]
+ color = "#d4d4d4" # white
+ elif status == "complete":
+ icon = "✓"
+ color = "#22c55e" # green
+ elif status == "warning":
+ icon = "!"
+ color = "#f59e0b" # amber/orange
+ else: # error, cancelled, unknown
+ icon = "✗"
+ color = "#ef4444" # red
+
+ text = Text()
+ text.append(f"{icon} ", style=color)
+ text.append(wtype.upper(), style="bold")
+
+ if status == "complete" and findings > 0:
+ text.append(f" [{findings}]", style="#22c55e") # green
+ elif status in ("error", "cancelled"):
+ # Don't append " !" here since we already have the X icon
+ pass
+
+ return text
+
+ def _handle_worker_event(
+ self, worker_id: str, event_type: str, data: Dict[str, Any]
+ ) -> None:
+ """Handle worker events from CrewAgent - updates tree sidebar only."""
+ try:
+ if event_type == "spawn":
+ worker_type = data.get("worker_type", "unknown")
+ task = data.get("task", "")
+ self._add_crew_worker(worker_id, worker_type, task)
+ elif event_type == "status":
+ status = data.get("status", "running")
+ self._update_crew_worker(worker_id, status=status)
+ elif event_type == "tool":
+ # Add tool as child node under the agent
+ tool_name = data.get("tool", "unknown")
+ self._add_tool_to_worker(worker_id, tool_name)
+ elif event_type == "tokens":
+ # Track token usage
+ tokens = data.get("tokens", 0)
+ self._crew_tokens_used += tokens
+ elif event_type == "complete":
+ findings_count = data.get("findings_count", 0)
+ self._update_crew_worker(
+ worker_id, status="complete", findings=findings_count
+ )
+ self._crew_findings_count += findings_count
+ self._update_crew_stats()
+ elif event_type == "warning":
+ # Worker hit max iterations but has results
+ self._update_crew_worker(worker_id, status="warning")
+ reason = data.get("reason", "Partial completion")
+ worker = self._crew_workers.get(worker_id, {})
+ wtype = worker.get("worker_type", "worker")
+ self._add_system(f"[!] {wtype.upper()} stopped: {reason}")
+ self._update_crew_stats()
+ elif event_type == "failed":
+ # Worker determined task infeasible
+ self._update_crew_worker(worker_id, status="failed")
+ reason = data.get("reason", "Task infeasible")
+ worker = self._crew_workers.get(worker_id, {})
+ wtype = worker.get("worker_type", "worker")
+ self._add_system(f"[!] {wtype.upper()} failed: {reason}")
+ self._update_crew_stats()
+ elif event_type == "error":
+ self._update_crew_worker(worker_id, status="error")
+ worker = self._crew_workers.get(worker_id, {})
+ wtype = worker.get("worker_type", "worker")
+ error_msg = data.get("error", "Unknown error")
+ # Only show errors in chat - they're important
+ self._add_system(f"[!] {wtype.upper()} failed: {error_msg}")
+ except Exception as e:
+ self._add_system(f"[!] Worker event error: {e}")
+
+ def _add_tool_to_worker(self, worker_id: str, tool_name: str) -> None:
+ """Add a tool usage as child node under worker in tree."""
+ try:
+ node = self._crew_worker_nodes.get(worker_id)
+ if node:
+ node.add_leaf(f" {tool_name}")
+ node.expand()
+ except Exception:
+ pass
+
+ @on(Tree.NodeSelected, "#workers-tree")
+ def on_worker_tree_selected(self, event: Tree.NodeSelected) -> None:
+ """Handle tree node selection."""
+ node = event.node
+ if node.data:
+ node_type = node.data.get("type")
+ if node_type == "crew":
+ self._viewing_worker_id = None
+ elif node_type == "worker":
+ self._viewing_worker_id = node.data.get("id")
+
+ @work(thread=False)
+ async def _run_crew_mode(self, target: str) -> None:
+ """Run crew mode with sidebar."""
+ self._is_running = True
+ self._should_stop = False
+ self._set_status("thinking", "crew")
+
+ try:
+ from ..agents.base_agent import AgentMessage
+ from ..agents.crew import CrewOrchestrator
+ from ..llm import LLM, ModelConfig
+
+ # Build prior context from assist/agent conversation history
+ prior_context = self._build_prior_context()
+
+ llm = LLM(model=self.model, config=ModelConfig(temperature=0.7))
+
+ crew = CrewOrchestrator(
+ llm=llm,
+ tools=self.all_tools,
+ runtime=self.runtime,
+ on_worker_event=self._handle_worker_event,
+ rag_engine=self.rag_engine,
+ target=self.target,
+ prior_context=prior_context,
+ )
+ self._current_crew = crew # Track for cancellation
+
+ self._add_system(f"@ Task: {target}")
+
+ # Track crew results for memory
+ crew_report = None
+
+ async for update in crew.run(target):
+ if self._should_stop:
+ await crew.cancel()
+ self._add_system("[!] Stopped by user")
+ break
+
+ phase = update.get("phase", "")
+
+ if phase == "starting":
+ self._set_status("thinking", "crew")
+
+ elif phase == "thinking":
+ # Show the orchestrator's reasoning
+ content = update.get("content", "")
+ if content:
+ self._add_thinking(content)
+
+ elif phase == "tool_call":
+ # Show orchestration tool calls
+ tool = update.get("tool", "")
+ args = update.get("args", {})
+ self._add_tool(tool, str(args))
+
+ elif phase == "tool_result":
+ # Tool results are tracked via worker events
+ pass
+
+ elif phase == "complete":
+ crew_report = update.get("report", "")
+ if crew_report:
+ self._add_assistant(crew_report)
+
+ elif phase == "error":
+ error = update.get("error", "Unknown error")
+ self._add_system(f"[!] Crew error: {error}")
+
+ # Add crew results to main agent's conversation history
+ # so assist mode can reference what happened
+ if self.agent and crew_report:
+ # Add the crew task as a user message
+ self.agent.conversation_history.append(
+ AgentMessage(
+ role="user",
+ content=f"[CREW MODE] Run parallel analysis on target: {target}",
+ )
+ )
+ # Add the crew report as assistant response
+ self.agent.conversation_history.append(
+ AgentMessage(role="assistant", content=crew_report)
+ )
+
+ self._set_status("complete", "crew")
+ self._add_system("+ Crew task complete.")
+
+ # Stop timers
+ if self._crew_stats_timer:
+ self._crew_stats_timer.stop()
+ self._crew_stats_timer = None
+ if self._spinner_timer:
+ self._spinner_timer.stop()
+ self._spinner_timer = None
+
+ # Clear crew reference
+ self._current_crew = None
+
+ except asyncio.CancelledError:
+ # Cancel crew workers first
+ if self._current_crew:
+ await self._current_crew.cancel()
+ self._current_crew = None
+ self._add_system("[!] Cancelled")
+ self._set_status("idle", "crew")
+ # Stop timers on cancel
+ if self._crew_stats_timer:
+ self._crew_stats_timer.stop()
+ self._crew_stats_timer = None
+ if self._spinner_timer:
+ self._spinner_timer.stop()
+ self._spinner_timer = None
+
+ except Exception as e:
+ import traceback
+
+ # Cancel crew workers on error too
+ if self._current_crew:
+ try:
+ await self._current_crew.cancel()
+ except Exception:
+ pass
+ self._current_crew = None
+ self._add_system(f"[!] Crew error: {e}\n{traceback.format_exc()}")
+ self._set_status("error")
+ # Stop timers on error too
+ if self._crew_stats_timer:
+ self._crew_stats_timer.stop()
+ self._crew_stats_timer = None
+ if self._spinner_timer:
+ self._spinner_timer.stop()
+ self._spinner_timer = None
+ finally:
+ self._is_running = False
+
+ @work(thread=False)
+ async def _run_assist(self, message: str) -> None:
+ """Run in assist mode - single response"""
+ if not self.agent:
+ self._add_system("[!] Agent not ready")
+ return
+
+ self._is_running = True
+ self._should_stop = False
+ self._set_status("thinking", "assist")
+
+ try:
+ async for response in self.agent.assist(message):
+ if self._should_stop:
+ self._add_system("[!] Stopped by user")
+ break
+
+ self._set_status("processing")
+
+ # Show thinking/plan FIRST if there's content with tool calls
+ if response.content:
+ content = response.content.strip()
+ if response.tool_calls:
+ self._add_thinking(content)
+ else:
+ self._add_assistant(content)
+
+ # Show tool calls (skip 'finish' - internal control)
+ if response.tool_calls:
+ for call in response.tool_calls:
+ if call.name == "finish":
+ continue # Skip - summary shown as final message
+ args_str = str(call.arguments)
+ self._add_tool(call.name, args_str)
+
+ # Show tool results (displayed after execution completes)
+ # Skip 'finish' tool - its result is shown as the final summary
+ if response.tool_results:
+ for result in response.tool_results:
+ if result.tool_name == "finish":
+ continue # Skip - summary shown separately
+ if result.success:
+ self._add_tool_result(
+ result.tool_name, result.result or "Done"
+ )
+ else:
+ self._add_tool_result(
+ result.tool_name, f"Error: {result.error}"
+ )
+
+ self._set_status("idle", "assist")
+
+ except asyncio.CancelledError:
+ self._add_system("[!] Cancelled")
+ self._set_status("idle", "assist")
+ except Exception as e:
+ self._add_system(f"[!] Error: {e}")
+ self._set_status("error")
+ finally:
+ self._is_running = False
+
+ @work(thread=False)
+ async def _run_agent_mode(self, task: str) -> None:
+ """Run in agent mode - autonomous until task complete or user stops"""
+ if not self.agent:
+ self._add_system("[!] Agent not ready")
+ return
+
+ self._is_running = True
+ self._should_stop = False
+
+ self._set_status("thinking", "agent")
+
+ try:
+ async for response in self.agent.agent_loop(task):
+ if self._should_stop:
+ self._add_system("[!] Stopped by user")
+ break
+
+ self._set_status("processing")
+
+ # Show thinking/plan FIRST if there's content with tool calls
+ if response.content:
+ content = response.content.strip()
+ # If it has tool calls, it's thinking.
+ # If it's marked as intermediate, it's thinking.
+ if response.tool_calls or response.metadata.get("intermediate"):
+ self._add_thinking(content)
+ else:
+ # Check if this is a task completion message
+ if response.metadata.get("task_complete"):
+ self._add_assistant(content)
+ else:
+ self._add_assistant(content)
+
+ # Show tool calls AFTER thinking
+ if response.tool_calls:
+ for call in response.tool_calls:
+ # Show all tools including finish
+ args_str = str(call.arguments)
+ self._add_tool(call.name, args_str)
+
+ # Show tool results
+ if response.tool_results:
+ for result in response.tool_results:
+ if result.tool_name == "finish":
+ # Skip showing result for finish tool as it's redundant with the tool call display
+ continue
+
+ if result.success:
+ self._add_tool_result(
+ result.tool_name, result.result or "Done"
+ )
+ else:
+ self._add_tool_result(
+ result.tool_name, f"Error: {result.error}"
+ )
+
+ # Check state
+ if self.agent.state.value == "waiting_input":
+ self._set_status("waiting")
+ self._add_system("? Awaiting input...")
+ break
+ elif self.agent.state.value == "complete":
+ break
+
+ self._set_status("thinking")
+
+ self._set_status("complete", "agent")
+ self._add_system("+ Agent task complete. Back to assist mode.")
+
+ # Return to assist mode
+ await asyncio.sleep(1)
+ self._set_status("idle", "assist")
+
+ except asyncio.CancelledError:
+ self._add_system("[!] Cancelled")
+ self._set_status("idle", "assist")
+ except Exception as e:
+ self._add_system(f"[!] Error: {e}")
+ self._set_status("error")
+ finally:
+ self._is_running = False
+
+ def action_quit_app(self) -> None:
+ # Stop any running tasks first
+ if self._is_running:
+ self._should_stop = True
+ if self._current_worker and not self._current_worker.is_finished:
+ self._current_worker.cancel()
+ if self._current_crew:
+ # Schedule cancel but don't wait - we're exiting
+ asyncio.create_task(self._cancel_crew())
+ self.exit()
+
+ def action_stop_agent(self) -> None:
+ if self._is_running:
+ self._should_stop = True
+ self._add_system("[!] Stopping...")
+
+ # Cancel the running worker to interrupt blocking awaits
+ if self._current_worker and not self._current_worker.is_finished:
+ self._current_worker.cancel()
+
+ # Cancel crew orchestrator if running
+ if self._current_crew:
+ asyncio.create_task(self._cancel_crew())
+
+ # Clean up agent state to prevent stale tool responses
+ if self.agent:
+ self.agent.cleanup_after_cancel()
+
+ # Reconnect MCP servers (they may be in a bad state after cancellation)
+ if self.mcp_manager:
+ asyncio.create_task(self._reconnect_mcp_after_cancel())
+
+ async def _cancel_crew(self) -> None:
+ """Cancel crew orchestrator and all workers."""
+ try:
+ if self._current_crew:
+ await self._current_crew.cancel()
+ self._current_crew = None
+ # Mark all running workers as cancelled in the UI
+ for worker_id, worker in self._crew_workers.items():
+ if worker.get("status") in ("running", "pending"):
+ self._update_crew_worker(worker_id, status="cancelled")
+ except Exception:
+ pass # Best effort
+
+ async def _reconnect_mcp_after_cancel(self) -> None:
+ """Reconnect MCP servers after cancellation to restore clean state."""
+ await asyncio.sleep(0.5) # Brief delay for cancellation to propagate
+ try:
+ await self.mcp_manager.reconnect_all()
+ except Exception:
+ pass # Best effort - don't crash if reconnect fails
+
+ def action_show_help(self) -> None:
+ self.push_screen(HelpScreen())
+
+ async def on_unmount(self) -> None:
+ """Cleanup"""
+ if self.mcp_manager:
+ try:
+ await self.mcp_manager.disconnect_all()
+ await asyncio.sleep(0.1)
+ except Exception:
+ pass
+
+ if self.runtime:
+ try:
+ await self.runtime.stop()
+ except Exception:
+ pass
+
+
+# ----- Entry Point -----
+
+
+def run_tui(
+ target: Optional[str] = None,
+ model: str = None,
+ use_docker: bool = False,
+):
+ """Run the PentestAgent TUI"""
+ app = PentestAgentTUI(
+ target=target,
+ model=model,
+ use_docker=use_docker,
+ )
+ app.run()
+
+
+if __name__ == "__main__":
+ run_tui()
diff --git a/ghostcrew/interface/utils.py b/pentestagent/interface/utils.py
similarity index 96%
rename from ghostcrew/interface/utils.py
rename to pentestagent/interface/utils.py
index e804408..8bb6b5d 100644
--- a/ghostcrew/interface/utils.py
+++ b/pentestagent/interface/utils.py
@@ -1,4 +1,4 @@
-"""Interface utilities for GhostCrew."""
+"""Interface utilities for PentestAgent."""
from typing import Any, Optional
@@ -24,10 +24,10 @@ ASCII_BANNER = r"""
def print_banner():
- """Print the GhostCrew banner."""
+ """Print the PentestAgent banner."""
console.print(f"[bold white]{ASCII_BANNER}[/]")
console.print(
- "[bold white]====================== GHOSTCREW =======================[/]"
+ "[bold white]====================== PENTESTAGENT =======================[/]"
)
console.print(
"[dim white] AI Penetration Testing Agents v0.2.0[/dim white]\n"
@@ -126,7 +126,7 @@ def print_status(
tools_count: Number of loaded tools
findings_count: Number of findings
"""
- table = Table(title="GhostCrew Status", show_header=False)
+ table = Table(title="PentestAgent Status", show_header=False)
table.add_column("Property", style="cyan")
table.add_column("Value", style="white")
diff --git a/ghostcrew/knowledge/__init__.py b/pentestagent/knowledge/__init__.py
similarity index 84%
rename from ghostcrew/knowledge/__init__.py
rename to pentestagent/knowledge/__init__.py
index 3fdbf10..e492f87 100644
--- a/ghostcrew/knowledge/__init__.py
+++ b/pentestagent/knowledge/__init__.py
@@ -1,4 +1,4 @@
-"""Knowledge and RAG system for GhostCrew."""
+"""Knowledge and RAG system for PentestAgent."""
from .embeddings import get_embeddings, get_embeddings_local
from .indexer import KnowledgeIndexer
diff --git a/ghostcrew/knowledge/embeddings.py b/pentestagent/knowledge/embeddings.py
similarity index 98%
rename from ghostcrew/knowledge/embeddings.py
rename to pentestagent/knowledge/embeddings.py
index ed9cd31..d8afe7f 100644
--- a/ghostcrew/knowledge/embeddings.py
+++ b/pentestagent/knowledge/embeddings.py
@@ -1,4 +1,4 @@
-"""Embedding generation for GhostCrew."""
+"""Embedding generation for PentestAgent."""
from typing import List, Optional
diff --git a/ghostcrew/knowledge/graph.py b/pentestagent/knowledge/graph.py
similarity index 99%
rename from ghostcrew/knowledge/graph.py
rename to pentestagent/knowledge/graph.py
index 6b08f17..1aee1d0 100644
--- a/ghostcrew/knowledge/graph.py
+++ b/pentestagent/knowledge/graph.py
@@ -1,5 +1,5 @@
"""
-Shadow Graph implementation for GhostCrew.
+Shadow Graph implementation for PentestAgent.
This module provides a lightweight knowledge graph that is built automatically
from agent notes. It is used by the Orchestrator to compute strategic insights
diff --git a/ghostcrew/knowledge/indexer.py b/pentestagent/knowledge/indexer.py
similarity index 99%
rename from ghostcrew/knowledge/indexer.py
rename to pentestagent/knowledge/indexer.py
index 8562619..1116d31 100644
--- a/ghostcrew/knowledge/indexer.py
+++ b/pentestagent/knowledge/indexer.py
@@ -1,4 +1,4 @@
-"""Knowledge indexer for GhostCrew."""
+"""Knowledge indexer for PentestAgent."""
import json
from dataclasses import dataclass
diff --git a/ghostcrew/knowledge/rag.py b/pentestagent/knowledge/rag.py
similarity index 99%
rename from ghostcrew/knowledge/rag.py
rename to pentestagent/knowledge/rag.py
index 67ede07..9a113fa 100644
--- a/ghostcrew/knowledge/rag.py
+++ b/pentestagent/knowledge/rag.py
@@ -1,4 +1,4 @@
-"""RAG (Retrieval Augmented Generation) engine for GhostCrew."""
+"""RAG (Retrieval Augmented Generation) engine for PentestAgent."""
import json
from dataclasses import dataclass
diff --git a/ghostcrew/knowledge/sources/cves.json b/pentestagent/knowledge/sources/cves.json
similarity index 100%
rename from ghostcrew/knowledge/sources/cves.json
rename to pentestagent/knowledge/sources/cves.json
diff --git a/ghostcrew/knowledge/sources/methodologies.md b/pentestagent/knowledge/sources/methodologies.md
similarity index 100%
rename from ghostcrew/knowledge/sources/methodologies.md
rename to pentestagent/knowledge/sources/methodologies.md
diff --git a/ghostcrew/knowledge/sources/wordlists.txt b/pentestagent/knowledge/sources/wordlists.txt
similarity index 100%
rename from ghostcrew/knowledge/sources/wordlists.txt
rename to pentestagent/knowledge/sources/wordlists.txt
diff --git a/ghostcrew/llm/__init__.py b/pentestagent/llm/__init__.py
similarity index 88%
rename from ghostcrew/llm/__init__.py
rename to pentestagent/llm/__init__.py
index d65cc10..f436220 100644
--- a/ghostcrew/llm/__init__.py
+++ b/pentestagent/llm/__init__.py
@@ -1,4 +1,4 @@
-"""LLM integration for GhostCrew."""
+"""LLM integration for PentestAgent."""
from .config import ModelConfig
from .llm import LLM, LLMResponse
diff --git a/ghostcrew/llm/config.py b/pentestagent/llm/config.py
similarity index 97%
rename from ghostcrew/llm/config.py
rename to pentestagent/llm/config.py
index 64ba495..008bede 100644
--- a/ghostcrew/llm/config.py
+++ b/pentestagent/llm/config.py
@@ -1,4 +1,4 @@
-"""LLM configuration for GhostCrew."""
+"""LLM configuration for PentestAgent."""
from dataclasses import dataclass
diff --git a/ghostcrew/llm/llm.py b/pentestagent/llm/llm.py
similarity index 99%
rename from ghostcrew/llm/llm.py
rename to pentestagent/llm/llm.py
index be80b0a..52c0bed 100644
--- a/ghostcrew/llm/llm.py
+++ b/pentestagent/llm/llm.py
@@ -1,4 +1,4 @@
-"""LiteLLM wrapper for GhostCrew."""
+"""LiteLLM wrapper for PentestAgent."""
import asyncio
import random
diff --git a/ghostcrew/llm/memory.py b/pentestagent/llm/memory.py
similarity index 99%
rename from ghostcrew/llm/memory.py
rename to pentestagent/llm/memory.py
index bb71084..74d0b5f 100644
--- a/ghostcrew/llm/memory.py
+++ b/pentestagent/llm/memory.py
@@ -1,4 +1,4 @@
-"""Conversation memory management for GhostCrew."""
+"""Conversation memory management for PentestAgent."""
from typing import Awaitable, Callable, List, Optional
diff --git a/ghostcrew/llm/utils.py b/pentestagent/llm/utils.py
similarity index 98%
rename from ghostcrew/llm/utils.py
rename to pentestagent/llm/utils.py
index 49f8b3b..315f38e 100644
--- a/ghostcrew/llm/utils.py
+++ b/pentestagent/llm/utils.py
@@ -1,4 +1,4 @@
-"""LLM utility functions for GhostCrew."""
+"""LLM utility functions for PentestAgent."""
from typing import List, Optional
diff --git a/ghostcrew/mcp/__init__.py b/pentestagent/mcp/__init__.py
similarity index 85%
rename from ghostcrew/mcp/__init__.py
rename to pentestagent/mcp/__init__.py
index 502657b..0476c01 100644
--- a/ghostcrew/mcp/__init__.py
+++ b/pentestagent/mcp/__init__.py
@@ -1,4 +1,4 @@
-"""MCP (Model Context Protocol) integration for GhostCrew."""
+"""MCP (Model Context Protocol) integration for PentestAgent."""
from .discovery import MCPDiscovery
from .manager import MCPManager, MCPServer, MCPServerConfig
diff --git a/ghostcrew/mcp/discovery.py b/pentestagent/mcp/discovery.py
similarity index 99%
rename from ghostcrew/mcp/discovery.py
rename to pentestagent/mcp/discovery.py
index 42fc134..3ac4e91 100644
--- a/ghostcrew/mcp/discovery.py
+++ b/pentestagent/mcp/discovery.py
@@ -1,4 +1,4 @@
-"""MCP tool discovery for GhostCrew."""
+"""MCP tool discovery for PentestAgent."""
import json
from dataclasses import dataclass
diff --git a/ghostcrew/mcp/manager.py b/pentestagent/mcp/manager.py
similarity index 97%
rename from ghostcrew/mcp/manager.py
rename to pentestagent/mcp/manager.py
index 02c6f29..c7b96c1 100644
--- a/ghostcrew/mcp/manager.py
+++ b/pentestagent/mcp/manager.py
@@ -1,4 +1,4 @@
-"""MCP server connection manager for GhostCrew.
+"""MCP server connection manager for PentestAgent.
Uses standard MCP configuration format:
{
@@ -62,7 +62,7 @@ class MCPManager:
Path.cwd() / "mcp_servers.json",
Path.cwd() / "mcp.json",
Path(__file__).parent / "mcp_servers.json",
- Path.home() / ".ghostcrew" / "mcp_servers.json",
+ Path.home() / ".pentestagent" / "mcp_servers.json",
]
def __init__(self, config_path: Optional[Path] = None):
@@ -200,7 +200,7 @@ class MCPManager:
"params": {
"protocolVersion": "2024-11-05",
"capabilities": {},
- "clientInfo": {"name": "ghostcrew", "version": "0.2.0"},
+ "clientInfo": {"name": "pentestagent", "version": "0.2.0"},
},
"id": self._get_next_id(),
}
diff --git a/ghostcrew/mcp/mcp_servers.json b/pentestagent/mcp/mcp_servers.json
similarity index 100%
rename from ghostcrew/mcp/mcp_servers.json
rename to pentestagent/mcp/mcp_servers.json
diff --git a/ghostcrew/mcp/tools.py b/pentestagent/mcp/tools.py
similarity index 98%
rename from ghostcrew/mcp/tools.py
rename to pentestagent/mcp/tools.py
index 1f45dad..9834c88 100644
--- a/ghostcrew/mcp/tools.py
+++ b/pentestagent/mcp/tools.py
@@ -1,4 +1,4 @@
-"""MCP tool wrapper for GhostCrew."""
+"""MCP tool wrapper for PentestAgent."""
from typing import TYPE_CHECKING, Any
diff --git a/ghostcrew/mcp/transport.py b/pentestagent/mcp/transport.py
similarity index 99%
rename from ghostcrew/mcp/transport.py
rename to pentestagent/mcp/transport.py
index 933d920..33efd7c 100644
--- a/ghostcrew/mcp/transport.py
+++ b/pentestagent/mcp/transport.py
@@ -1,4 +1,4 @@
-"""MCP transport implementations for GhostCrew."""
+"""MCP transport implementations for PentestAgent."""
import asyncio
import json
diff --git a/ghostcrew/playbooks/__init__.py b/pentestagent/playbooks/__init__.py
similarity index 100%
rename from ghostcrew/playbooks/__init__.py
rename to pentestagent/playbooks/__init__.py
diff --git a/ghostcrew/playbooks/base_playbook.py b/pentestagent/playbooks/base_playbook.py
similarity index 100%
rename from ghostcrew/playbooks/base_playbook.py
rename to pentestagent/playbooks/base_playbook.py
diff --git a/ghostcrew/playbooks/thp3_network.py b/pentestagent/playbooks/thp3_network.py
similarity index 94%
rename from ghostcrew/playbooks/thp3_network.py
rename to pentestagent/playbooks/thp3_network.py
index a8f02fe..b4371f5 100644
--- a/ghostcrew/playbooks/thp3_network.py
+++ b/pentestagent/playbooks/thp3_network.py
@@ -1,4 +1,4 @@
-from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
+from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3NetworkPlaybook(BasePlaybook):
diff --git a/ghostcrew/playbooks/thp3_recon.py b/pentestagent/playbooks/thp3_recon.py
similarity index 93%
rename from ghostcrew/playbooks/thp3_recon.py
rename to pentestagent/playbooks/thp3_recon.py
index 91c0d5d..3183b72 100644
--- a/ghostcrew/playbooks/thp3_recon.py
+++ b/pentestagent/playbooks/thp3_recon.py
@@ -1,4 +1,4 @@
-from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
+from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3ReconPlaybook(BasePlaybook):
diff --git a/ghostcrew/playbooks/thp3_web.py b/pentestagent/playbooks/thp3_web.py
similarity index 93%
rename from ghostcrew/playbooks/thp3_web.py
rename to pentestagent/playbooks/thp3_web.py
index 7e09299..8f22311 100644
--- a/ghostcrew/playbooks/thp3_web.py
+++ b/pentestagent/playbooks/thp3_web.py
@@ -1,4 +1,4 @@
-from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
+from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3WebPlaybook(BasePlaybook):
diff --git a/ghostcrew/runtime/__init__.py b/pentestagent/runtime/__init__.py
similarity index 86%
rename from ghostcrew/runtime/__init__.py
rename to pentestagent/runtime/__init__.py
index eee52fa..37948dc 100644
--- a/ghostcrew/runtime/__init__.py
+++ b/pentestagent/runtime/__init__.py
@@ -1,4 +1,4 @@
-"""Runtime environment for GhostCrew."""
+"""Runtime environment for PentestAgent."""
from .docker_runtime import DockerRuntime
from .runtime import CommandResult, EnvironmentInfo, LocalRuntime, Runtime
diff --git a/ghostcrew/runtime/docker_runtime.py b/pentestagent/runtime/docker_runtime.py
similarity index 97%
rename from ghostcrew/runtime/docker_runtime.py
rename to pentestagent/runtime/docker_runtime.py
index bd4580f..0afdeb5 100644
--- a/ghostcrew/runtime/docker_runtime.py
+++ b/pentestagent/runtime/docker_runtime.py
@@ -1,4 +1,4 @@
-"""Docker runtime for GhostCrew."""
+"""Docker runtime for PentestAgent."""
import asyncio
import io
@@ -17,8 +17,8 @@ if TYPE_CHECKING:
class DockerConfig:
"""Docker runtime configuration."""
- image: str = "ghostcrew-kali:latest" # Built from Dockerfile.kali
- container_name: str = "ghostcrew-sandbox"
+ image: str = "pentestagent-kali:latest" # Built from Dockerfile.kali
+ container_name: str = "pentestagent-sandbox"
network_mode: str = "bridge"
cap_add: list = None
volumes: dict = None
@@ -80,8 +80,8 @@ class DockerRuntime(Runtime):
except Exception:
# Create new container
volumes = {
- str(Path.home() / ".ghostcrew"): {
- "bind": "/root/.ghostcrew",
+ str(Path.home() / ".pentestagent"): {
+ "bind": "/root/.pentestagent",
"mode": "rw",
},
**self.config.volumes,
diff --git a/ghostcrew/runtime/runtime.py b/pentestagent/runtime/runtime.py
similarity index 99%
rename from ghostcrew/runtime/runtime.py
rename to pentestagent/runtime/runtime.py
index 677ef74..bb9dd52 100644
--- a/ghostcrew/runtime/runtime.py
+++ b/pentestagent/runtime/runtime.py
@@ -1,4 +1,4 @@
-"""Runtime abstraction for GhostCrew."""
+"""Runtime abstraction for PentestAgent."""
import platform
import shutil
diff --git a/ghostcrew/runtime/tool_server.py b/pentestagent/runtime/tool_server.py
similarity index 100%
rename from ghostcrew/runtime/tool_server.py
rename to pentestagent/runtime/tool_server.py
diff --git a/ghostcrew/tools/__init__.py b/pentestagent/tools/__init__.py
similarity index 95%
rename from ghostcrew/tools/__init__.py
rename to pentestagent/tools/__init__.py
index 3528fcc..5f6342a 100644
--- a/ghostcrew/tools/__init__.py
+++ b/pentestagent/tools/__init__.py
@@ -1,4 +1,4 @@
-"""Tool system for GhostCrew."""
+"""Tool system for PentestAgent."""
from .executor import ToolExecutor
from .loader import discover_tools, get_tool_info, load_all_tools, reload_tools
diff --git a/ghostcrew/tools/browser/__init__.py b/pentestagent/tools/browser/__init__.py
similarity index 99%
rename from ghostcrew/tools/browser/__init__.py
rename to pentestagent/tools/browser/__init__.py
index fec60a9..cd55f79 100644
--- a/ghostcrew/tools/browser/__init__.py
+++ b/pentestagent/tools/browser/__init__.py
@@ -1,4 +1,4 @@
-"""Browser automation tool for GhostCrew."""
+"""Browser automation tool for PentestAgent."""
from typing import TYPE_CHECKING
diff --git a/ghostcrew/tools/browser/browser.py b/pentestagent/tools/browser/browser.py
similarity index 100%
rename from ghostcrew/tools/browser/browser.py
rename to pentestagent/tools/browser/browser.py
diff --git a/ghostcrew/tools/executor.py b/pentestagent/tools/executor.py
similarity index 99%
rename from ghostcrew/tools/executor.py
rename to pentestagent/tools/executor.py
index af8f837..da0dc20 100644
--- a/ghostcrew/tools/executor.py
+++ b/pentestagent/tools/executor.py
@@ -1,4 +1,4 @@
-"""Tool executor for GhostCrew."""
+"""Tool executor for PentestAgent."""
import asyncio
from dataclasses import dataclass
diff --git a/ghostcrew/tools/finish/__init__.py b/pentestagent/tools/finish/__init__.py
similarity index 99%
rename from ghostcrew/tools/finish/__init__.py
rename to pentestagent/tools/finish/__init__.py
index f768c4b..5a71625 100644
--- a/ghostcrew/tools/finish/__init__.py
+++ b/pentestagent/tools/finish/__init__.py
@@ -1,4 +1,4 @@
-"""Task completion tool for GhostCrew agent loop control."""
+"""Task completion tool for PentestAgent agent loop control."""
import json
from dataclasses import dataclass, field
diff --git a/ghostcrew/tools/loader.py b/pentestagent/tools/loader.py
similarity index 91%
rename from ghostcrew/tools/loader.py
rename to pentestagent/tools/loader.py
index 7891855..51dbf83 100644
--- a/ghostcrew/tools/loader.py
+++ b/pentestagent/tools/loader.py
@@ -1,4 +1,4 @@
-"""Dynamic tool loader for GhostCrew."""
+"""Dynamic tool loader for PentestAgent."""
import importlib
import sys
@@ -55,7 +55,7 @@ def load_tool_module(module_name: str, tools_dir: Optional[Path] = None) -> bool
try:
# Build the full module path
- full_module_name = f"ghostcrew.tools.{module_name}"
+ full_module_name = f"pentestagent.tools.{module_name}"
# Check if already loaded
if full_module_name in sys.modules:
@@ -126,13 +126,13 @@ def reload_tools():
to_remove = [
name
for name in sys.modules
- if name.startswith("ghostcrew.tools.")
+ if name.startswith("pentestagent.tools.")
and name
not in (
- "ghostcrew.tools",
- "ghostcrew.tools.registry",
- "ghostcrew.tools.executor",
- "ghostcrew.tools.loader",
+ "pentestagent.tools",
+ "pentestagent.tools.registry",
+ "pentestagent.tools.executor",
+ "pentestagent.tools.loader",
)
]
diff --git a/ghostcrew/tools/notes/__init__.py b/pentestagent/tools/notes/__init__.py
similarity index 99%
rename from ghostcrew/tools/notes/__init__.py
rename to pentestagent/tools/notes/__init__.py
index 7d03e43..ab7189d 100644
--- a/ghostcrew/tools/notes/__init__.py
+++ b/pentestagent/tools/notes/__init__.py
@@ -1,4 +1,4 @@
-"""Notes tool for GhostCrew - persistent key findings storage."""
+"""Notes tool for PentestAgent - persistent key findings storage."""
import asyncio
import json
diff --git a/ghostcrew/tools/registry.py b/pentestagent/tools/registry.py
similarity index 99%
rename from ghostcrew/tools/registry.py
rename to pentestagent/tools/registry.py
index 4fc7ad8..f1e3888 100644
--- a/ghostcrew/tools/registry.py
+++ b/pentestagent/tools/registry.py
@@ -1,4 +1,4 @@
-"""Tool registry for GhostCrew."""
+"""Tool registry for PentestAgent."""
from dataclasses import dataclass, field
from functools import wraps
diff --git a/ghostcrew/tools/terminal/__init__.py b/pentestagent/tools/terminal/__init__.py
similarity index 98%
rename from ghostcrew/tools/terminal/__init__.py
rename to pentestagent/tools/terminal/__init__.py
index 599aafb..4a07c9e 100644
--- a/ghostcrew/tools/terminal/__init__.py
+++ b/pentestagent/tools/terminal/__init__.py
@@ -1,4 +1,4 @@
-"""Terminal tool for GhostCrew."""
+"""Terminal tool for PentestAgent."""
from typing import TYPE_CHECKING
diff --git a/ghostcrew/tools/terminal/terminal.py b/pentestagent/tools/terminal/terminal.py
similarity index 100%
rename from ghostcrew/tools/terminal/terminal.py
rename to pentestagent/tools/terminal/terminal.py
diff --git a/ghostcrew/tools/web_search/__init__.py b/pentestagent/tools/web_search/__init__.py
similarity index 98%
rename from ghostcrew/tools/web_search/__init__.py
rename to pentestagent/tools/web_search/__init__.py
index e8788c4..be73dcb 100644
--- a/ghostcrew/tools/web_search/__init__.py
+++ b/pentestagent/tools/web_search/__init__.py
@@ -1,4 +1,4 @@
-"""Web search tool for GhostCrew."""
+"""Web search tool for PentestAgent."""
import os
from typing import TYPE_CHECKING
diff --git a/pyproject.toml b/pyproject.toml
index cc8630b..c3b1cee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
[project]
-name = "ghostcrew"
+name = "pentestagent"
version = "0.2.0"
description = "AI penetration testing"
readme = "README.md"
@@ -68,25 +68,25 @@ rag = [
"faiss-cpu>=1.8.0",
]
all = [
- "ghostcrew[dev,rag]",
+ "pentestagent[dev,rag]",
]
[project.urls]
-Homepage = "https://github.com/GH05TCREW/ghostcrew"
+Homepage = "https://github.com/GH05TCREW/pentestagent"
[project.scripts]
-ghostcrew = "ghostcrew.interface.main:main"
+pentestagent = "pentestagent.interface.main:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
-packages = ["ghostcrew"]
+packages = ["pentestagent"]
[tool.hatch.build.targets.sdist]
include = [
- "ghostcrew/**",
+ "pentestagent/**",
"*.md",
"*.txt"
]
@@ -106,7 +106,7 @@ include = '\.pyi?$'
[tool.isort]
profile = "black"
line_length = 88
-known_first_party = ["ghostcrew"]
+known_first_party = ["pentestagent"]
[tool.ruff]
line-length = 88
diff --git a/requirements.txt b/requirements.txt
index 7c1a6fb..42d1309 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-# GhostCrew Dependencies
+# PentestAgent Dependencies
# Core LLM
litellm>=1.40.0
diff --git a/scripts/run.sh b/scripts/run.sh
index a4d758f..6167df4 100644
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# GhostCrew Run Script
+# PentestAgent Run Script
set -e
@@ -28,7 +28,7 @@ while [[ $# -gt 0 ]]; do
shift 2
;;
--help)
- echo "GhostCrew - AI Penetration Testing"
+ echo "PentestAgent - AI Penetration Testing"
echo ""
echo "Usage: run.sh [options]"
echo ""
@@ -46,7 +46,7 @@ while [[ $# -gt 0 ]]; do
done
# Build command
-CMD="python -m ghostcrew"
+CMD="python -m pentestagent"
if [ "$MODE" = "tui" ]; then
CMD="$CMD --tui"
@@ -56,6 +56,6 @@ if [ -n "$TARGET" ]; then
CMD="$CMD --target $TARGET"
fi
-# Run GhostCrew
-echo "Starting GhostCrew..."
+# Run PentestAgent
+echo "Starting PentestAgent..."
$CMD
diff --git a/scripts/setup.ps1 b/scripts/setup.ps1
index 98f52c5..6e1cf30 100644
--- a/scripts/setup.ps1
+++ b/scripts/setup.ps1
@@ -1,7 +1,7 @@
-# GhostCrew PowerShell Setup Script
+# PentestAgent PowerShell Setup Script
Write-Host "=================================================================="
-Write-Host " GHOSTCREW"
+Write-Host " PENTESTAGENT"
Write-Host " AI Penetration Testing"
Write-Host "=================================================================="
Write-Host ""
@@ -57,7 +57,7 @@ Write-Host "[OK] Playwright browsers installed"
if (-not (Test-Path ".env")) {
Write-Host "Creating .env file..."
@"
-# GhostCrew Configuration
+# PentestAgent Configuration
# Add your API keys here
# OpenAI API Key (required for GPT models)
@@ -67,13 +67,13 @@ OPENAI_API_KEY=
ANTHROPIC_API_KEY=
# Model Configuration
-GHOSTCREW_MODEL=gpt-5
+PENTESTAGENT_MODEL=gpt-5
# Debug Mode
-GHOSTCREW_DEBUG=false
+PENTESTAGENT_DEBUG=false
# Max Iterations
-GHOSTCREW_MAX_ITERATIONS=50
+PENTESTAGENT_MAX_ITERATIONS=50
"@ | Set-Content -Path ".env" -Encoding UTF8
Write-Host "[OK] .env file created"
Write-Host "[!] Please edit .env and add your API keys"
@@ -89,5 +89,5 @@ Write-Host ""
Write-Host "To get started:"
Write-Host " 1. Edit .env and add your API keys"
Write-Host " 2. Activate: .\venv\Scripts\Activate.ps1"
-Write-Host " 3. Run: ghostcrew or python -m ghostcrew"
+Write-Host " 3. Run: pentestagent or python -m pentestagent"
Write-Host ""
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 0ae4f38..bac4a3f 100644
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -1,10 +1,10 @@
#!/bin/bash
-# GhostCrew Setup Script
+# PentestAgent Setup Script
set -e
echo "=================================================================="
-echo " GHOSTCREW"
+echo " PENTESTAGENT"
echo " AI Penetration Testing"
echo "=================================================================="
echo ""
@@ -51,7 +51,7 @@ echo "[OK] Playwright browsers installed"
if [ ! -f ".env" ]; then
echo "Creating .env file..."
cat > .env << EOF
-# GhostCrew Configuration
+# PentestAgent Configuration
# Add your API keys here
# OpenAI API Key (required for GPT models)
@@ -61,13 +61,13 @@ OPENAI_API_KEY=
ANTHROPIC_API_KEY=
# Model Configuration
-GHOSTCREW_MODEL=gpt-5
+PENTESTAGENT_MODEL=gpt-5
# Debug Mode
-GHOSTCREW_DEBUG=false
+PENTESTAGENT_DEBUG=false
# Max Iterations
-GHOSTCREW_MAX_ITERATIONS=50
+PENTESTAGENT_MAX_ITERATIONS=50
EOF
echo "[OK] .env file created"
echo "[!] Please edit .env and add your API keys"
@@ -84,10 +84,10 @@ echo ""
echo "To get started:"
echo " 1. Edit .env and add your API keys"
echo " 2. Activate the virtual environment: source venv/bin/activate"
-echo " 3. Run GhostCrew: ghostcrew or python -m ghostcrew"
+echo " 3. Run PentestAgent: pentestagent or python -m pentestagent"
echo ""
echo "For Docker usage:"
-echo " docker-compose up ghostcrew"
-echo " docker-compose --profile kali up ghostcrew-kali"
+echo " docker-compose up pentestagent"
+echo " docker-compose --profile kali up pentestagent-kali"
echo ""
echo "=================================================================="
diff --git a/tests/__init__.py b/tests/__init__.py
index ea79261..854810c 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1 +1 @@
-# GhostCrew Tests
+# PentestAgent Tests
diff --git a/tests/conftest.py b/tests/conftest.py
index a430fce..3703ae7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,4 @@
-"""Test fixtures for GhostCrew tests."""
+"""Test fixtures for PentestAgent tests."""
import pytest
import asyncio
@@ -6,9 +6,9 @@ from pathlib import Path
from typing import Generator, AsyncGenerator
from unittest.mock import MagicMock, AsyncMock
-from ghostcrew.config import Settings
-from ghostcrew.agents.state import AgentState, AgentStateManager
-from ghostcrew.tools import get_all_tools, Tool, ToolSchema
+from pentestagent.config import Settings
+from pentestagent.agents.state import AgentState, AgentStateManager
+from pentestagent.tools import get_all_tools, Tool, ToolSchema
@pytest.fixture
diff --git a/tests/test_agents.py b/tests/test_agents.py
index 77dbda7..b08ab0d 100644
--- a/tests/test_agents.py
+++ b/tests/test_agents.py
@@ -3,7 +3,7 @@
import pytest
from datetime import datetime
-from ghostcrew.agents.state import AgentState, AgentStateManager, StateTransition
+from pentestagent.agents.state import AgentState, AgentStateManager, StateTransition
class TestAgentState:
diff --git a/tests/test_graph.py b/tests/test_graph.py
index fbc7496..49bfdcb 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -2,7 +2,7 @@
import pytest
import networkx as nx
-from ghostcrew.knowledge.graph import ShadowGraph, GraphNode, GraphEdge
+from pentestagent.knowledge.graph import ShadowGraph, GraphNode, GraphEdge
class TestShadowGraph:
"""Tests for ShadowGraph class."""
diff --git a/tests/test_knowledge.py b/tests/test_knowledge.py
index c21dbaf..35730cb 100644
--- a/tests/test_knowledge.py
+++ b/tests/test_knowledge.py
@@ -5,7 +5,7 @@ import numpy as np
from pathlib import Path
from unittest.mock import patch
-from ghostcrew.knowledge.rag import RAGEngine, Document
+from pentestagent.knowledge.rag import RAGEngine, Document
class TestDocument:
diff --git a/tests/test_notes.py b/tests/test_notes.py
index bfd1556..e3f0bff 100644
--- a/tests/test_notes.py
+++ b/tests/test_notes.py
@@ -6,7 +6,7 @@ import asyncio
from pathlib import Path
from unittest.mock import MagicMock, patch
-from ghostcrew.tools.notes import notes, set_notes_file, get_all_notes, _notes
+from pentestagent.tools.notes import notes, set_notes_file, get_all_notes, _notes
# We need to reset the global state for tests
@pytest.fixture(autouse=True)
@@ -18,7 +18,7 @@ def reset_notes_state(tmp_path):
# Clear the global dictionary (it's imported from the module)
# We need to clear the actual dictionary object in the module
- from ghostcrew.tools.notes import _notes
+ from pentestagent.tools.notes import _notes
_notes.clear()
yield
@@ -148,7 +148,7 @@ async def test_legacy_migration(tmp_path):
set_notes_file(legacy_file)
# Trigger load (get_all_notes calls _load_notes_unlocked if empty, but we need to clear first)
- from ghostcrew.tools.notes import _notes
+ from pentestagent.tools.notes import _notes
_notes.clear()
all_notes = await get_all_notes()
diff --git a/tests/test_tools.py b/tests/test_tools.py
index 9e68b36..5aeaa9a 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -2,7 +2,7 @@
import pytest
-from ghostcrew.tools import (
+from pentestagent.tools import (
Tool, ToolSchema, register_tool, get_all_tools, get_tool,
enable_tool, disable_tool, get_tool_names
)