refactor: update agent name

This commit is contained in:
GH05TCREW
2025-12-19 10:25:58 -07:00
parent fe51aeed6f
commit c9866a407b
91 changed files with 3120 additions and 500 deletions

View File

@@ -1,4 +1,4 @@
# GhostCrew Configuration
# PentestAgent Configuration
# API Keys (set at least one for chat model)
OPENAI_API_KEY=
@@ -8,11 +8,11 @@ TAVILY_API_KEY=
# Chat Model (any LiteLLM-supported model)
# OpenAI: gpt-5, gpt-4.1, gpt-4.1-mini
# Anthropic: claude-sonnet-4-20250514, claude-opus-4-20250514
GHOSTCREW_MODEL=gpt-5
PENTESTAGENT_MODEL=gpt-5
# Embeddings (for RAG knowledge base)
# Options: openai, local (default: openai if OPENAI_API_KEY set, else local)
# GHOSTCREW_EMBEDDINGS=local
# PENTESTAGENT_EMBEDDINGS=local
# Settings
GHOSTCREW_DEBUG=false
PENTESTAGENT_DEBUG=false

View File

@@ -1,9 +1,9 @@
# GhostCrew - AI Penetration Testing Agent
# PentestAgent - AI Penetration Testing Agent
# Base image with common tools
FROM python:3.11-slim
LABEL maintainer="GhostCrew"
LABEL maintainer="PentestAgent"
LABEL description="AI penetration testing"
# Set environment variables
@@ -50,14 +50,14 @@ RUN pip install --no-cache-dir --upgrade pip && \
COPY . .
# Create non-root user for security
RUN useradd -m -s /bin/bash ghostcrew && \
chown -R ghostcrew:ghostcrew /app
RUN useradd -m -s /bin/bash pentestagent && \
chown -R pentestagent:pentestagent /app
# Switch to non-root user (can switch back for privileged operations)
USER ghostcrew
USER pentestagent
# Expose any needed ports
EXPOSE 8080
# Default command
CMD ["python", "-m", "ghostcrew"]
CMD ["python", "-m", "pentestagent"]

View File

@@ -1,10 +1,10 @@
# GhostCrew Kali Linux Image
# PentestAgent Kali Linux Image
# Full penetration testing environment
FROM kalilinux/kali-rolling
LABEL maintainer="Masic"
LABEL description="GhostCrew with Kali Linux tools"
LABEL description="PentestAgent with Kali Linux tools"
# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive
@@ -82,4 +82,4 @@ COPY docker-entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
CMD ["python3", "-m", "ghostcrew"]
CMD ["python3", "-m", "pentestagent"]

View File

@@ -1,13 +1,11 @@
<div align="center">
<img src="assets/ghostcrew-logo.png" alt="GhostCrew Logo" width="220" style="margin-bottom: 20px;"/>
<img src="assets/pentestagent-logo.png" alt="PentestAgent Logo" width="220" style="margin-bottom: 20px;"/>
# GHOSTCREW
### AI Penetration Testing Agents
# PentestAgent
### AI Penetration Testing
[![Python](https://img.shields.io/badge/Python-3.10%2B-blue.svg)](https://www.python.org/) [![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE.txt) [![Version](https://img.shields.io/badge/Version-0.2.0-orange.svg)](https://github.com/GH05TCREW/ghostcrew/releases) [![Security](https://img.shields.io/badge/Security-Penetration%20Testing-red.svg)](https://github.com/GH05TCREW/ghostcrew) [![MCP](https://img.shields.io/badge/MCP-Compatible-purple.svg)](https://github.com/GH05TCREW/ghostcrew)
[🇺🇸 English](README.md) | [🇨🇳 中文文档](README_zh.md)
[![Python](https://img.shields.io/badge/Python-3.10%2B-blue.svg)](https://www.python.org/) [![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE.txt) [![Version](https://img.shields.io/badge/Version-0.2.0-orange.svg)](https://github.com/GH05TCREW/pentestagent/releases) [![Security](https://img.shields.io/badge/Security-Penetration%20Testing-red.svg)](https://github.com/GH05TCREW/pentestagent) [![MCP](https://img.shields.io/badge/MCP-Compatible-purple.svg)](https://github.com/GH05TCREW/pentestagent)
</div>
@@ -22,8 +20,8 @@ https://github.com/user-attachments/assets/a67db2b5-672a-43df-b709-149c8eaee975
```bash
# Clone
git clone https://github.com/GH05TCREW/ghostcrew.git
cd ghostcrew
git clone https://github.com/GH05TCREW/pentestagent.git
cd pentestagent
# Setup (creates venv, installs deps)
.\scripts\setup.ps1 # Windows
@@ -43,14 +41,14 @@ Create `.env` in the project root:
```
ANTHROPIC_API_KEY=sk-ant-...
GHOSTCREW_MODEL=claude-sonnet-4-20250514
PENTESTAGENT_MODEL=claude-sonnet-4-20250514
```
Or for OpenAI:
```
OPENAI_API_KEY=sk-...
GHOSTCREW_MODEL=gpt-5
PENTESTAGENT_MODEL=gpt-5
```
Any [LiteLLM-supported model](https://docs.litellm.ai/docs/providers) works.
@@ -58,9 +56,9 @@ Any [LiteLLM-supported model](https://docs.litellm.ai/docs/providers) works.
## Run
```bash
ghostcrew # Launch TUI
ghostcrew -t 192.168.1.1 # Launch with target
ghostcrew --docker # Run tools in Docker container
pentestagent # Launch TUI
pentestagent -t 192.168.1.1 # Launch with target
pentestagent --docker # Run tools in Docker container
```
## Docker
@@ -73,13 +71,13 @@ Run tools inside a Docker container for isolation and pre-installed pentesting t
# Base image with nmap, netcat, curl
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
-e GHOSTCREW_MODEL=claude-sonnet-4-20250514 \
ghcr.io/gh05tcrew/ghostcrew:latest
-e PENTESTAGENT_MODEL=claude-sonnet-4-20250514 \
ghcr.io/gh05tcrew/pentestagent:latest
# Kali image with metasploit, sqlmap, hydra, etc.
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
ghcr.io/gh05tcrew/ghostcrew:kali
ghcr.io/gh05tcrew/pentestagent:kali
```
### Option 2: Build locally
@@ -89,20 +87,20 @@ docker run -it --rm \
docker compose build
# Run
docker compose run --rm ghostcrew
docker compose run --rm pentestagent
# Or with Kali
docker compose --profile kali build
docker compose --profile kali run --rm ghostcrew-kali
docker compose --profile kali run --rm pentestagent-kali
```
The container runs GhostCrew with access to Linux pentesting tools. The agent can use `nmap`, `msfconsole`, `sqlmap`, etc. directly via the terminal tool.
The container runs PentestAgent with access to Linux pentesting tools. The agent can use `nmap`, `msfconsole`, `sqlmap`, etc. directly via the terminal tool.
Requires Docker to be installed and running.
## Modes
GhostCrew has three modes, accessible via commands in the TUI:
PentestAgent has three modes, accessible via commands in the TUI:
| Mode | Command | Description |
|------|---------|-------------|
@@ -130,25 +128,25 @@ Press `Esc` to stop a running agent. `Ctrl+Q` to quit.
## Playbooks
GhostCrew includes prebuilt **attack playbooks** for black-box security testing. Playbooks define a structured approach to specific security assessments.
PentestAgent includes prebuilt **attack playbooks** for black-box security testing. Playbooks define a structured approach to specific security assessments.
**Run a playbook:**
```bash
ghostcrew run -t example.com --playbook thp3_web
pentestagent run -t example.com --playbook thp3_web
```
![Playbook Demo](assets/playbook.gif)
## Tools
GhostCrew includes built-in tools and supports MCP (Model Context Protocol) for extensibility.
PentestAgent includes built-in tools and supports MCP (Model Context Protocol) for extensibility.
**Built-in tools:** `terminal`, `browser`, `notes`, `web_search` (requires `TAVILY_API_KEY`)
### MCP Integration
Add external tools via MCP servers in `ghostcrew/mcp/mcp_servers.json`:
Add external tools via MCP servers in `pentestagent/mcp/mcp_servers.json`:
```json
{
@@ -167,23 +165,23 @@ Add external tools via MCP servers in `ghostcrew/mcp/mcp_servers.json`:
### CLI Tool Management
```bash
ghostcrew tools list # List all tools
ghostcrew tools info <name> # Show tool details
ghostcrew mcp list # List MCP servers
ghostcrew mcp add <name> <command> [args...] # Add MCP server
ghostcrew mcp test <name> # Test MCP connection
pentestagent tools list # List all tools
pentestagent tools info <name> # Show tool details
pentestagent mcp list # List MCP servers
pentestagent mcp add <name> <command> [args...] # Add MCP server
pentestagent mcp test <name> # Test MCP connection
```
## Knowledge
- **RAG:** Place methodologies, CVEs, or wordlists in `ghostcrew/knowledge/sources/` for automatic context injection.
- **RAG:** Place methodologies, CVEs, or wordlists in `pentestagent/knowledge/sources/` for automatic context injection.
- **Notes:** Agents save findings to `loot/notes.json` with categories (`credential`, `vulnerability`, `finding`, `artifact`). Notes persist across sessions and are injected into agent context.
- **Shadow Graph:** In Crew mode, the orchestrator builds a knowledge graph from notes to derive strategic insights (e.g., "We have credentials for host X").
## Project Structure
```
ghostcrew/
pentestagent/
agents/ # Agent implementations
config/ # Settings and constants
interface/ # TUI and CLI
@@ -199,10 +197,10 @@ ghostcrew/
```bash
pip install -e ".[dev]"
pytest # Run tests
pytest --cov=ghostcrew # With coverage
black ghostcrew # Format
ruff check ghostcrew # Lint
pytest # Run tests
pytest --cov=pentestagent # With coverage
black pentestagent # Format
ruff check pentestagent # Lint
```
## Legal

View File

@@ -1,202 +0,0 @@
<div align="center">
<img src="assets/ghostcrew-logo.png" alt="GhostCrew Logo" width="220" style="margin-bottom: 20px;"/>
# GHOSTCREW
### AI 渗透测试智能体
[![Python](https://img.shields.io/badge/Python-3.10%2B-blue.svg)](https://www.python.org/) [![License](https://img.shields.io/badge/License-MIT-green.svg)](LICENSE.txt) [![版本](https://img.shields.io/badge/版本-0.2.0-orange.svg)](https://github.com/GH05TCREW/ghostcrew/releases) [![Security](https://img.shields.io/badge/Security-渗透测试-red.svg)](https://github.com/GH05TCREW/ghostcrew) [![MCP](https://img.shields.io/badge/MCP-Compatible-purple.svg)](https://github.com/GH05TCREW/ghostcrew)
[🇺🇸 English](README.md) | [🇨🇳 中文文档](README_zh.md)
</div>
https://github.com/user-attachments/assets/a67db2b5-672a-43df-b709-149c8eaee975
## 要求
- Python 3.10+
- OpenAI, Anthropic 或其他支持 LiteLLM 的提供商的 API 密钥
## 安装
```bash
# 克隆仓库
git clone https://github.com/GH05TCREW/ghostcrew.git
cd ghostcrew
# 设置 (创建虚拟环境, 安装依赖)
.\scripts\setup.ps1 # Windows
./scripts/setup.sh # Linux/macOS
# 或者手动安装
python -m venv venv
.\venv\Scripts\Activate.ps1 # Windows
source venv/bin/activate # Linux/macOS
pip install -e ".[all]"
playwright install chromium # 浏览器工具需要
```
## 配置
在项目根目录创建 `.env` 文件:
```
ANTHROPIC_API_KEY=sk-ant-...
GHOSTCREW_MODEL=claude-sonnet-4-20250514
```
或者使用 OpenAI:
```
OPENAI_API_KEY=sk-...
GHOSTCREW_MODEL=gpt-5
```
任何 [LiteLLM 支持的模型](https://docs.litellm.ai/docs/providers) 都可以使用。
## 运行
```bash
ghostcrew # 启动 TUI (终端用户界面)
ghostcrew -t 192.168.1.1 # 启动并指定目标
ghostcrew --docker # 在 Docker 容器中运行工具
```
## Docker
在 Docker 容器中运行工具,以实现隔离并使用预安装的渗透测试工具。
### 选项 1: 拉取预构建镜像 (最快)
```bash
# 基础镜像 (包含 nmap, netcat, curl)
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
-e GHOSTCREW_MODEL=claude-sonnet-4-20250514 \
ghcr.io/gh05tcrew/ghostcrew:latest
# Kali 镜像 (包含 metasploit, sqlmap, hydra 等)
docker run -it --rm \
-e ANTHROPIC_API_KEY=your-key \
ghcr.io/gh05tcrew/ghostcrew:kali
```
### 选项 2: 本地构建
```bash
# 构建
docker compose build
# 运行
docker compose run --rm ghostcrew
# 或者使用 Kali
docker compose --profile kali build
docker compose --profile kali run --rm ghostcrew-kali
```
容器运行 GhostCrew 并可以访问 Linux 渗透测试工具。代理可以通过终端工具直接使用 `nmap`, `msfconsole`, `sqlmap` 等。
需要安装并运行 Docker。
## 模式
GhostCrew 有三种模式,可通过 TUI 中的命令访问:
| 模式 | 命令 | 描述 |
|------|---------|-------------|
| 辅助 (Assist) | (默认) | 与代理聊天。你控制流程。 |
| 代理 (Agent) | `/agent <任务>` | 自主执行单个任务。 |
| 团队 (Crew) | `/crew <任务>` | 多代理模式。协调器生成专门的工作者。 |
### TUI 命令
```
/agent <task> 运行自主代理执行任务
/crew <task> 运行多代理团队执行任务
/target <host> 设置目标
/tools 列出可用工具
/notes 显示保存的笔记
/report 从会话生成报告
/memory 显示令牌/内存使用情况
/prompt 显示系统提示词
/clear 清除聊天和历史记录
/quit 退出 (也可以用 /exit, /q)
/help 显示帮助 (也可以用 /h, /?)
```
`Esc` 停止正在运行的代理。按 `Ctrl+Q` 退出。
## 工具
GhostCrew 包含内置工具,并支持 MCP (Model Context Protocol) 进行扩展。
**内置工具:** `terminal` (终端), `browser` (浏览器), `notes` (笔记), `web_search` (网络搜索, 需要 `TAVILY_API_KEY`)
### MCP 集成
通过 `ghostcrew/mcp/mcp_servers.json` 添加外部工具 (MCP 服务器):
```json
{
"mcpServers": {
"nmap": {
"command": "npx",
"args": ["-y", "gc-nmap-mcp"],
"env": {
"NMAP_PATH": "/usr/bin/nmap"
}
}
}
}
```
### CLI 工具管理
```bash
ghostcrew tools list # 列出所有工具
ghostcrew tools info <name> # 显示工具详情
ghostcrew mcp list # 列出 MCP 服务器
ghostcrew mcp add <name> <command> [args...] # 添加 MCP 服务器
ghostcrew mcp test <name> # 测试 MCP 连接
```
## 知识库
- **RAG (检索增强生成):** 将方法论、CVE 或字典放在 `ghostcrew/knowledge/sources/` 中,以便自动注入上下文。
- **笔记:** 代理将发现保存到 `loot/notes.json`,分类为 (`credential` 凭据, `vulnerability` 漏洞, `finding` 发现, `artifact` 工件)。笔记在会话之间持久保存,并注入到代理上下文中。
- **影子图 (Shadow Graph):** 在团队模式下,协调器从笔记构建知识图谱,以得出战略见解 (例如,“我们拥有主机 X 的凭据”)。
## 项目结构
```
ghostcrew/
agents/ # 代理实现
config/ # 设置和常量
interface/ # TUI 和 CLI
knowledge/ # RAG 系统和影子图
llm/ # LiteLLM 包装器
mcp/ # MCP 客户端和服务器配置
playbooks/ # 攻击剧本
runtime/ # 执行环境
tools/ # 内置工具
```
## 开发
```bash
pip install -e ".[dev]"
pytest # 运行测试
pytest --cov=ghostcrew # 带覆盖率运行
black ghostcrew # 格式化代码
ruff check ghostcrew # 代码检查
```
## 法律声明
仅用于您有明确授权进行测试的系统。未经授权的访问是非法的。
## 许可证
MIT

Binary file not shown.

Before

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 569 KiB

View File

@@ -1,26 +1,26 @@
services:
ghostcrew:
pentestagent:
build:
context: .
dockerfile: Dockerfile
container_name: ghostcrew
container_name: pentestagent
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- GHOSTCREW_MODEL=${GHOSTCREW_MODEL}
- GHOSTCREW_DEBUG=${GHOSTCREW_DEBUG:-false}
- PENTESTAGENT_MODEL=${PENTESTAGENT_MODEL}
- PENTESTAGENT_DEBUG=${PENTESTAGENT_DEBUG:-false}
volumes:
- ./loot:/app/loot
networks:
- ghostcrew-net
- pentestagent-net
stdin_open: true
tty: true
ghostcrew-kali:
pentestagent-kali:
build:
context: .
dockerfile: Dockerfile.kali
container_name: ghostcrew-kali
container_name: pentestagent-kali
privileged: true # Required for VPN and some tools
cap_add:
- NET_ADMIN
@@ -28,18 +28,18 @@ services:
environment:
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- GHOSTCREW_MODEL=${GHOSTCREW_MODEL}
- PENTESTAGENT_MODEL=${PENTESTAGENT_MODEL}
- ENABLE_TOR=${ENABLE_TOR:-false}
- INIT_METASPLOIT=${INIT_METASPLOIT:-false}
volumes:
- ./loot:/app/loot
networks:
- ghostcrew-net
- pentestagent-net
stdin_open: true
tty: true
profiles:
- kali
networks:
ghostcrew-net:
pentestagent-net:
driver: bridge

View File

@@ -1,5 +1,5 @@
#!/bin/bash
# GhostCrew Docker Entrypoint
# PentestAgent Docker Entrypoint
set -e
@@ -9,7 +9,7 @@ GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
echo -e "${GREEN}🔧 GhostCrew Container Starting...${NC}"
echo -e "${GREEN}🔧 PentestAgent Container Starting...${NC}"
# Start VPN if config provided
if [ -f "/vpn/config.ovpn" ]; then
@@ -41,10 +41,10 @@ fi
# Create output directory with timestamp
OUTPUT_DIR="/output/$(date +%Y%m%d_%H%M%S)"
mkdir -p "$OUTPUT_DIR"
export GHOSTCREW_OUTPUT_DIR="$OUTPUT_DIR"
export PENTESTAGENT_OUTPUT_DIR="$OUTPUT_DIR"
echo -e "${GREEN}📁 Output directory: $OUTPUT_DIR${NC}"
echo -e "${GREEN}🚀 Starting GhostCrew...${NC}"
echo -e "${GREEN}🚀 Starting PentestAgent...${NC}"
# Execute the main command
exec "$@"

View File

@@ -1,4 +0,0 @@
"""GhostCrew - AI penetration testing."""
__version__ = "0.2.0"
__author__ = "Masic"

View File

@@ -1,6 +0,0 @@
"""GhostCrew entry point for `python -m ghostcrew`."""
from ghostcrew.interface.main import main
if __name__ == "__main__":
main()

View File

@@ -44,7 +44,7 @@ class WorkerPool:
def _generate_id(self) -> str:
"""Generate unique worker ID."""
worker_id = f"ghost-{self._next_id}"
worker_id = f"agent-{self._next_id}"
self._next_id += 1
return worker_id
@@ -93,7 +93,7 @@ class WorkerPool:
async def _run_worker(self, worker: AgentWorker) -> None:
"""Run a single worker agent."""
from ..ghostcrew_agent import GhostCrewAgent
from ..pa_agent import PentestAgentAgent
# Wait for dependencies
if worker.depends_on:
@@ -111,7 +111,7 @@ class WorkerPool:
from ...config.constants import WORKER_MAX_ITERATIONS
agent = GhostCrewAgent(
agent = PentestAgentAgent(
llm=self.llm,
tools=self.tools,
runtime=worker_runtime, # Use isolated runtime

View File

@@ -1,5 +0,0 @@
"""GhostCrew main agent implementation."""
from .ghostcrew_agent import GhostCrewAgent
__all__ = ["GhostCrewAgent"]

View File

@@ -1,4 +1,4 @@
"""Non-interactive CLI mode for GhostCrew."""
"""Non-interactive CLI mode for PentestAgent."""
import asyncio
import time
@@ -12,12 +12,12 @@ from rich.text import Text
console = Console()
# Ghost theme colors (matching TUI)
GHOST_PRIMARY = "#d4d4d4" # light gray - primary text
GHOST_SECONDARY = "#9a9a9a" # medium gray - secondary text
GHOST_DIM = "#6b6b6b" # dim gray - muted text
GHOST_BORDER = "#3a3a3a" # dark gray - borders
GHOST_ACCENT = "#7a7a7a" # accent gray
# PA theme colors (matching TUI)
PA_PRIMARY = "#d4d4d4" # light gray - primary text
PA_SECONDARY = "#9a9a9a" # medium gray - secondary text
PA_DIM = "#6b6b6b" # dim gray - muted text
PA_BORDER = "#3a3a3a" # dark gray - borders
PA_ACCENT = "#7a7a7a" # accent gray
async def run_cli(
@@ -30,7 +30,7 @@ async def run_cli(
mode: str = "agent",
):
"""
Run GhostCrew in non-interactive mode.
Run PentestAgent in non-interactive mode.
Args:
target: Target to test
@@ -41,7 +41,7 @@ async def run_cli(
use_docker: Run tools in Docker container
mode: Execution mode ("agent" or "crew")
"""
from ..agents.ghostcrew_agent import GhostCrewAgent
from ..agents.pa_agent import PentestAgentAgent
from ..knowledge import RAGEngine
from ..llm import LLM
from ..runtime.docker_runtime import DockerRuntime
@@ -50,27 +50,27 @@ async def run_cli(
# Startup panel
start_text = Text()
start_text.append("GHOSTCREW", style=f"bold {GHOST_PRIMARY}")
start_text.append(" - Non-interactive Mode\n\n", style=GHOST_DIM)
start_text.append("Target: ", style=GHOST_SECONDARY)
start_text.append(f"{target}\n", style=GHOST_PRIMARY)
start_text.append("Model: ", style=GHOST_SECONDARY)
start_text.append(f"{model}\n", style=GHOST_PRIMARY)
start_text.append("Mode: ", style=GHOST_SECONDARY)
start_text.append(f"{mode.title()}\n", style=GHOST_PRIMARY)
start_text.append("Runtime: ", style=GHOST_SECONDARY)
start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=GHOST_PRIMARY)
start_text.append("Max loops: ", style=GHOST_SECONDARY)
start_text.append(f"{max_loops}\n", style=GHOST_PRIMARY)
start_text.append("PENTESTAGENT", style=f"bold {PA_PRIMARY}")
start_text.append(" - Non-interactive Mode\n\n", style=PA_DIM)
start_text.append("Target: ", style=PA_SECONDARY)
start_text.append(f"{target}\n", style=PA_PRIMARY)
start_text.append("Model: ", style=PA_SECONDARY)
start_text.append(f"{model}\n", style=PA_PRIMARY)
start_text.append("Mode: ", style=PA_SECONDARY)
start_text.append(f"{mode.title()}\n", style=PA_PRIMARY)
start_text.append("Runtime: ", style=PA_SECONDARY)
start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=PA_PRIMARY)
start_text.append("Max loops: ", style=PA_SECONDARY)
start_text.append(f"{max_loops}\n", style=PA_PRIMARY)
task_msg = task or f"Perform a penetration test on {target}"
start_text.append("Task: ", style=GHOST_SECONDARY)
start_text.append(task_msg, style=GHOST_PRIMARY)
start_text.append("Task: ", style=PA_SECONDARY)
start_text.append(task_msg, style=PA_PRIMARY)
console.print()
console.print(
Panel(
start_text, title=f"[{GHOST_SECONDARY}]Starting", border_style=GHOST_BORDER
start_text, title=f"[{PA_SECONDARY}]Starting", border_style=PA_BORDER
)
)
console.print()
@@ -99,13 +99,13 @@ async def run_cli(
register_tool_instance(tool)
mcp_count = len(mcp_tools)
if mcp_count > 0:
console.print(f"[{GHOST_DIM}]Loaded {mcp_count} MCP tools[/]")
console.print(f"[{PA_DIM}]Loaded {mcp_count} MCP tools[/]")
except Exception:
pass # MCP is optional, continue without it
# Initialize runtime - Docker or Local
if use_docker:
console.print(f"[{GHOST_DIM}]Starting Docker container...[/]")
console.print(f"[{PA_DIM}]Starting Docker container...[/]")
runtime = DockerRuntime(mcp_manager=mcp_manager)
else:
runtime = LocalRuntime(mcp_manager=mcp_manager)
@@ -127,11 +127,11 @@ async def run_cli(
last_msg_intermediate = False # Track if previous message was intermediate (to avoid double counting tokens)
stopped_reason = None
def print_status(msg: str, style: str = GHOST_DIM):
def print_status(msg: str, style: str = PA_DIM):
elapsed = int(time.time() - start_time)
mins, secs = divmod(elapsed, 60)
timestamp = f"[{mins:02d}:{secs:02d}]"
console.print(f"[{GHOST_DIM}]{timestamp}[/] [{style}]{msg}[/]")
console.print(f"[{PA_DIM}]{timestamp}[/] [{style}]{msg}[/]")
def display_message(content: str, title: str) -> bool:
"""Display a message panel if it hasn't been shown yet."""
@@ -141,8 +141,8 @@ async def run_cli(
console.print(
Panel(
Markdown(content),
title=f"[{GHOST_PRIMARY}]{title}",
border_style=GHOST_BORDER,
title=f"[{PA_PRIMARY}]{title}",
border_style=PA_BORDER,
)
)
console.print()
@@ -160,7 +160,7 @@ async def run_cli(
status_text = f"Interrupted ({stopped_reason})"
lines = [
"# GhostCrew Penetration Test Report",
"# PentestAgent Penetration Test Report",
"",
"## Executive Summary",
"",
@@ -267,7 +267,7 @@ async def run_cli(
[
"---",
"",
f"*Report generated by GhostCrew on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
f"*Report generated by PentestAgent on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
]
)
@@ -291,14 +291,14 @@ async def run_cli(
content = generate_report()
report_path.write_text(content, encoding="utf-8")
console.print(f"[{GHOST_SECONDARY}]Report saved: {report_path}[/]")
console.print(f"[{PA_SECONDARY}]Report saved: {report_path}[/]")
async def generate_summary():
"""Ask the LLM to summarize findings when stopped early."""
if not tool_log:
return None
print_status("Generating summary...", GHOST_SECONDARY)
print_status("Generating summary...", PA_SECONDARY)
# Build context from tool results (use full results, not truncated)
context_lines = ["Summarize the penetration test findings so far:\n"]
@@ -345,28 +345,28 @@ async def run_cli(
status = f"STOPPED ({stopped_reason})"
final_text = Text()
final_text.append(f"{status}\n\n", style=f"bold {GHOST_PRIMARY}")
final_text.append("Duration: ", style=GHOST_DIM)
final_text.append(f"{mins}m {secs}s\n", style=GHOST_SECONDARY)
final_text.append("Loops: ", style=GHOST_DIM)
final_text.append(f"{iteration}/{max_loops}\n", style=GHOST_SECONDARY)
final_text.append("Tools: ", style=GHOST_DIM)
final_text.append(f"{tool_count}\n", style=GHOST_SECONDARY)
final_text.append(f"{status}\n\n", style=f"bold {PA_PRIMARY}")
final_text.append("Duration: ", style=PA_DIM)
final_text.append(f"{mins}m {secs}s\n", style=PA_SECONDARY)
final_text.append("Loops: ", style=PA_DIM)
final_text.append(f"{iteration}/{max_loops}\n", style=PA_SECONDARY)
final_text.append("Tools: ", style=PA_DIM)
final_text.append(f"{tool_count}\n", style=PA_SECONDARY)
if total_tokens > 0:
final_text.append("Tokens: ", style=GHOST_DIM)
final_text.append(f"{total_tokens:,}\n", style=GHOST_SECONDARY)
final_text.append("Tokens: ", style=PA_DIM)
final_text.append(f"{total_tokens:,}\n", style=PA_SECONDARY)
if findings_count > 0:
final_text.append("Findings: ", style=GHOST_DIM)
final_text.append(f"{findings_count}", style=GHOST_SECONDARY)
final_text.append("Findings: ", style=PA_DIM)
final_text.append(f"{findings_count}", style=PA_SECONDARY)
console.print()
console.print(
Panel(
final_text,
title=f"[{GHOST_SECONDARY}]{title}",
border_style=GHOST_BORDER,
title=f"[{PA_SECONDARY}]{title}",
border_style=PA_BORDER,
)
)
@@ -388,13 +388,13 @@ async def run_cli(
if event_type == "spawn":
task = data.get("task", "")
print_status(f"Spawned worker {worker_id}: {task}", GHOST_ACCENT)
print_status(f"Spawned worker {worker_id}: {task}", PA_ACCENT)
elif event_type == "tool":
tool_name = data.get("tool", "unknown")
tool_count += 1
print_status(
f"Worker {worker_id} using tool: {tool_name}", GHOST_DIM
f"Worker {worker_id} using tool: {tool_name}", PA_DIM
)
# Log tool usage (limited info available from event)
@@ -429,7 +429,7 @@ async def run_cli(
elif event_type == "status":
status = data.get("status", "")
print_status(f"Worker {worker_id} status: {status}", GHOST_DIM)
print_status(f"Worker {worker_id} status: {status}", PA_DIM)
elif event_type == "warning":
reason = data.get("reason", "unknown")
@@ -456,17 +456,17 @@ async def run_cli(
phase = update.get("phase", "")
if phase == "starting":
print_status("Crew orchestrator starting...", GHOST_PRIMARY)
print_status("Crew orchestrator starting...", PA_PRIMARY)
elif phase == "thinking":
content = update.get("content", "")
if content:
display_message(content, "GhostCrew Plan")
display_message(content, "PentestAgent Plan")
elif phase == "tool_call":
tool = update.get("tool", "")
args = update.get("args", {})
print_status(f"Orchestrator calling: {tool}", GHOST_ACCENT)
print_status(f"Orchestrator calling: {tool}", PA_ACCENT)
elif phase == "complete":
report_content = update.get("report", "")
@@ -487,7 +487,7 @@ async def run_cli(
else:
# Default Agent Mode
agent = GhostCrewAgent(
agent = PentestAgentAgent(
llm=llm,
tools=tools,
runtime=runtime,
@@ -611,39 +611,39 @@ async def run_cli(
# Metasploit-style output with better spacing
console.print() # Blank line before each tool
print_status(f"$ {name} ({tool_count})", GHOST_ACCENT)
print_status(f"$ {name} ({tool_count})", PA_ACCENT)
# Show command/args on separate indented line (truncated for display)
if command_text:
display_cmd = command_text[:80]
if len(command_text) > 80:
display_cmd += "..."
console.print(f" [{GHOST_DIM}]{display_cmd}[/]")
console.print(f" [{PA_DIM}]{display_cmd}[/]")
# Show result on separate line with status indicator
if response.tool_results and i < len(response.tool_results):
tr = response.tool_results[i]
if tr.error:
console.print(
f" [{GHOST_DIM}][!] {tr.error[:100]}[/]"
f" [{PA_DIM}][!] {tr.error[:100]}[/]"
)
elif tr.result:
# Show exit code or brief result
result_line = tr.result[:100].replace("\n", " ")
if exit_code == 0 or "success" in result_line.lower():
console.print(f" [{GHOST_DIM}][+] OK[/]")
console.print(f" [{PA_DIM}][+] OK[/]")
elif exit_code is not None and exit_code != 0:
console.print(
f" [{GHOST_DIM}][-] Exit {exit_code}[/]"
f" [{PA_DIM}][-] Exit {exit_code}[/]"
)
else:
console.print(
f" [{GHOST_DIM}][*] {result_line[:60]}...[/]"
f" [{PA_DIM}][*] {result_line[:60]}...[/]"
)
# Print assistant content immediately (analysis/findings)
if response.content:
if display_message(response.content, "GhostCrew"):
if display_message(response.content, "PentestAgent"):
messages.append(response.content)
# Check max loops limit

View File

@@ -1,5 +1,5 @@
"""
GhostCrew TUI - Terminal User Interface
PentestAgent TUI - Terminal User Interface
"""
import asyncio
@@ -51,7 +51,7 @@ class CrewTree(Tree):
if TYPE_CHECKING:
from ..agents.ghostcrew_agent import GhostCrewAgent
from ..agents.pa_agent import PentestAgentAgent
def wrap_text_lines(text: str, width: int = 80) -> List[str]:
@@ -142,7 +142,7 @@ class HelpScreen(ModalScreen):
def compose(self) -> ComposeResult:
yield Container(
Static("GhostCrew Help", id="help-title"),
Static("PentestAgent Help", id="help-title"),
Static(self._get_help_text(), id="help-content"),
Center(Button("Close", id="help-close")),
id="help-container",
@@ -200,7 +200,7 @@ class ThinkingMessage(Static):
class ToolMessage(Static):
"""Tool execution message"""
# Standard tool icon and color (ghost theme)
# Standard tool icon and color (pa theme)
TOOL_ICON = "$"
TOOL_COLOR = "#9a9a9a" # spirit gray
@@ -262,7 +262,7 @@ class AssistantMessage(Static):
text = Text()
text.append("| ", style="#525252")
text.append(">> ", style="#9a9a9a")
text.append("Ghost\n", style="bold #d4d4d4")
text.append("PentestAgent\n", style="bold #d4d4d4")
# Wrap content - use 70 chars to account for sidebar + prefix
for line in wrap_text_lines(self.message_content, width=70):
@@ -331,7 +331,7 @@ class StatusBar(Static):
# Use fixed-width labels (pad dots to 4 chars so text doesn't jump)
dots_padded = dots.ljust(4)
# Ghost theme status colors (muted, ethereal)
# PA theme status colors (muted, ethereal)
status_map = {
"idle": ("Ready", "#6b6b6b"),
"initializing": (f"Initializing{dots_padded}", "#9a9a9a"),
@@ -366,11 +366,11 @@ class StatusBar(Static):
# ----- Main TUI App -----
class GhostCrewTUI(App):
"""Main GhostCrew TUI Application"""
class PentestAgentTUI(App):
"""Main PentestAgent TUI Application"""
# ═══════════════════════════════════════════════════════════
# GHOST THEME - Ethereal grays emerging from darkness
# PA THEME - Ethereal grays
# ═══════════════════════════════════════════════════════════
# Void: #0a0a0a (terminal black - the darkness)
# Shadow: #121212 (subtle surface)
@@ -559,7 +559,7 @@ class GhostCrewTUI(App):
Binding("tab", "focus_next", "Next", show=False),
]
TITLE = "GhostCrew"
TITLE = "PentestAgent"
SUB_TITLE = "AI Penetration Testing"
def __init__(
@@ -575,7 +575,7 @@ class GhostCrewTUI(App):
self.use_docker = use_docker
# Agent components
self.agent: Optional["GhostCrewAgent"] = None
self.agent: Optional["PentestAgentAgent"] = None
self.runtime = None
self.mcp_manager = None
self.all_tools = []
@@ -641,7 +641,7 @@ class GhostCrewTUI(App):
try:
import os
from ..agents.ghostcrew_agent import GhostCrewAgent
from ..agents.pa_agent import PentestAgentAgent
from ..knowledge import RAGEngine
from ..llm import LLM, ModelConfig
from ..mcp import MCPManager
@@ -665,7 +665,7 @@ class GhostCrewTUI(App):
if knowledge_path:
try:
# Determine embedding method: env var > auto-detect
embeddings_setting = os.getenv("GHOSTCREW_EMBEDDINGS", "").lower()
embeddings_setting = os.getenv("PENTESTAGENT_EMBEDDINGS", "").lower()
if embeddings_setting == "local":
use_local = True
elif embeddings_setting == "openai":
@@ -714,7 +714,7 @@ class GhostCrewTUI(App):
self.all_tools = get_all_tools()
# Agent
self.agent = GhostCrewAgent(
self.agent = PentestAgentAgent(
llm=llm,
tools=self.all_tools,
runtime=self.runtime,
@@ -732,7 +732,7 @@ class GhostCrewTUI(App):
runtime_str = "Docker" if self.use_docker else "Local"
self._add_system(
f"+ GhostCrew ready\n"
f"+ PentestAgent ready\n"
f" Model: {self.model} | Tools: {len(self.all_tools)} | MCP: {mcp_server_count} | RAG: {rag_doc_count}\n"
f" Runtime: {runtime_str} | Mode: Assist (use /agent or /crew for autonomous modes)"
)
@@ -940,7 +940,7 @@ class GhostCrewTUI(App):
notes = await get_all_notes()
if not notes:
self._add_system(
"No notes found. Ghost saves findings using the notes tool during testing."
"No notes found. PentestAgent saves findings using the notes tool during testing."
)
return
@@ -1791,8 +1791,8 @@ def run_tui(
model: str = None,
use_docker: bool = False,
):
"""Run the GhostCrew TUI"""
app = GhostCrewTUI(
"""Run the PentestAgent TUI"""
app = PentestAgentTUI(
target=target,
model=model,
use_docker=use_docker,

4
pentestagent/__init__.py Normal file
View File

@@ -0,0 +1,4 @@
"""PentestAgent - AI penetration testing."""
__version__ = "0.2.0"
__author__ = "Masic"

6
pentestagent/__main__.py Normal file
View File

@@ -0,0 +1,6 @@
"""PentestAgent entry point for `python -m pentestagent`."""
from pentestagent.interface.main import main
if __name__ == "__main__":
main()

View File

@@ -1,4 +1,4 @@
"""Agent system for GhostCrew."""
"""Agent system for PentestAgent."""
from .base_agent import AgentMessage, BaseAgent
from .crew import AgentStatus, AgentWorker, CrewOrchestrator, CrewState

View File

@@ -1,4 +1,4 @@
"""Base agent class for GhostCrew."""
"""Base agent class for PentestAgent."""
from abc import ABC, abstractmethod
from dataclasses import dataclass, field

View File

@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional
from ...config.constants import DEFAULT_MAX_ITERATIONS
from ...knowledge.graph import ShadowGraph
from ..prompts import ghost_crew
from ..prompts import pa_crew
from .models import CrewState, WorkerCallback
from .tools import create_crew_tools
from .worker_pool import WorkerPool
@@ -122,7 +122,7 @@ class CrewOrchestrator:
f"- {i}" for i in graph_insights
)
return ghost_crew.render(
return pa_crew.render(
target=self.target or "Not specified",
prior_context=self.prior_context or "None - starting fresh",
notes_context=notes_context + insights_text,

View File

@@ -0,0 +1,336 @@
"""Worker pool for managing concurrent agent execution."""
import asyncio
import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from .models import AgentStatus, AgentWorker, WorkerCallback
if TYPE_CHECKING:
from ...llm import LLM
from ...runtime import Runtime
from ...tools import Tool
class WorkerPool:
    """Manages concurrent execution of worker agents.

    Each spawned worker runs a PentestAgentAgent inside its own asyncio.Task
    with an isolated LocalRuntime. Lifecycle events (spawn/status/tool/tokens/
    complete/warning/failed/error/cancelled) are reported through the optional
    ``on_worker_event`` callback so a UI can track progress.
    """

    def __init__(
        self,
        llm: "LLM",
        tools: List["Tool"],
        runtime: "Runtime",
        target: str = "",
        rag_engine: Any = None,
        on_worker_event: Optional[WorkerCallback] = None,
    ):
        # Shared resources handed to every worker agent.
        self.llm = llm
        self.tools = tools
        # NOTE(review): this pool-level runtime is stored but _run_worker builds
        # its own isolated LocalRuntime per worker - confirm intended usage.
        self.runtime = runtime
        self.target = target
        self.rag_engine = rag_engine
        self.on_worker_event = on_worker_event
        # Bookkeeping: worker metadata, their asyncio tasks, and final results.
        self._workers: Dict[str, AgentWorker] = {}
        self._tasks: Dict[str, asyncio.Task] = {}
        self._results: Dict[str, str] = {}
        self._next_id = 0
        # Guards ID generation / worker registration during concurrent spawns.
        self._lock = asyncio.Lock()

    def _emit(self, worker_id: str, event: str, data: Dict[str, Any]) -> None:
        """Emit event to callback if registered."""
        if self.on_worker_event:
            self.on_worker_event(worker_id, event, data)

    def _generate_id(self) -> str:
        """Generate unique worker ID (``agent-0``, ``agent-1``, ...)."""
        worker_id = f"agent-{self._next_id}"
        self._next_id += 1
        return worker_id

    async def spawn(
        self,
        task: str,
        priority: int = 1,
        depends_on: Optional[List[str]] = None,
    ) -> str:
        """
        Spawn a new worker agent.

        Args:
            task: The task description for the agent
            priority: Higher priority runs first (for future use)
            depends_on: List of agent IDs that must complete first

        Returns:
            The worker ID
        """
        async with self._lock:
            worker_id = self._generate_id()
            worker = AgentWorker(
                id=worker_id,
                task=task,
                priority=priority,
                depends_on=depends_on or [],
            )
            self._workers[worker_id] = worker
        # Emit spawn event for UI
        self._emit(
            worker_id,
            "spawn",
            {
                "worker_type": worker_id,
                "task": task,
            },
        )
        # Start the agent task immediately; dependency waiting happens inside
        # _run_worker, so the task itself may idle until dependencies finish.
        self._tasks[worker_id] = asyncio.create_task(self._run_worker(worker))
        return worker_id

    async def _run_worker(self, worker: AgentWorker) -> None:
        """Run a single worker agent end-to-end and record its outcome."""
        from ..pa_agent import PentestAgentAgent

        # Wait for dependencies
        if worker.depends_on:
            await self._wait_for_dependencies(worker.depends_on)
        worker.status = AgentStatus.RUNNING
        worker.started_at = time.time()
        self._emit(worker.id, "status", {"status": "running"})
        # Create isolated runtime for this worker (prevents browser state conflicts)
        from ...runtime.runtime import LocalRuntime

        worker_runtime = LocalRuntime()
        await worker_runtime.start()
        from ...config.constants import WORKER_MAX_ITERATIONS

        agent = PentestAgentAgent(
            llm=self.llm,
            tools=self.tools,
            runtime=worker_runtime,  # Use isolated runtime
            target=self.target,
            rag_engine=self.rag_engine,
            max_iterations=WORKER_MAX_ITERATIONS,
        )
        try:
            final_response = ""
            hit_max_iterations = False
            is_infeasible = False
            async for response in agent.agent_loop(worker.task):
                # Track tool calls
                if response.tool_calls:
                    for tc in response.tool_calls:
                        if tc.name not in worker.tools_used:
                            worker.tools_used.append(tc.name)
                        self._emit(worker.id, "tool", {"tool": tc.name})
                # Track tokens (avoid double counting)
                if response.usage:
                    total = response.usage.get("total_tokens", 0)
                    is_intermediate = response.metadata.get("intermediate", False)
                    has_tools = bool(response.tool_calls)
                    # Same logic as CLI to avoid double counting:
                    # intermediate (thinking) messages always count; a tool
                    # message counts only when not preceded by an intermediate
                    # one (the usage would otherwise be reported twice).
                    should_count = False
                    if is_intermediate:
                        should_count = True
                        worker.last_msg_intermediate = True
                    elif has_tools:
                        if not getattr(worker, "last_msg_intermediate", False):
                            should_count = True
                        worker.last_msg_intermediate = False
                    else:
                        should_count = True
                        worker.last_msg_intermediate = False
                    if should_count and total > 0:
                        self._emit(worker.id, "tokens", {"tokens": total})
                # Capture final response (text without tool calls)
                if response.content and not response.tool_calls:
                    final_response = response.content
                # Check metadata flags
                if response.metadata:
                    if response.metadata.get("max_iterations_reached"):
                        hit_max_iterations = True
                    if response.metadata.get("replan_impossible"):
                        is_infeasible = True
            # Prioritize structured results from the plan over chatty summaries
            plan_summary = ""
            plan = getattr(worker_runtime, "plan", None)
            if plan and plan.steps:
                from ...tools.finish import StepStatus

                # Include ALL steps regardless of status - skips and failures are valuable context
                # Note: PlanStep stores failure/skip reasons in the 'result' field
                steps_with_info = [s for s in plan.steps if s.result]
                if steps_with_info:
                    summary_lines = []
                    for s in steps_with_info:
                        status_marker = {
                            StepStatus.COMPLETE: "",
                            StepStatus.SKIP: "",
                            StepStatus.FAIL: "",
                        }.get(s.status, "·")
                        info = s.result or "No details"
                        summary_lines.append(f"{status_marker} {s.description}: {info}")
                    plan_summary = "\n".join(summary_lines)
            # Use plan summary if available, otherwise fallback to chat response
            worker.result = plan_summary or final_response or "No findings."
            worker.completed_at = time.time()
            self._results[worker.id] = worker.result
            # Final status: infeasibility outranks max-iterations, which
            # outranks plain completion.
            if is_infeasible:
                worker.status = AgentStatus.FAILED
                self._emit(
                    worker.id,
                    "failed",
                    {
                        "summary": worker.result[:200],
                        "reason": "Task determined infeasible",
                    },
                )
            elif hit_max_iterations:
                worker.status = AgentStatus.WARNING
                self._emit(
                    worker.id,
                    "warning",
                    {
                        "summary": worker.result[:200],
                        "reason": "Max iterations reached",
                    },
                )
            else:
                worker.status = AgentStatus.COMPLETE
                self._emit(
                    worker.id,
                    "complete",
                    {
                        "summary": worker.result[:200],
                    },
                )
        except asyncio.CancelledError:
            worker.status = AgentStatus.CANCELLED
            worker.completed_at = time.time()
            self._emit(worker.id, "cancelled", {})
            # Re-raise so asyncio cancellation semantics are preserved.
            raise
        except Exception as e:
            worker.error = str(e)
            worker.status = AgentStatus.ERROR
            worker.completed_at = time.time()
            self._emit(worker.id, "error", {"error": str(e)})
        finally:
            # Cleanup worker's isolated runtime
            try:
                await worker_runtime.stop()
            except Exception:
                pass  # Best effort cleanup

    async def _wait_for_dependencies(self, depends_on: List[str]) -> None:
        """Wait for dependent workers to complete (failures do not block us)."""
        for dep_id in depends_on:
            if dep_id in self._tasks:
                try:
                    await self._tasks[dep_id]
                except (asyncio.CancelledError, Exception):
                    pass  # Dependency failed, but we continue

    async def wait_for(self, agent_ids: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Wait for specified agents (or all) to complete.

        Args:
            agent_ids: List of agent IDs to wait for. None = wait for all.

        Returns:
            Dict mapping agent_id to a dict with task/status/result/error/tools_used
        """
        if agent_ids is None:
            agent_ids = list(self._tasks.keys())
        results = {}
        for agent_id in agent_ids:
            if agent_id in self._tasks:
                try:
                    await self._tasks[agent_id]
                except (asyncio.CancelledError, Exception):
                    pass
            worker = self._workers.get(agent_id)
            if worker:
                results[agent_id] = {
                    "task": worker.task,
                    "status": worker.status.value,
                    "result": worker.result,
                    "error": worker.error,
                    "tools_used": worker.tools_used,
                }
        return results

    def get_status(self, agent_id: str) -> Optional[Dict[str, Any]]:
        """Get status of a specific agent, or None if unknown."""
        worker = self._workers.get(agent_id)
        if not worker:
            return None
        return worker.to_dict()

    def get_all_status(self) -> Dict[str, Dict[str, Any]]:
        """Get status of all agents keyed by worker ID."""
        return {wid: w.to_dict() for wid, w in self._workers.items()}

    async def cancel(self, agent_id: str) -> bool:
        """Cancel a running agent. Returns True if a cancellation was issued."""
        if agent_id not in self._tasks:
            return False
        task = self._tasks[agent_id]
        if not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
            return True
        return False

    async def cancel_all(self) -> None:
        """Cancel all running agents and wait for them to settle."""
        for task in self._tasks.values():
            if not task.done():
                task.cancel()
        # Wait for all to finish
        if self._tasks:
            await asyncio.gather(*self._tasks.values(), return_exceptions=True)

    def get_results(self) -> Dict[str, str]:
        """Get results from all completed agents (copy of internal map)."""
        return dict(self._results)

    def get_workers(self) -> List[AgentWorker]:
        """Get all workers."""
        return list(self._workers.values())

    def reset(self) -> None:
        """Reset the pool for a new task (does not cancel in-flight tasks)."""
        self._workers.clear()
        self._tasks.clear()
        self._results.clear()
        self._next_id = 0

View File

@@ -0,0 +1,5 @@
"""PentestAgent main agent implementation."""
from .pa_agent import PentestAgentAgent
__all__ = ["PentestAgentAgent"]

View File

@@ -1,9 +1,9 @@
"""GhostCrew main pentesting agent."""
"""PentestAgent main pentesting agent."""
from typing import TYPE_CHECKING, List, Optional
from ..base_agent import BaseAgent
from ..prompts import ghost_agent, ghost_assist
from ..prompts import pa_agent, pa_assist
if TYPE_CHECKING:
from ...knowledge import RAGEngine
@@ -12,8 +12,8 @@ if TYPE_CHECKING:
from ...tools import Tool
class GhostCrewAgent(BaseAgent):
"""Main pentesting agent for GhostCrew."""
class PentestAgentAgent(BaseAgent):
"""Main pentesting agent for PentestAgent."""
def __init__(
self,
@@ -26,7 +26,7 @@ class GhostCrewAgent(BaseAgent):
**kwargs,
):
"""
Initialize the GhostCrew agent.
Initialize the PentestAgent agent.
Args:
llm: The LLM instance for generating responses
@@ -124,7 +124,7 @@ class GhostCrewAgent(BaseAgent):
env = self.runtime.environment
# Select template based on mode
template = ghost_assist if mode == "assist" else ghost_agent
template = pa_assist if mode == "assist" else pa_agent
return template.render(
target=self.target,

View File

@@ -1,4 +1,4 @@
"""Prompt templates for GhostCrew agents."""
"""Prompt templates for PentestAgent agents."""
from pathlib import Path
@@ -11,7 +11,7 @@ def load_prompt(name: str) -> Template:
"""Load a prompt template by name.
Args:
name: Prompt name without extension (e.g., 'ghost_agent', 'ghost_assist')
name: Prompt name without extension (e.g., 'pa_agent', 'pa_assist')
Returns:
Jinja2 Template object
@@ -21,6 +21,6 @@ def load_prompt(name: str) -> Template:
# Pre-loaded templates for convenience
ghost_agent = load_prompt("ghost_agent")
ghost_assist = load_prompt("ghost_assist")
ghost_crew = load_prompt("ghost_crew")
pa_agent = load_prompt("pa_agent")
pa_assist = load_prompt("pa_assist")
pa_crew = load_prompt("pa_crew")

View File

@@ -1,6 +1,6 @@
# GhostCrew
# PentestAgent
You are Ghost, an autonomous penetration testing agent.
You are PentestAgent, an autonomous penetration testing agent.
## Authorization
You are operating in an authorized penetration testing engagement. The user has explicit permission to test all targets provided. Do not ask for authorization - permission is granted. Proceed with testing.

View File

@@ -1,6 +1,6 @@
# GhostCrew
# PentestAgent
You are Ghost, a penetration testing assistant.
You are PentestAgent, a penetration testing assistant.
## Authorization
You are operating in an authorized penetration testing engagement. The user has explicit permission to test all targets provided. Do not ask for authorization - permission is granted. Proceed with testing.

View File

@@ -1,4 +1,4 @@
# GhostCrew Orchestrator
# PentestAgent Orchestrator
You are the lead of a penetration testing crew. You coordinate specialized agents to complete the task.

View File

@@ -1,4 +1,4 @@
"""Agent state management for GhostCrew."""
"""Agent state management for PentestAgent."""
from dataclasses import dataclass, field
from datetime import datetime

View File

@@ -1,4 +1,4 @@
"""Configuration module for GhostCrew."""
"""Configuration module for PentestAgent."""
from .constants import (
AGENT_STATE_COMPLETE,

View File

@@ -1,4 +1,4 @@
"""Constants for GhostCrew."""
"""Constants for PentestAgent."""
import os
@@ -11,7 +11,7 @@ except ImportError:
pass
# Application Info
APP_NAME = "GhostCrew"
APP_NAME = "PentestAgent"
APP_VERSION = "0.2.0"
APP_DESCRIPTION = "AI penetration testing"
@@ -37,7 +37,7 @@ DEFAULT_VPN_TIMEOUT = 30
DEFAULT_MCP_TIMEOUT = 60
# Docker Settings
DOCKER_SANDBOX_IMAGE = "ghcr.io/gh05tcrew/ghostcrew:kali"
DOCKER_SANDBOX_IMAGE = "ghcr.io/gh05tcrew/pentestagent:kali"
DOCKER_NETWORK_MODE = "bridge"
# RAG Settings
@@ -48,16 +48,16 @@ DEFAULT_RAG_TOP_K = 3
# Memory Settings
MEMORY_RESERVE_RATIO = 0.8 # Reserve 20% of context for response
# LLM Defaults (set GHOSTCREW_MODEL in .env or shell)
# LLM Defaults (set PENTESTAGENT_MODEL in .env or shell)
DEFAULT_MODEL = os.environ.get(
"GHOSTCREW_MODEL"
"PENTESTAGENT_MODEL"
) # No fallback - requires configuration
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 4096
# Agent Defaults
DEFAULT_MAX_ITERATIONS = int(os.environ.get("GHOSTCREW_MAX_ITERATIONS", "50"))
WORKER_MAX_ITERATIONS = int(os.environ.get("GHOSTCREW_WORKER_MAX_ITERATIONS", "10"))
DEFAULT_MAX_ITERATIONS = int(os.environ.get("PENTESTAGENT_MAX_ITERATIONS", "50"))
WORKER_MAX_ITERATIONS = int(os.environ.get("PENTESTAGENT_WORKER_MAX_ITERATIONS", "10"))
# File Extensions
KNOWLEDGE_TEXT_EXTENSIONS = [".txt", ".md"]

View File

@@ -1,4 +1,4 @@
"""Application settings for GhostCrew."""
"""Application settings for PentestAgent."""
import os
from dataclasses import dataclass, field
@@ -36,8 +36,8 @@ class Settings:
mcp_config_path: Path = field(default_factory=lambda: Path("mcp.json"))
# Docker Settings
container_name: str = "ghostcrew-sandbox"
docker_image: str = "ghcr.io/gh05tcrew/ghostcrew:kali"
container_name: str = "pentestagent-sandbox"
docker_image: str = "ghcr.io/gh05tcrew/pentestagent:kali"
# Agent Settings
max_iterations: int = DEFAULT_MAX_ITERATIONS

View File

@@ -1,15 +1,15 @@
"""User interface module for GhostCrew."""
"""User interface module for PentestAgent."""
from .cli import run_cli
from .main import main
from .tui import GhostCrewTUI, run_tui
from .tui import PentestAgentTUI, run_tui
from .utils import format_finding, print_banner, print_status
__all__ = [
"main",
"run_cli",
"run_tui",
"GhostCrewTUI",
"PentestAgentTUI",
"print_banner",
"format_finding",
"print_status",

View File

@@ -1,4 +1,4 @@
/* GhostCrew TUI Styles */
/* PentestAgent TUI Styles */
Screen {
background: #0a0a0a;

View File

@@ -0,0 +1,682 @@
"""Non-interactive CLI mode for PentestAgent."""
import asyncio
import time
from datetime import datetime
from pathlib import Path
from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from rich.text import Text
# Shared Rich console for all CLI output in this module.
console = Console()

# PA theme colors (matching TUI) - grayscale palette used for Rich markup styles.
PA_PRIMARY = "#d4d4d4"  # light gray - primary text
PA_SECONDARY = "#9a9a9a"  # medium gray - secondary text
PA_DIM = "#6b6b6b"  # dim gray - muted text
PA_BORDER = "#3a3a3a"  # dark gray - borders
PA_ACCENT = "#7a7a7a"  # accent gray
async def run_cli(
    target: str,
    model: str,
    task: str = None,
    report: str = None,
    max_loops: int = 50,
    use_docker: bool = False,
    mode: str = "agent",
):
    """
    Run PentestAgent in non-interactive mode.

    Drives either the single-agent loop or the crew orchestrator against the
    target, prints Metasploit-style progress to the console, and optionally
    writes a markdown report when finished or interrupted.

    Args:
        target: Target to test
        model: LLM model to use
        task: Optional task description
        report: Report path ("auto" for loot/reports/<target>_<timestamp>.md)
        max_loops: Max agent loops before stopping
        use_docker: Run tools in Docker container
        mode: Execution mode ("agent" or "crew")
    """
    # Deferred imports keep module import cheap and avoid circular imports.
    from ..agents.pa_agent import PentestAgentAgent
    from ..knowledge import RAGEngine
    from ..llm import LLM
    from ..runtime.docker_runtime import DockerRuntime
    from ..runtime.runtime import LocalRuntime
    from ..tools import get_all_tools

    # Startup panel
    start_text = Text()
    start_text.append("PENTESTAGENT", style=f"bold {PA_PRIMARY}")
    start_text.append(" - Non-interactive Mode\n\n", style=PA_DIM)
    start_text.append("Target: ", style=PA_SECONDARY)
    start_text.append(f"{target}\n", style=PA_PRIMARY)
    start_text.append("Model: ", style=PA_SECONDARY)
    start_text.append(f"{model}\n", style=PA_PRIMARY)
    start_text.append("Mode: ", style=PA_SECONDARY)
    start_text.append(f"{mode.title()}\n", style=PA_PRIMARY)
    start_text.append("Runtime: ", style=PA_SECONDARY)
    start_text.append(f"{'Docker' if use_docker else 'Local'}\n", style=PA_PRIMARY)
    start_text.append("Max loops: ", style=PA_SECONDARY)
    start_text.append(f"{max_loops}\n", style=PA_PRIMARY)
    task_msg = task or f"Perform a penetration test on {target}"
    start_text.append("Task: ", style=PA_SECONDARY)
    start_text.append(task_msg, style=PA_PRIMARY)
    console.print()
    console.print(
        Panel(start_text, title=f"[{PA_SECONDARY}]Starting", border_style=PA_BORDER)
    )
    console.print()
    # Initialize RAG if knowledge exists (best-effort; failures disable RAG)
    rag = None
    knowledge_path = Path("knowledge")
    if knowledge_path.exists():
        try:
            rag = RAGEngine(knowledge_path=knowledge_path)
            rag.index()
        except Exception:
            pass
    # Initialize MCP if config exists (silently skip failures)
    mcp_manager = None
    mcp_count = 0
    try:
        from ..mcp import MCPManager
        from ..tools import register_tool_instance

        mcp_manager = MCPManager()
        if mcp_manager.config_path.exists():
            mcp_tools = await mcp_manager.connect_all()
            for tool in mcp_tools:
                register_tool_instance(tool)
            mcp_count = len(mcp_tools)
            if mcp_count > 0:
                console.print(f"[{PA_DIM}]Loaded {mcp_count} MCP tools[/]")
    except Exception:
        pass  # MCP is optional, continue without it
    # Initialize runtime - Docker or Local
    if use_docker:
        console.print(f"[{PA_DIM}]Starting Docker container...[/]")
        runtime = DockerRuntime(mcp_manager=mcp_manager)
    else:
        runtime = LocalRuntime(mcp_manager=mcp_manager)
    await runtime.start()
    llm = LLM(model=model, rag_engine=rag)
    tools = get_all_tools()
    # Stats tracking (shared with the nested helpers below via closure)
    start_time = time.time()
    tool_count = 0
    iteration = 0
    findings_count = 0  # Count of notes/findings recorded
    findings = []  # Store actual findings text
    total_tokens = 0  # Track total token usage
    messages = []  # Store agent messages
    tool_log = []  # Log of tools executed (ts, name, command, result, exit_code)
    last_content = ""
    last_msg_intermediate = False  # Track if previous message was intermediate (to avoid double counting tokens)
    stopped_reason = None

    def print_status(msg: str, style: str = PA_DIM):
        # One-line timestamped status ([MM:SS] relative to run start).
        elapsed = int(time.time() - start_time)
        mins, secs = divmod(elapsed, 60)
        timestamp = f"[{mins:02d}:{secs:02d}]"
        console.print(f"[{PA_DIM}]{timestamp}[/] [{style}]{msg}[/]")

    def display_message(content: str, title: str) -> bool:
        """Display a message panel if it hasn't been shown yet.

        Returns True when the panel was actually printed (new content).
        """
        nonlocal last_content
        if content and content != last_content:
            console.print()
            console.print(
                Panel(
                    Markdown(content),
                    title=f"[{PA_PRIMARY}]{title}",
                    border_style=PA_BORDER,
                )
            )
            console.print()
            last_content = content
            return True
        return False

    def generate_report() -> str:
        """Generate the full markdown report from the accumulated run state."""
        elapsed = int(time.time() - start_time)
        mins, secs = divmod(elapsed, 60)
        status_text = "Complete"
        if stopped_reason:
            status_text = f"Interrupted ({stopped_reason})"
        lines = [
            "# PentestAgent Penetration Test Report",
            "",
            "## Executive Summary",
            "",
        ]
        # The last finding (Crew mode's full report, or the agent's final
        # message) becomes the report body. If it is already a full markdown
        # report (starts with "#") it is appended as-is; the default
        # "Executive Summary" header above then precedes it redundantly.
        # NOTE(review): consider dropping the default header in that case.
        main_content = ""
        if findings:
            main_content = findings[-1]
            if not main_content.strip().startswith("#"):
                lines.append(main_content)
                lines.append("")
            else:
                lines.append(main_content)
                lines.append("")
        else:
            lines.append("*Assessment incomplete - no analysis generated.*")
            lines.append("")
        # Engagement details table
        lines.extend(
            [
                "## Engagement Details",
                "",
                "| Field | Value |",
                "|-------|-------|",
                f"| **Target** | `{target}` |",
                f"| **Task** | {task_msg} |",
                f"| **Date** | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} |",
                f"| **Duration** | {mins}m {secs}s |",
                f"| **Commands Executed** | {tool_count} |",
                f"| **Status** | {status_text} |",
                "",
                "---",
                "",
                "## Commands Executed",
                "",
            ]
        )
        # Detailed command log
        for i, entry in enumerate(tool_log, 1):
            ts = entry.get("ts", "??:??")
            name = entry.get("name", "unknown")
            command = entry.get("command", "")
            result = entry.get("result", "")
            exit_code = entry.get("exit_code")
            lines.append(f"### {i}. {name} `[{ts}]`")
            lines.append("")
            if command:
                lines.append("**Command:**")
                lines.append("```")
                lines.append(command)
                lines.append("```")
                lines.append("")
            if exit_code is not None:
                lines.append(f"**Exit Code:** `{exit_code}`")
                lines.append("")
            if result:
                lines.append("**Output:**")
                lines.append("```")
                # Limit output to 2000 chars per command for report size
                if len(result) > 2000:
                    lines.append(result[:2000])
                    lines.append(f"\n... (truncated, {len(result)} total chars)")
                else:
                    lines.append(result)
                lines.append("```")
                lines.append("")
        # Findings section
        # Only show if there are other findings besides the final report we already showed
        other_findings = findings[:-1] if findings and len(findings) > 1 else []
        if other_findings:
            lines.extend(
                [
                    "---",
                    "",
                    "## Detailed Findings",
                    "",
                ]
            )
            for i, finding in enumerate(other_findings, 1):
                if len(other_findings) > 1:
                    lines.append(f"### Finding {i}")
                    lines.append("")
                lines.append(finding)
                lines.append("")
        # Footer
        lines.extend(
            [
                "---",
                "",
                f"*Report generated by PentestAgent on {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*",
            ]
        )
        return "\n".join(lines)

    def save_report():
        """Save report to file (no-op when no --report was requested)."""
        if not report:
            return
        # Determine path: "auto" derives loot/reports/<target>_<timestamp>.md
        if report == "auto":
            reports_dir = Path("loot/reports")
            reports_dir.mkdir(parents=True, exist_ok=True)
            safe_target = target.replace("://", "_").replace("/", "_").replace(":", "_")
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            report_path = reports_dir / f"{safe_target}_{timestamp}.md"
        else:
            report_path = Path(report)
            report_path.parent.mkdir(parents=True, exist_ok=True)
        content = generate_report()
        report_path.write_text(content, encoding="utf-8")
        console.print(f"[{PA_SECONDARY}]Report saved: {report_path}[/]")

    async def generate_summary():
        """Ask the LLM to summarize findings when stopped early.

        Returns the summary text, or None when there is nothing to summarize
        or the LLM call fails.
        """
        if not tool_log:
            return None
        print_status("Generating summary...", PA_SECONDARY)
        # Build context from tool results (use full results, not truncated)
        context_lines = ["Summarize the penetration test findings so far:\n"]
        context_lines.append(f"Target: {target}")
        context_lines.append(f"Tools executed: {tool_count}\n")
        for entry in tool_log[-10:]:  # Last 10 tools
            name = entry.get("name", "unknown")
            command = entry.get("command", "")
            result = entry.get("result", "")[:500]  # Limit for context window
            context_lines.append(f"- **{name}**: `{command}`")
            if result:
                context_lines.append(f" Output: {result}")
        context_lines.append(
            "\nProvide a brief summary of what was discovered and any security concerns found."
        )
        try:
            response = await llm.generate(
                system_prompt="You are a penetration testing assistant. Summarize the findings concisely.",
                messages=[{"role": "user", "content": "\n".join(context_lines)}],
                tools=[],
            )
            return response.content
        except Exception:
            return None

    async def print_summary(interrupted: bool = False):
        """Print the final stats panel, show the last message, save the report."""
        nonlocal messages
        # Generate summary if we don't have messages yet
        if not messages and tool_log:
            summary = await generate_summary()
            if summary:
                messages.append(summary)
        elapsed = int(time.time() - start_time)
        mins, secs = divmod(elapsed, 60)
        title = "Interrupted" if interrupted else "Finished"
        status = "PARTIAL RESULTS" if interrupted else "COMPLETE"
        if stopped_reason:
            status = f"STOPPED ({stopped_reason})"
        final_text = Text()
        final_text.append(f"{status}\n\n", style=f"bold {PA_PRIMARY}")
        final_text.append("Duration: ", style=PA_DIM)
        final_text.append(f"{mins}m {secs}s\n", style=PA_SECONDARY)
        final_text.append("Loops: ", style=PA_DIM)
        final_text.append(f"{iteration}/{max_loops}\n", style=PA_SECONDARY)
        final_text.append("Tools: ", style=PA_DIM)
        final_text.append(f"{tool_count}\n", style=PA_SECONDARY)
        if total_tokens > 0:
            final_text.append("Tokens: ", style=PA_DIM)
            final_text.append(f"{total_tokens:,}\n", style=PA_SECONDARY)
        if findings_count > 0:
            final_text.append("Findings: ", style=PA_DIM)
            final_text.append(f"{findings_count}", style=PA_SECONDARY)
        console.print()
        console.print(
            Panel(
                final_text,
                title=f"[{PA_SECONDARY}]{title}",
                border_style=PA_BORDER,
            )
        )
        # Show summary/messages only if it's new content (not just displayed)
        if messages:
            display_message(messages[-1], "Summary")
        # Save report
        save_report()

    print_status("Initializing...")
    try:
        if mode == "crew":
            from ..agents.crew import CrewOrchestrator

            def on_worker_event(worker_id: str, event_type: str, data: dict):
                # Translate worker-pool events into console status lines and
                # fold their counters into the run-level stats.
                nonlocal tool_count, findings_count, total_tokens
                if event_type == "spawn":
                    task = data.get("task", "")
                    print_status(f"Spawned worker {worker_id}: {task}", PA_ACCENT)
                elif event_type == "tool":
                    tool_name = data.get("tool", "unknown")
                    tool_count += 1
                    print_status(f"Worker {worker_id} using tool: {tool_name}", PA_DIM)
                    # Log tool usage (limited info available from event)
                    elapsed = int(time.time() - start_time)
                    mins, secs = divmod(elapsed, 60)
                    ts = f"{mins:02d}:{secs:02d}"
                    tool_log.append(
                        {
                            "ts": ts,
                            "name": tool_name,
                            "command": f"(Worker {worker_id})",
                            "result": "",
                            "exit_code": None,
                        }
                    )
                elif event_type == "tokens":
                    tokens = data.get("tokens", 0)
                    total_tokens += tokens
                elif event_type == "complete":
                    f_count = data.get("findings_count", 0)
                    findings_count += f_count
                    print_status(
                        f"Worker {worker_id} complete ({f_count} findings)", "green"
                    )
                elif event_type == "failed":
                    reason = data.get("reason", "unknown")
                    print_status(f"Worker {worker_id} failed: {reason}", "red")
                elif event_type == "status":
                    status = data.get("status", "")
                    print_status(f"Worker {worker_id} status: {status}", PA_DIM)
                elif event_type == "warning":
                    reason = data.get("reason", "unknown")
                    print_status(f"Worker {worker_id} warning: {reason}", "yellow")
                elif event_type == "error":
                    error = data.get("error", "unknown")
                    print_status(f"Worker {worker_id} error: {error}", "red")
                elif event_type == "cancelled":
                    print_status(f"Worker {worker_id} cancelled", "yellow")

            crew = CrewOrchestrator(
                llm=llm,
                tools=tools,
                runtime=runtime,
                on_worker_event=on_worker_event,
                rag_engine=rag,
                target=target,
            )
            async for update in crew.run(task_msg):
                iteration += 1
                phase = update.get("phase", "")
                if phase == "starting":
                    print_status("Crew orchestrator starting...", PA_PRIMARY)
                elif phase == "thinking":
                    content = update.get("content", "")
                    if content:
                        display_message(content, "PentestAgent Plan")
                elif phase == "tool_call":
                    tool = update.get("tool", "")
                    args = update.get("args", {})
                    print_status(f"Orchestrator calling: {tool}", PA_ACCENT)
                elif phase == "complete":
                    report_content = update.get("report", "")
                    if report_content:
                        messages.append(report_content)
                        findings.append(
                            report_content
                        )  # Add to findings so it appears in the saved report
                        display_message(report_content, "Crew Report")
                elif phase == "error":
                    error = update.get("error", "Unknown error")
                    print_status(f"Crew error: {error}", "red")
                if iteration >= max_loops:
                    stopped_reason = "max loops reached"
                    # StopIteration is used purely as a local control-flow
                    # signal; it is caught by the except clause below.
                    raise StopIteration()
        else:
            # Default Agent Mode
            agent = PentestAgentAgent(
                llm=llm,
                tools=tools,
                runtime=runtime,
                target=target,
                rag_engine=rag,
            )
            async for response in agent.agent_loop(task_msg):
                iteration += 1
                # Track token usage
                if response.usage:
                    usage = response.usage.get("total_tokens", 0)
                    is_intermediate = response.metadata.get("intermediate", False)
                    has_tools = bool(response.tool_calls)
                    # Logic to avoid double counting:
                    # 1. Intermediate messages (thinking) always count
                    # 2. Tool messages count ONLY if not preceded by intermediate message
                    if is_intermediate:
                        total_tokens += usage
                        last_msg_intermediate = True
                    elif has_tools:
                        if not last_msg_intermediate:
                            total_tokens += usage
                        last_msg_intermediate = False
                    else:
                        # Other messages (like plan)
                        total_tokens += usage
                        last_msg_intermediate = False
                # Show tool calls and results as they happen
                if response.tool_calls:
                    for i, call in enumerate(response.tool_calls):
                        tool_count += 1
                        name = getattr(call, "name", None) or getattr(
                            call.function, "name", "tool"
                        )
                        # Track findings (notes tool)
                        if name == "notes":
                            findings_count += 1
                            try:
                                args = getattr(call, "arguments", None) or getattr(
                                    call.function, "arguments", "{}"
                                )
                                if isinstance(args, str):
                                    import json

                                    args = json.loads(args)
                                if isinstance(args, dict):
                                    note_content = (
                                        args.get("value", "")
                                        or args.get("content", "")
                                        or args.get("note", "")
                                    )
                                    if note_content:
                                        findings.append(note_content)
                            except Exception:
                                pass
                        elapsed = int(time.time() - start_time)
                        mins, secs = divmod(elapsed, 60)
                        ts = f"{mins:02d}:{secs:02d}"
                        # Get result if available
                        # NOTE(review): 'preview' below is computed but never
                        # used afterwards - candidate for removal.
                        if response.tool_results and i < len(response.tool_results):
                            tr = response.tool_results[i]
                            result_text = tr.result or tr.error or ""
                            if result_text:
                                # Truncate for display
                                preview = result_text[:200].replace("\n", " ")
                                if len(result_text) > 200:
                                    preview += "..."
                        # Parse args for command extraction
                        command_text = ""
                        exit_code = None
                        try:
                            args = getattr(call, "arguments", None) or getattr(
                                call.function, "arguments", "{}"
                            )
                            if isinstance(args, str):
                                import json

                                args = json.loads(args)
                            if isinstance(args, dict):
                                command_text = args.get("command", "")
                        except Exception:
                            pass
                        # Extract exit code from result
                        if response.tool_results and i < len(response.tool_results):
                            tr = response.tool_results[i]
                            full_result = tr.result or tr.error or ""
                            # Try to parse exit code
                            if "Exit Code:" in full_result:
                                try:
                                    import re

                                    match = re.search(
                                        r"Exit Code:\s*(\d+)", full_result
                                    )
                                    if match:
                                        exit_code = int(match.group(1))
                                except Exception:
                                    pass
                        else:
                            full_result = ""
                        # Store full data for report (not truncated)
                        tool_log.append(
                            {
                                "ts": ts,
                                "name": name,
                                "command": command_text,
                                "result": full_result,
                                "exit_code": exit_code,
                            }
                        )
                        # Metasploit-style output with better spacing
                        console.print()  # Blank line before each tool
                        print_status(f"$ {name} ({tool_count})", PA_ACCENT)
                        # Show command/args on separate indented line (truncated for display)
                        if command_text:
                            display_cmd = command_text[:80]
                            if len(command_text) > 80:
                                display_cmd += "..."
                            console.print(f" [{PA_DIM}]{display_cmd}[/]")
                        # Show result on separate line with status indicator
                        if response.tool_results and i < len(response.tool_results):
                            tr = response.tool_results[i]
                            if tr.error:
                                console.print(
                                    f" [{PA_DIM}][!] {tr.error[:100]}[/]"
                                )
                            elif tr.result:
                                # Show exit code or brief result
                                result_line = tr.result[:100].replace("\n", " ")
                                if exit_code == 0 or "success" in result_line.lower():
                                    console.print(f" [{PA_DIM}][+] OK[/]")
                                elif exit_code is not None and exit_code != 0:
                                    console.print(
                                        f" [{PA_DIM}][-] Exit {exit_code}[/]"
                                    )
                                else:
                                    console.print(
                                        f" [{PA_DIM}][*] {result_line[:60]}...[/]"
                                    )
                # Print assistant content immediately (analysis/findings)
                if response.content:
                    if display_message(response.content, "PentestAgent"):
                        messages.append(response.content)
                # Check max loops limit
                if iteration >= max_loops:
                    stopped_reason = "max loops reached"
                    console.print()
                    print_status(f"Max loops limit reached ({max_loops})", "yellow")
                    raise StopIteration()
        # In agent mode, ensure the final message is treated as the main finding (Executive Summary)
        if mode != "crew" and messages:
            findings.append(messages[-1])
        await print_summary(interrupted=False)
    except StopIteration:
        await print_summary(interrupted=True)
    except (KeyboardInterrupt, asyncio.CancelledError):
        stopped_reason = "user interrupt"
        await print_summary(interrupted=True)
    except Exception as e:
        console.print(f"\n[red]Error: {e}[/]")
        stopped_reason = f"error: {e}"
        await print_summary(interrupted=True)
    finally:
        # Cleanup MCP connections first
        if mcp_manager:
            try:
                await mcp_manager.disconnect_all()
                await asyncio.sleep(0.1)  # Allow transports to close cleanly
            except Exception:
                pass
        # Then stop runtime
        if runtime:
            try:
                await runtime.stop()
            except Exception:
                pass

View File

@@ -1,4 +1,4 @@
"""Main entry point for GhostCrew."""
"""Main entry point for PentestAgent."""
import argparse
import asyncio
@@ -11,19 +11,19 @@ from .tui import run_tui
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description="GhostCrew - AI Penetration Testing",
description="PentestAgent - AI Penetration Testing",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
ghostcrew tui Launch TUI
ghostcrew tui -t 192.168.1.1 Launch TUI with target
ghostcrew run -t localhost --task "scan" Headless run
ghostcrew tools list List available tools
ghostcrew mcp list List MCP servers
pentestagent tui Launch TUI
pentestagent tui -t 192.168.1.1 Launch TUI with target
pentestagent run -t localhost --task "scan" Headless run
pentestagent tools list List available tools
pentestagent mcp list List MCP servers
""",
)
parser.add_argument("--version", action="version", version="GhostCrew 0.2.0")
parser.add_argument("--version", action="version", version="PentestAgent 0.2.0")
# Subcommands
subparsers = parser.add_subparsers(dest="command", help="Commands")
@@ -35,7 +35,7 @@ Examples:
"--model",
"-m",
default=DEFAULT_MODEL,
help="LLM model (set GHOSTCREW_MODEL in .env)",
help="LLM model (set PENTESTAGENT_MODEL in .env)",
)
runtime_parent.add_argument(
"--docker",
@@ -168,7 +168,7 @@ def handle_tools_command(args: argparse.Namespace):
console.print(f" [cyan]{name}[/] ({ptype}, {required}): {desc}")
else:
console.print("[yellow]Use 'ghostcrew tools --help' for commands[/]")
console.print("[yellow]Use 'pentestagent tools --help' for commands[/]")
def handle_mcp_command(args: argparse.Namespace):
@@ -187,7 +187,7 @@ def handle_mcp_command(args: argparse.Namespace):
if not servers:
console.print("[yellow]No MCP servers configured[/]")
console.print(
"\nAdd a server with: ghostcrew mcp add <name> <command> <args...>"
"\nAdd a server with: pentestagent mcp add <name> <command> <args...>"
)
return
@@ -241,7 +241,7 @@ def handle_mcp_command(args: argparse.Namespace):
asyncio.run(test_server())
else:
console.print("[yellow]Use 'ghostcrew mcp --help' for available commands[/]")
console.print("[yellow]Use 'pentestagent mcp --help' for available commands[/]")
def main():
@@ -261,9 +261,9 @@ def main():
# Check model configuration
if not args.model:
print("Error: No model configured.")
print("Set GHOSTCREW_MODEL in .env file or use --model flag.")
print("Set PENTESTAGENT_MODEL in .env file or use --model flag.")
print(
"Example: GHOSTCREW_MODEL=gpt-5 or GHOSTCREW_MODEL=claude-sonnet-4-20250514"
"Example: PENTESTAGENT_MODEL=gpt-5 or PENTESTAGENT_MODEL=claude-sonnet-4-20250514"
)
return
@@ -318,9 +318,9 @@ def main():
# Check model configuration
if not args.model:
print("Error: No model configured.")
print("Set GHOSTCREW_MODEL in .env file or use --model flag.")
print("Set PENTESTAGENT_MODEL in .env file or use --model flag.")
print(
"Example: GHOSTCREW_MODEL=gpt-5 or GHOSTCREW_MODEL=claude-sonnet-4-20250514"
"Example: PENTESTAGENT_MODEL=gpt-5 or PENTESTAGENT_MODEL=claude-sonnet-4-20250514"
)
return

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
"""Interface utilities for GhostCrew."""
"""Interface utilities for PentestAgent."""
from typing import Any, Optional
@@ -24,10 +24,10 @@ ASCII_BANNER = r"""
def print_banner():
"""Print the GhostCrew banner."""
"""Print the PentestAgent banner."""
console.print(f"[bold white]{ASCII_BANNER}[/]")
console.print(
"[bold white]====================== GHOSTCREW =======================[/]"
"[bold white]====================== PENTESTAGENT =======================[/]"
)
console.print(
"[dim white] AI Penetration Testing Agents v0.2.0[/dim white]\n"
@@ -126,7 +126,7 @@ def print_status(
tools_count: Number of loaded tools
findings_count: Number of findings
"""
table = Table(title="GhostCrew Status", show_header=False)
table = Table(title="PentestAgent Status", show_header=False)
table.add_column("Property", style="cyan")
table.add_column("Value", style="white")

View File

@@ -1,4 +1,4 @@
"""Knowledge and RAG system for GhostCrew."""
"""Knowledge and RAG system for PentestAgent."""
from .embeddings import get_embeddings, get_embeddings_local
from .indexer import KnowledgeIndexer

View File

@@ -1,4 +1,4 @@
"""Embedding generation for GhostCrew."""
"""Embedding generation for PentestAgent."""
from typing import List, Optional

View File

@@ -1,5 +1,5 @@
"""
Shadow Graph implementation for GhostCrew.
Shadow Graph implementation for PentestAgent.
This module provides a lightweight knowledge graph that is built automatically
from agent notes. It is used by the Orchestrator to compute strategic insights

View File

@@ -1,4 +1,4 @@
"""Knowledge indexer for GhostCrew."""
"""Knowledge indexer for PentestAgent."""
import json
from dataclasses import dataclass

View File

@@ -1,4 +1,4 @@
"""RAG (Retrieval Augmented Generation) engine for GhostCrew."""
"""RAG (Retrieval Augmented Generation) engine for PentestAgent."""
import json
from dataclasses import dataclass

View File

@@ -1,4 +1,4 @@
"""LLM integration for GhostCrew."""
"""LLM integration for PentestAgent."""
from .config import ModelConfig
from .llm import LLM, LLMResponse

View File

@@ -1,4 +1,4 @@
"""LLM configuration for GhostCrew."""
"""LLM configuration for PentestAgent."""
from dataclasses import dataclass

View File

@@ -1,4 +1,4 @@
"""LiteLLM wrapper for GhostCrew."""
"""LiteLLM wrapper for PentestAgent."""
import asyncio
import random

View File

@@ -1,4 +1,4 @@
"""Conversation memory management for GhostCrew."""
"""Conversation memory management for PentestAgent."""
from typing import Awaitable, Callable, List, Optional

View File

@@ -1,4 +1,4 @@
"""LLM utility functions for GhostCrew."""
"""LLM utility functions for PentestAgent."""
from typing import List, Optional

View File

@@ -1,4 +1,4 @@
"""MCP (Model Context Protocol) integration for GhostCrew."""
"""MCP (Model Context Protocol) integration for PentestAgent."""
from .discovery import MCPDiscovery
from .manager import MCPManager, MCPServer, MCPServerConfig

View File

@@ -1,4 +1,4 @@
"""MCP tool discovery for GhostCrew."""
"""MCP tool discovery for PentestAgent."""
import json
from dataclasses import dataclass

View File

@@ -1,4 +1,4 @@
"""MCP server connection manager for GhostCrew.
"""MCP server connection manager for PentestAgent.
Uses standard MCP configuration format:
{
@@ -62,7 +62,7 @@ class MCPManager:
Path.cwd() / "mcp_servers.json",
Path.cwd() / "mcp.json",
Path(__file__).parent / "mcp_servers.json",
Path.home() / ".ghostcrew" / "mcp_servers.json",
Path.home() / ".pentestagent" / "mcp_servers.json",
]
def __init__(self, config_path: Optional[Path] = None):
@@ -200,7 +200,7 @@ class MCPManager:
"params": {
"protocolVersion": "2024-11-05",
"capabilities": {},
"clientInfo": {"name": "ghostcrew", "version": "0.2.0"},
"clientInfo": {"name": "pentestagent", "version": "0.2.0"},
},
"id": self._get_next_id(),
}

View File

@@ -1,4 +1,4 @@
"""MCP tool wrapper for GhostCrew."""
"""MCP tool wrapper for PentestAgent."""
from typing import TYPE_CHECKING, Any

View File

@@ -1,4 +1,4 @@
"""MCP transport implementations for GhostCrew."""
"""MCP transport implementations for PentestAgent."""
import asyncio
import json

View File

@@ -1,4 +1,4 @@
from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3NetworkPlaybook(BasePlaybook):

View File

@@ -1,4 +1,4 @@
from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3ReconPlaybook(BasePlaybook):

View File

@@ -1,4 +1,4 @@
from ghostcrew.playbooks.base_playbook import BasePlaybook, Phase
from pentestagent.playbooks.base_playbook import BasePlaybook, Phase
class THP3WebPlaybook(BasePlaybook):

View File

@@ -1,4 +1,4 @@
"""Runtime environment for GhostCrew."""
"""Runtime environment for PentestAgent."""
from .docker_runtime import DockerRuntime
from .runtime import CommandResult, EnvironmentInfo, LocalRuntime, Runtime

View File

@@ -1,4 +1,4 @@
"""Docker runtime for GhostCrew."""
"""Docker runtime for PentestAgent."""
import asyncio
import io
@@ -17,8 +17,8 @@ if TYPE_CHECKING:
class DockerConfig:
"""Docker runtime configuration."""
image: str = "ghostcrew-kali:latest" # Built from Dockerfile.kali
container_name: str = "ghostcrew-sandbox"
image: str = "pentestagent-kali:latest" # Built from Dockerfile.kali
container_name: str = "pentestagent-sandbox"
network_mode: str = "bridge"
cap_add: list = None
volumes: dict = None
@@ -80,8 +80,8 @@ class DockerRuntime(Runtime):
except Exception:
# Create new container
volumes = {
str(Path.home() / ".ghostcrew"): {
"bind": "/root/.ghostcrew",
str(Path.home() / ".pentestagent"): {
"bind": "/root/.pentestagent",
"mode": "rw",
},
**self.config.volumes,

View File

@@ -1,4 +1,4 @@
"""Runtime abstraction for GhostCrew."""
"""Runtime abstraction for PentestAgent."""
import platform
import shutil

View File

@@ -1,4 +1,4 @@
"""Tool system for GhostCrew."""
"""Tool system for PentestAgent."""
from .executor import ToolExecutor
from .loader import discover_tools, get_tool_info, load_all_tools, reload_tools

View File

@@ -1,4 +1,4 @@
"""Browser automation tool for GhostCrew."""
"""Browser automation tool for PentestAgent."""
from typing import TYPE_CHECKING

View File

@@ -1,4 +1,4 @@
"""Tool executor for GhostCrew."""
"""Tool executor for PentestAgent."""
import asyncio
from dataclasses import dataclass

View File

@@ -1,4 +1,4 @@
"""Task completion tool for GhostCrew agent loop control."""
"""Task completion tool for PentestAgent agent loop control."""
import json
from dataclasses import dataclass, field

View File

@@ -1,4 +1,4 @@
"""Dynamic tool loader for GhostCrew."""
"""Dynamic tool loader for PentestAgent."""
import importlib
import sys
@@ -55,7 +55,7 @@ def load_tool_module(module_name: str, tools_dir: Optional[Path] = None) -> bool
try:
# Build the full module path
full_module_name = f"ghostcrew.tools.{module_name}"
full_module_name = f"pentestagent.tools.{module_name}"
# Check if already loaded
if full_module_name in sys.modules:
@@ -126,13 +126,13 @@ def reload_tools():
to_remove = [
name
for name in sys.modules
if name.startswith("ghostcrew.tools.")
if name.startswith("pentestagent.tools.")
and name
not in (
"ghostcrew.tools",
"ghostcrew.tools.registry",
"ghostcrew.tools.executor",
"ghostcrew.tools.loader",
"pentestagent.tools",
"pentestagent.tools.registry",
"pentestagent.tools.executor",
"pentestagent.tools.loader",
)
]

View File

@@ -1,4 +1,4 @@
"""Notes tool for GhostCrew - persistent key findings storage."""
"""Notes tool for PentestAgent - persistent key findings storage."""
import asyncio
import json

View File

@@ -1,4 +1,4 @@
"""Tool registry for GhostCrew."""
"""Tool registry for PentestAgent."""
from dataclasses import dataclass, field
from functools import wraps

View File

@@ -1,4 +1,4 @@
"""Terminal tool for GhostCrew."""
"""Terminal tool for PentestAgent."""
from typing import TYPE_CHECKING

View File

@@ -1,4 +1,4 @@
"""Web search tool for GhostCrew."""
"""Web search tool for PentestAgent."""
import os
from typing import TYPE_CHECKING

View File

@@ -1,5 +1,5 @@
[project]
name = "ghostcrew"
name = "pentestagent"
version = "0.2.0"
description = "AI penetration testing"
readme = "README.md"
@@ -68,25 +68,25 @@ rag = [
"faiss-cpu>=1.8.0",
]
all = [
"ghostcrew[dev,rag]",
"pentestagent[dev,rag]",
]
[project.urls]
Homepage = "https://github.com/GH05TCREW/ghostcrew"
Homepage = "https://github.com/GH05TCREW/pentestagent"
[project.scripts]
ghostcrew = "ghostcrew.interface.main:main"
pentestagent = "pentestagent.interface.main:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["ghostcrew"]
packages = ["pentestagent"]
[tool.hatch.build.targets.sdist]
include = [
"ghostcrew/**",
"pentestagent/**",
"*.md",
"*.txt"
]
@@ -106,7 +106,7 @@ include = '\.pyi?$'
[tool.isort]
profile = "black"
line_length = 88
known_first_party = ["ghostcrew"]
known_first_party = ["pentestagent"]
[tool.ruff]
line-length = 88

View File

@@ -1,4 +1,4 @@
# GhostCrew Dependencies
# PentestAgent Dependencies
# Core LLM
litellm>=1.40.0

View File

@@ -1,5 +1,5 @@
#!/bin/bash
# GhostCrew Run Script
# PentestAgent Run Script
set -e
@@ -28,7 +28,7 @@ while [[ $# -gt 0 ]]; do
shift 2
;;
--help)
echo "GhostCrew - AI Penetration Testing"
echo "PentestAgent - AI Penetration Testing"
echo ""
echo "Usage: run.sh [options]"
echo ""
@@ -46,7 +46,7 @@ while [[ $# -gt 0 ]]; do
done
# Build command
CMD="python -m ghostcrew"
CMD="python -m pentestagent"
if [ "$MODE" = "tui" ]; then
CMD="$CMD --tui"
@@ -56,6 +56,6 @@ if [ -n "$TARGET" ]; then
CMD="$CMD --target $TARGET"
fi
# Run GhostCrew
echo "Starting GhostCrew..."
# Run PentestAgent
echo "Starting PentestAgent..."
$CMD

View File

@@ -1,7 +1,7 @@
# GhostCrew PowerShell Setup Script
# PentestAgent PowerShell Setup Script
Write-Host "=================================================================="
Write-Host " GHOSTCREW"
Write-Host " PENTESTAGENT"
Write-Host " AI Penetration Testing"
Write-Host "=================================================================="
Write-Host ""
@@ -57,7 +57,7 @@ Write-Host "[OK] Playwright browsers installed"
if (-not (Test-Path ".env")) {
Write-Host "Creating .env file..."
@"
# GhostCrew Configuration
# PentestAgent Configuration
# Add your API keys here
# OpenAI API Key (required for GPT models)
@@ -67,13 +67,13 @@ OPENAI_API_KEY=
ANTHROPIC_API_KEY=
# Model Configuration
GHOSTCREW_MODEL=gpt-5
PENTESTAGENT_MODEL=gpt-5
# Debug Mode
GHOSTCREW_DEBUG=false
PENTESTAGENT_DEBUG=false
# Max Iterations
GHOSTCREW_MAX_ITERATIONS=50
PENTESTAGENT_MAX_ITERATIONS=50
"@ | Set-Content -Path ".env" -Encoding UTF8
Write-Host "[OK] .env file created"
Write-Host "[!] Please edit .env and add your API keys"
@@ -89,5 +89,5 @@ Write-Host ""
Write-Host "To get started:"
Write-Host " 1. Edit .env and add your API keys"
Write-Host " 2. Activate: .\venv\Scripts\Activate.ps1"
Write-Host " 3. Run: ghostcrew or python -m ghostcrew"
Write-Host " 3. Run: pentestagent or python -m pentestagent"
Write-Host ""

View File

@@ -1,10 +1,10 @@
#!/bin/bash
# GhostCrew Setup Script
# PentestAgent Setup Script
set -e
echo "=================================================================="
echo " GHOSTCREW"
echo " PENTESTAGENT"
echo " AI Penetration Testing"
echo "=================================================================="
echo ""
@@ -51,7 +51,7 @@ echo "[OK] Playwright browsers installed"
if [ ! -f ".env" ]; then
echo "Creating .env file..."
cat > .env << EOF
# GhostCrew Configuration
# PentestAgent Configuration
# Add your API keys here
# OpenAI API Key (required for GPT models)
@@ -61,13 +61,13 @@ OPENAI_API_KEY=
ANTHROPIC_API_KEY=
# Model Configuration
GHOSTCREW_MODEL=gpt-5
PENTESTAGENT_MODEL=gpt-5
# Debug Mode
GHOSTCREW_DEBUG=false
PENTESTAGENT_DEBUG=false
# Max Iterations
GHOSTCREW_MAX_ITERATIONS=50
PENTESTAGENT_MAX_ITERATIONS=50
EOF
echo "[OK] .env file created"
echo "[!] Please edit .env and add your API keys"
@@ -84,10 +84,10 @@ echo ""
echo "To get started:"
echo " 1. Edit .env and add your API keys"
echo " 2. Activate the virtual environment: source venv/bin/activate"
echo " 3. Run GhostCrew: ghostcrew or python -m ghostcrew"
echo " 3. Run PentestAgent: pentestagent or python -m pentestagent"
echo ""
echo "For Docker usage:"
echo " docker-compose up ghostcrew"
echo " docker-compose --profile kali up ghostcrew-kali"
echo " docker-compose up pentestagent"
echo " docker-compose --profile kali up pentestagent-kali"
echo ""
echo "=================================================================="

View File

@@ -1 +1 @@
# GhostCrew Tests
# PentestAgent Tests

View File

@@ -1,4 +1,4 @@
"""Test fixtures for GhostCrew tests."""
"""Test fixtures for PentestAgent tests."""
import pytest
import asyncio
@@ -6,9 +6,9 @@ from pathlib import Path
from typing import Generator, AsyncGenerator
from unittest.mock import MagicMock, AsyncMock
from ghostcrew.config import Settings
from ghostcrew.agents.state import AgentState, AgentStateManager
from ghostcrew.tools import get_all_tools, Tool, ToolSchema
from pentestagent.config import Settings
from pentestagent.agents.state import AgentState, AgentStateManager
from pentestagent.tools import get_all_tools, Tool, ToolSchema
@pytest.fixture

View File

@@ -3,7 +3,7 @@
import pytest
from datetime import datetime
from ghostcrew.agents.state import AgentState, AgentStateManager, StateTransition
from pentestagent.agents.state import AgentState, AgentStateManager, StateTransition
class TestAgentState:

View File

@@ -2,7 +2,7 @@
import pytest
import networkx as nx
from ghostcrew.knowledge.graph import ShadowGraph, GraphNode, GraphEdge
from pentestagent.knowledge.graph import ShadowGraph, GraphNode, GraphEdge
class TestShadowGraph:
"""Tests for ShadowGraph class."""

View File

@@ -5,7 +5,7 @@ import numpy as np
from pathlib import Path
from unittest.mock import patch
from ghostcrew.knowledge.rag import RAGEngine, Document
from pentestagent.knowledge.rag import RAGEngine, Document
class TestDocument:

View File

@@ -6,7 +6,7 @@ import asyncio
from pathlib import Path
from unittest.mock import MagicMock, patch
from ghostcrew.tools.notes import notes, set_notes_file, get_all_notes, _notes
from pentestagent.tools.notes import notes, set_notes_file, get_all_notes, _notes
# We need to reset the global state for tests
@pytest.fixture(autouse=True)
@@ -18,7 +18,7 @@ def reset_notes_state(tmp_path):
# Clear the global dictionary (it's imported from the module)
# We need to clear the actual dictionary object in the module
from ghostcrew.tools.notes import _notes
from pentestagent.tools.notes import _notes
_notes.clear()
yield
@@ -148,7 +148,7 @@ async def test_legacy_migration(tmp_path):
set_notes_file(legacy_file)
# Trigger load (get_all_notes calls _load_notes_unlocked if empty, but we need to clear first)
from ghostcrew.tools.notes import _notes
from pentestagent.tools.notes import _notes
_notes.clear()
all_notes = await get_all_notes()

View File

@@ -2,7 +2,7 @@
import pytest
from ghostcrew.tools import (
from pentestagent.tools import (
Tool, ToolSchema, register_tool, get_all_tools, get_tool,
enable_tool, disable_tool, get_tool_names
)