# Source: mirror of https://github.com/GH05TCREW/pentestagent.git (synced 2026-03-07)
# PentestAgent example configuration (copy to .env and edit as needed)
# Copy this file to `.env` and set values appropriate for your environment.

# ---------------------------------------------------------------------------
# Core settings
LLM_PROVIDER=ollama
#OLLAMA_BASE_URL=http://127.0.0.1:11434
PENTESTAGENT_MODEL="ollama/qwen2.5:7b-instruct"
OLLAMA_API_BASE=http://127.0.0.1:11434

# Debugging
PENTESTAGENT_DEBUG=true

# ---------------------------------------------------------------------------
# MCP / Vendored tools (grouped)
# - Use the LAUNCH_* flags to allow the setup script or manager to auto-start
#   vendored MCP servers and helper daemons. Set to `true` to enable auto-start.
# - Defaults are `false` to avoid automatically running networked services.

# MCP adapters and vendored integrations
# The project no longer vendors external MCP adapters such as HexStrike
# or MetasploitMCP. Operators who need external adapters should install
# and run them manually (for example under `third_party/`) and then
# configure `mcp_servers.json` to reference the adapter.
#
# A minimal example adapter scaffold is provided at
# `pentestagent/mcp/example_adapter.py` to help implement adapters that
# match the expected adapter interface.

# Metasploit RPC (msfrpcd) connection settings
# - `MSF_USER`/`MSF_PASSWORD`: msfrpcd credentials (keep password secret)
# - `MSF_SERVER`/`MSF_PORT`: host/port where msfrpcd listens (typically 127.0.0.1)
# - `MSF_SSL`: set to `true` if msfrpcd is configured with TLS
MSF_USER=msf
# set a non-empty password if you want setup to auto-start msfrpcd
MSF_PASSWORD=
MSF_SERVER=127.0.0.1
MSF_PORT=55553
MSF_SSL=false

# Where to save any payloads generated by vendored MCP servers (optional)
#PAYLOAD_SAVE_DIR=$HOME/payloads

# ---------------------------------------------------------------------------
# Optional provider API keys and overrides
#OPENAI_API_KEY=
#ANTHROPIC_API_KEY=
#GEMINI_API_KEY=

# Embeddings (for RAG knowledge base): options include 'openai' or 'local'
PENTESTAGENT_EMBEDDINGS=local

# Optional daily token budgeting (examples)
#DAILY_TOKEN_LIMIT=1000000
#PENTESTAGENT_DAILY_TOKEN_BUDGET=500000

# ---------------------------------------------------------------------------
# Notes:
# - Never commit your real API keys or passwords. Keep `.env` out of version control.
# - Use `LAUNCH_METASPLOIT_MCP=true` only in trusted test environments.

# ===========================================================================
# NOTE(review): a second variant of this example file was concatenated below.
# It re-declares several keys from above (PENTESTAGENT_MODEL, PENTESTAGENT_DEBUG,
# PENTESTAGENT_EMBEDDINGS, OPENAI_API_KEY, DAILY_TOKEN_LIMIT, ...) with
# different values. Most dotenv loaders apply the LAST assignment, so the
# values below win. Keep only one variant when copying to `.env`.
# ===========================================================================
# PentestAgent Configuration

# API Keys (set at least one for chat model)
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GEMINI_API_KEY=

# For web search functionality (optional)
TAVILY_API_KEY=

# Chat Model (any LiteLLM-supported model)
# OpenAI: gpt-5, gpt-4.1, gpt-4.1-mini
# Anthropic: claude-sonnet-4-20250514, claude-opus-4-20250514
# Google: gemini models require gemini/ prefix (e.g., gemini/gemini-2.5-flash)
# Other providers: azure/, bedrock/, groq/, ollama/, together_ai/ (see litellm docs)
PENTESTAGENT_MODEL=gpt-5

# Ollama base URL (set this when using an `ollama/...` model)
# Example: http://127.0.0.1:11434 or http://192.168.0.165:11434
OLLAMA_BASE_URL=http://127.0.0.1:11434

# Example local model string (uncomment to use instead of gpt-5)
# PENTESTAGENT_MODEL="ollama/qwen2.5:7b-instruct"

# Embeddings (for RAG knowledge base)
# Options: openai, local (default: openai if OPENAI_API_KEY set, else local)
PENTESTAGENT_EMBEDDINGS=local

# Settings
PENTESTAGENT_DEBUG=false

# Optional: manually declare model/context and daily token budgeting
# Useful when provider metadata isn't available or you want to enforce local limits.
# Set the model's maximum context window (in tokens). Example values:
# - Gemini large: 131072
# - Gemini flash: 65536
# - Ollama local model: 8192
# PENTESTAGENT_MODEL_MAX_CONTEXT=131072

# Optional daily token budget tracking (integers, tokens):
# - Set the total token allowance you want to track per day
# - Set the current used amount (optional; defaults to 0)
# PENTESTAGENT_DAILY_TOKEN_BUDGET=500000
# PENTESTAGENT_DAILY_TOKEN_USED=0

# ---------------------------------------------------------------------------
# Example pricing & daily token limit used by `/token` diagnostics
# Uncomment and adjust to enable cost calculations.

# Per 1M tokens pricing (USD):
# Example (input at $2.00 / 1M, output at $12.00 / 1M)
INPUT_COST_PER_MILLION=2.0
OUTPUT_COST_PER_MILLION=12.0

# Optional unified override (applies to both input and output)
# COST_PER_MILLION=14.0

# Example daily budget (tokens)
DAILY_TOKEN_LIMIT=1000000

# ---------------------------------------------------------------------------
# Agent max iterations (regular agent + crew workers, default: 30)
# PENTESTAGENT_AGENT_MAX_ITERATIONS=30

# Orchestrator max iterations (crew mode coordinator, default: 50)
# PENTESTAGENT_ORCHESTRATOR_MAX_ITERATIONS=50