diff --git a/.env.example b/.env.example index 3e69fc5..94fd3b3 100644 --- a/.env.example +++ b/.env.example @@ -15,6 +15,11 @@ TAVILY_API_KEY= # Other providers: azure/, bedrock/, groq/, ollama/, together_ai/ (see litellm docs) PENTESTAGENT_MODEL=gpt-5 +# Ollama local/remote API base +# Example: http://127.0.0.1:11434 or http://192.168.0.165:11434 +# Set this when using Ollama as the provider; the app's env mapping reads OLLAMA_BASE_URL or OLLAMA_URL (LiteLLM natively reads OLLAMA_API_BASE) +# OLLAMA_BASE_URL=http://127.0.0.1:11434 + # Embeddings (for RAG knowledge base) # Options: openai, local (default: openai if OPENAI_API_KEY set, else local) # PENTESTAGENT_EMBEDDINGS=local diff --git a/pentestagent/llm/llm.py b/pentestagent/llm/llm.py index 2520f4e..155a602 100644 --- a/pentestagent/llm/llm.py +++ b/pentestagent/llm/llm.py @@ -50,6 +50,23 @@ class LLM: # Ensure litellm is available try: + # If user provided an Ollama base URL (e.g. via .env), map it to + # several common environment variable names that LiteLLM or + # underlying Ollama clients may read. This helps when different + # naming conventions are used (OLLAMA_BASE_URL vs LITELLM_OLLAMA_*). + ollama_base = os.getenv("OLLAMA_BASE_URL") or os.getenv("OLLAMA_URL") + if ollama_base: + # Populate a few possible names without overwriting any that + # are already set by the environment. + os.environ.setdefault("OLLAMA_BASE_URL", ollama_base) + os.environ.setdefault("OLLAMA_URL", ollama_base) + os.environ.setdefault("OLLAMA_API_URL", ollama_base) + os.environ.setdefault("LITELLM_OLLAMA_BASE_URL", ollama_base) + os.environ.setdefault("LITELLM_OLLAMA_URL", ollama_base) + # Some clients expect a host without scheme + host_only = ollama_base.replace("http://", "").replace("https://", "") + os.environ.setdefault("OLLAMA_HOST", host_only) + import litellm # Drop unsupported params for models that don't support them