From a6cf68430f2019d62b9346d81c1a24cd6c22cbef Mon Sep 17 00:00:00 2001
From: Dimitri Graur
Date: Wed, 25 Feb 2026 15:49:40 +0100
Subject: [PATCH] fix: remove duplicate assistant message when content and
 tool_calls both present
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In CrewOrchestrator.run(), when the LLM returns both content and
tool_calls, the code was appending two consecutive assistant messages
to self._messages:

1. {"role": "assistant", "content": response.content}
2. {"role": "assistant", "content": ..., "tool_calls": [...]}

Back-to-back model-role messages cause Gemini (and technically any
OpenAI-compatible API) to reject the conversation on the next turn.

The fix is to drop the redundant bare-content append — the content is
already included in the combined message that follows.

Also update get_available_models() to list Gemini models with the
required gemini/ provider prefix so the helper reflects correct usage.

Fixes #8
---
 pentestagent/agents/crew/orchestrator.py | 3 ---
 pentestagent/llm/llm.py                  | 4 ++--
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/pentestagent/agents/crew/orchestrator.py b/pentestagent/agents/crew/orchestrator.py
index c7d9733..c8edd9d 100644
--- a/pentestagent/agents/crew/orchestrator.py
+++ b/pentestagent/agents/crew/orchestrator.py
@@ -184,9 +184,6 @@ class CrewOrchestrator:
                 # If there are tool calls, the content is "thinking" (reasoning before action)
                 if response.content:
                     yield {"phase": "thinking", "content": response.content}
-                    self._messages.append(
-                        {"role": "assistant", "content": response.content}
-                    )
 
                 def get_tc_name(tc):
                     if hasattr(tc, "function"):
diff --git a/pentestagent/llm/llm.py b/pentestagent/llm/llm.py
index e5a79f5..b2a9c9f 100644
--- a/pentestagent/llm/llm.py
+++ b/pentestagent/llm/llm.py
@@ -363,8 +363,8 @@ class LLM:
             "claude-sonnet-4-20250514",
             "claude-opus-4-20250514",
             # Google
-            "gemini-2.5-pro",
-            "gemini-2.5-flash",
+            "gemini/gemini-2.5-pro",
+            "gemini/gemini-2.5-flash",
             # Others via LiteLLM
             "ollama/llama3",
             "ollama/mixtral",