Mirror of https://github.com/GH05TCREW/pentestagent.git, synced 2026-03-07 14:23:20 +00:00.
fix: remove duplicate assistant message when content and tool_calls both present
In CrewOrchestrator.run(), when the LLM returns both content and tool_calls,
the code was appending two consecutive assistant messages to self._messages:
1. {"role": "assistant", "content": response.content}
2. {"role": "assistant", "content": ..., "tool_calls": [...]}
Back-to-back model-role messages cause Gemini (and technically any
OpenAI-compatible API) to reject the conversation on the next turn.
The fix is to drop the redundant bare-content append — the content is
already included in the combined message that follows.
Also update get_available_models() to list Gemini models with the
required gemini/ provider prefix so the helper reflects correct usage.
Fixes #8
This commit is contained in:
@@ -184,9 +184,6 @@ class CrewOrchestrator:
         # If there are tool calls, the content is "thinking" (reasoning before action)
         if response.content:
             yield {"phase": "thinking", "content": response.content}
-            self._messages.append(
-                {"role": "assistant", "content": response.content}
-            )

         def get_tc_name(tc):
             if hasattr(tc, "function"):
||||
@@ -363,8 +363,8 @@ class LLM:
             "claude-sonnet-4-20250514",
             "claude-opus-4-20250514",
             # Google
-            "gemini-2.5-pro",
-            "gemini-2.5-flash",
+            "gemini/gemini-2.5-pro",
+            "gemini/gemini-2.5-flash",
             # Others via LiteLLM
             "ollama/llama3",
             "ollama/mixtral",
||||
Reference in New Issue
Block a user