test: add agent test coverage and standardize test suite (#2051)
- Add 104 comprehensive tests for agent system
- Integrate agent tests into CI/CD pipeline
- Standardize tests with @pytest.mark.unit markers
- Fix cross-platform path compatibility
- Clean up unused imports and dependencies
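The @pytest.mark.unit standardization only works cleanly if the marker is registered, so that `pytest -m unit` selects the suite without unknown-marker warnings. A minimal sketch of one way to register it — the repository's actual configuration may live in pytest.ini or pyproject.toml instead:

# conftest.py — illustrative registration of the custom marker
def pytest_configure(config):
    config.addinivalue_line("markers", "unit: fast, isolated unit tests")

With the marker registered, a CI step can scope a run to `pytest -m unit`, which is presumably how these tests are wired into the pipeline.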
@@ -1,6 +1,6 @@
 import types
-import pytest
 
+import pytest
 from application.llm.openai import OpenAILLM
 
 
@@ -42,16 +42,16 @@ class FakeChatCompletions:
 
     def create(self, **kwargs):
         self.last_kwargs = kwargs
         # default non-streaming: return content
         if not kwargs.get("stream"):
-            return FakeChatCompletions._Response(choices=[
-                FakeChatCompletions._Choice(content="hello world")
-            ])
+            return FakeChatCompletions._Response(
+                choices=[FakeChatCompletions._Choice(content="hello world")]
+            )
         # streaming: yield line objects each with choices[0].delta.content
-        return FakeChatCompletions._Response(lines=[
-            FakeChatCompletions._StreamLine(["part1"]),
-            FakeChatCompletions._StreamLine(["part2"]),
-        ])
+        return FakeChatCompletions._Response(
+            lines=[
+                FakeChatCompletions._StreamLine(["part1"]),
+                FakeChatCompletions._StreamLine(["part2"]),
+            ]
+        )
 
 
 class FakeClient:
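For orientation, the _Response, _Choice, and _StreamLine helpers used above are defined earlier in this test file, outside the hunk. A minimal sketch of fakes with the shape these tests rely on — the choices[0].delta.content path comes from the diff's own comment, while the message.content path follows the OpenAI client convention and is an assumption here:

import types

class _Choice:
    # non-streaming choice: callers read choices[0].message.content
    def __init__(self, content):
        self.message = types.SimpleNamespace(content=content)

class _StreamLine:
    # one streamed chunk exposing choices[0].delta.content
    def __init__(self, contents):
        self.choices = [
            types.SimpleNamespace(delta=types.SimpleNamespace(content=c))
            for c in contents
        ]

class _Response:
    # carries .choices when non-streaming; iterates line objects when streaming
    def __init__(self, choices=None, lines=None):
        self.choices = choices or []
        self._lines = lines or []

    def __iter__(self):
        return iter(self._lines)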
@@ -71,16 +71,29 @@ def openai_llm(monkeypatch):
     return llm
 
 
 @pytest.mark.unit
 def test_clean_messages_openai_variants(openai_llm):
     messages = [
         {"role": "system", "content": "sys"},
         {"role": "model", "content": "asst"},
-        {"role": "user", "content": [
-            {"text": "hello"},
-            {"function_call": {"call_id": "c1", "name": "fn", "args": {"a": 1}}},
-            {"function_response": {"call_id": "c1", "name": "fn", "response": {"result": 42}}},
-            {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAA"}},
-        ]},
+        {
+            "role": "user",
+            "content": [
+                {"text": "hello"},
+                {"function_call": {"call_id": "c1", "name": "fn", "args": {"a": 1}}},
+                {
+                    "function_response": {
+                        "call_id": "c1",
+                        "name": "fn",
+                        "response": {"result": 42},
+                    }
+                },
+                {
+                    "type": "image_url",
+                    "image_url": {"url": "data:image/png;base64,AAA"},
+                },
+            ],
+        },
     ]
 
     cleaned = openai_llm._clean_messages_openai(messages)
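The assertions in the next hunk pin down what _clean_messages_openai is expected to produce: Gemini-style message parts mapped onto OpenAI chat messages. A rough sketch of the expected output shape, inferred from those assertions — exact keys beyond role/content are assumptions:

# hypothetical cleaned output — illustrates the mapping, not the exact values
expected_shape = [
    {"role": "system", "content": "sys"},
    {"role": "assistant", "content": "asst"},  # "model" mapped to "assistant"
    {
        "role": "user",
        "content": [  # list content preserved, image part kept as image_url
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAA"}},
        ],
    },
    {"role": "tool", "content": "..."},  # derived from the function_response part
]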
@@ -89,17 +102,27 @@ def test_clean_messages_openai_variants(openai_llm):
     assert roles.count("assistant") >= 1
     assert any(m["role"] == "tool" for m in cleaned)
 
-    assert any(isinstance(m["content"], list) and any(
-        part.get("type") == "image_url" for part in m["content"] if isinstance(part, dict)
-    ) for m in cleaned if m["role"] == "user")
+    assert any(
+        isinstance(m["content"], list)
+        and any(
+            part.get("type") == "image_url"
+            for part in m["content"]
+            if isinstance(part, dict)
+        )
+        for m in cleaned
+        if m["role"] == "user"
+    )
 
 
 @pytest.mark.unit
 def test_raw_gen_calls_openai_client_and_returns_content(openai_llm):
     msgs = [
         {"role": "system", "content": "sys"},
         {"role": "user", "content": "hello"},
     ]
-    content = openai_llm._raw_gen(openai_llm, model="gpt-4o", messages=msgs, stream=False)
+    content = openai_llm._raw_gen(
+        openai_llm, model="gpt-4o", messages=msgs, stream=False
+    )
     assert content == "hello world"
 
     passed = openai_llm.client.chat.completions.last_kwargs
@@ -108,16 +131,20 @@ def test_raw_gen_calls_openai_client_and_returns_content(openai_llm):
     assert passed["stream"] is False
 
 
 @pytest.mark.unit
 def test_raw_gen_stream_yields_chunks(openai_llm):
     msgs = [
         {"role": "user", "content": "hi"},
     ]
-    gen = openai_llm._raw_gen_stream(openai_llm, model="gpt", messages=msgs, stream=True)
+    gen = openai_llm._raw_gen_stream(
+        openai_llm, model="gpt", messages=msgs, stream=True
+    )
     chunks = list(gen)
     assert "part1" in "".join(chunks)
     assert "part2" in "".join(chunks)
 
 
 @pytest.mark.unit
 def test_prepare_structured_output_format_enforces_required_and_strict(openai_llm):
     schema = {
         "type": "object",
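The streaming test above passes because the fake's line objects mirror real stream chunks. A plausible sketch of the loop inside _raw_gen_stream, assuming it follows the usual pattern of yielding each chunk's delta content — the real implementation may differ:

# hypothetical core of a raw streaming generator
def iter_deltas(response):
    for line in response:
        delta = line.choices[0].delta.content
        if delta:  # skip chunks with empty delta content
            yield delta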
@@ -134,8 +161,8 @@ def test_prepare_structured_output_format_enforces_required_and_strict(openai_ll
     assert js["schema"]["additionalProperties"] is False
 
 
 @pytest.mark.unit
 def test_prepare_messages_with_attachments_image_and_pdf(openai_llm, monkeypatch):
 
     monkeypatch.setattr(openai_llm, "_get_base64_image", lambda att: "AAA=")
     monkeypatch.setattr(openai_llm, "_upload_file_to_openai", lambda att: "file_xyz")
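The assertion at the top of the hunk above, on js["schema"]["additionalProperties"], suggests the helper emits OpenAI's json_schema response format with strict mode on. A sketch of that payload shape, where the wrapper keys follow OpenAI's documented structured-output format and the name value is an assumption:

# assumed shape of the prepared structured-output format
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "output",  # hypothetical schema name
        "strict": True,
        "schema": {
            "type": "object",
            "properties": {"a": {"type": "string"}},
            "required": ["a"],  # every property enforced as required
            "additionalProperties": False,
        },
    },
}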
@@ -146,12 +173,15 @@ def test_prepare_messages_with_attachments_image_and_pdf(openai_llm, monkeypatch
     ]
     out = openai_llm.prepare_messages_with_attachments(messages, attachments)
 
     # last user message should have list content with text and two attachments
     user_msg = next(m for m in out if m["role"] == "user")
     assert isinstance(user_msg["content"], list)
-    types_in_content = [p.get("type") for p in user_msg["content"] if isinstance(p, dict)]
+    types_in_content = [
+        p.get("type") for p in user_msg["content"] if isinstance(p, dict)
+    ]
     assert "image_url" in types_in_content or any(
         isinstance(p, dict) and p.get("image_url") for p in user_msg["content"]
     )
-    assert any(isinstance(p, dict) and p.get("file", {}).get("file_id") == "file_xyz" for p in user_msg["content"])
+    assert any(
+        isinstance(p, dict) and p.get("file", {}).get("file_id") == "file_xyz"
+        for p in user_msg["content"]
+    )
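Given the monkeypatched helpers — base64 image "AAA=" and uploaded file id "file_xyz" — the assertions imply content parts roughly like the following; the part ordering, the text part's key, and the data-URL prefix are assumptions:

# hypothetical user content after attachment preparation
user_content = [
    {"type": "text", "text": "hello"},
    {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAA="}},
    {"type": "file", "file": {"file_id": "file_xyz"}},  # the uploaded PDF
]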