mirror of
https://github.com/arc53/DocsGPT.git
synced 2025-11-29 16:43:16 +00:00
* feat: Implement model registry and capabilities for multi-provider support - Added ModelRegistry to manage available models and their capabilities. - Introduced ModelProvider enum for different LLM providers. - Created ModelCapabilities dataclass to define model features. - Implemented methods to load models based on API keys and settings. - Added utility functions for model management in model_utils.py. - Updated settings.py to include provider-specific API keys. - Refactored LLM classes (Anthropic, OpenAI, Google, etc.) to utilize new model registry. - Enhanced utility functions to handle token limits and model validation. - Improved code structure and logging for better maintainability. * feat: Add model selection feature with API integration and UI component * feat: Add model selection and default model functionality in agent management * test: Update assertions and formatting in stream processing tests * refactor(llm): Standardize model identifier to model_id * fix tests --------- Co-authored-by: Alex <a@tushynski.me>
83 lines
2.2 KiB
Python
83 lines
2.2 KiB
Python
import sys
|
|
import types
|
|
|
|
import pytest
|
|
|
|
|
|
class _FakeCompletion:
|
|
def __init__(self, text):
|
|
self.completion = text
|
|
|
|
|
|
class _FakeCompletions:
    """Fake of the Anthropic ``completions`` API surface.

    Records the keyword arguments of the most recent ``create`` call so
    tests can inspect what the code under test sent, and returns canned
    results: a fixed two-chunk stream when ``stream=True``, otherwise a
    single completion whose text is ``"final"``.
    """

    def __init__(self):
        # Captured kwargs from the most recent create() call (None until called).
        self.last_kwargs = None
        # Canned chunks handed back verbatim for streaming calls.
        self._stream = [_FakeCompletion("s1"), _FakeCompletion("s2")]

    def create(self, **kwargs):
        """Record *kwargs* and return the canned stream or final completion."""
        self.last_kwargs = kwargs
        return self._stream if kwargs.get("stream") else _FakeCompletion("final")
|
|
|
|
|
|
class _FakeAnthropic:
    """Minimal stand-in for ``anthropic.Anthropic``.

    Mirrors only the attributes the wrapper under test touches: the stored
    ``api_key`` and a ``completions`` endpoint object.
    """

    def __init__(self, api_key=None):
        self.api_key = api_key
        self.completions = _FakeCompletions()
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def patch_anthropic():
    """Install a fake ``anthropic`` module for every test in this file.

    Builds an in-memory module exposing the names the wrapper uses
    (``Anthropic``, ``HUMAN_PROMPT``, ``AI_PROMPT``), evicts any previously
    imported ``anthropic`` package (including submodules) so the fake takes
    its place, and drops the cached ``application.llm.anthropic`` wrapper so
    the tests re-import it against the fake. Teardown removes both entries
    again so later test files re-import cleanly.

    Note: the unused ``monkeypatch`` parameter was removed — the fixture
    manages ``sys.modules`` manually and never used it.
    """
    fake = types.ModuleType("anthropic")
    fake.Anthropic = _FakeAnthropic
    fake.HUMAN_PROMPT = "<HUMAN>"
    fake.AI_PROMPT = "<AI>"

    # Remove the real SDK and any of its submodules before installing the fake.
    for key in [k for k in sys.modules if k.startswith("anthropic")]:
        sys.modules.pop(key, None)
    sys.modules["anthropic"] = fake

    # Force the wrapper module to re-import against the fake SDK.
    sys.modules.pop("application.llm.anthropic", None)

    yield

    # Teardown: drop the fake and the wrapper so subsequent imports start fresh.
    sys.modules.pop("anthropic", None)
    sys.modules.pop("application.llm.anthropic", None)
|
|
|
|
|
|
def test_anthropic_raw_gen_builds_prompt_and_returns_completion():
    """Non-streaming generation builds a HUMAN/AI prompt and returns the text."""
    from application.llm.anthropic import AnthropicLLM

    llm = AnthropicLLM(api_key="k")
    msgs = [
        {"content": "ctx"},
        {"content": "q"},
    ]
    # NOTE(review): _raw_gen is passed the instance explicitly in addition to
    # the bound self — presumably a decorated/unbound call path; this mirrors
    # how production code invokes it.
    out = llm._raw_gen(
        llm, model="claude-2", messages=msgs, stream=False, max_tokens=55
    )
    assert out == "final"

    last = llm.anthropic.completions.last_kwargs
    assert last["model"] == "claude-2"
    assert last["max_tokens_to_sample"] == 55
    # One condition per assert so a failure pinpoints the broken property
    # (compound `and` asserts hide which half failed).
    assert last["prompt"].startswith("<HUMAN>")
    assert last["prompt"].endswith("<AI>")
    assert "### Context" in last["prompt"]
    assert "### Question" in last["prompt"]
|
|
|
|
|
|
def test_anthropic_raw_gen_stream_yields_chunks():
    """Streaming generation yields each fake chunk's text, in order."""
    from application.llm.anthropic import AnthropicLLM

    llm = AnthropicLLM(api_key="k")
    messages = [{"content": "ctx"}, {"content": "q"}]
    # NOTE(review): the instance is passed explicitly alongside the bound
    # self — presumably an unbound/decorated call path; kept as production
    # code invokes it.
    stream = llm._raw_gen_stream(
        llm, model="claude", messages=messages, stream=True, max_tokens=10
    )
    assert list(stream) == ["s1", "s2"]