feat: model registry and capabilities for multi-provider support (#2158)

* feat: Implement model registry and capabilities for multi-provider support

- Added ModelRegistry to manage available models and their capabilities.
- Introduced a ModelProvider enum for the different LLM providers.
- Created a ModelCapabilities dataclass to define model features (see the sketch after this list).
- Implemented methods to load models based on API keys and settings.
- Added utility functions for model management in model_utils.py.
- Updated settings.py to include provider-specific API keys.
- Refactored the LLM classes (Anthropic, OpenAI, Google, etc.) to use the new model registry.
- Enhanced utility functions to handle token limits and model validation.
- Improved code structure and logging for better maintainability.
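
For orientation, a minimal sketch of how these pieces could fit together. Only the names ModelRegistry, ModelProvider, ModelCapabilities, and model_id come from this change; every field, method, and provider value below is an illustrative assumption, not the shipped implementation.

# Illustrative sketch only: field names, method signatures, and provider
# values are assumptions, not the actual implementation in this PR.
from dataclasses import dataclass
from enum import Enum


class ModelProvider(Enum):
    ANTHROPIC = "anthropic"
    OPENAI = "openai"
    GOOGLE = "google"


@dataclass(frozen=True)
class ModelCapabilities:
    supports_streaming: bool = True
    supports_tools: bool = False
    max_tokens: int = 4096


class ModelRegistry:
    def __init__(self):
        self._models = {}

    def register(self, model_id, provider, capabilities):
        self._models[model_id] = (provider, capabilities)

    def available_models(self, configured_providers):
        # Expose only models whose provider has an API key configured.
        return [
            model_id
            for model_id, (provider, _) in self._models.items()
            if provider in configured_providers
        ]

Gating available_models() on the set of configured providers mirrors the bullet above about loading models based on API keys and settings.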

* feat: Add model selection feature with API integration and UI component

* feat: Add model selection and default model functionality in agent management

* test: Update assertions and formatting in stream processing tests

* refactor(llm): Standardize model identifier to model_id

* fix tests

---------

Co-authored-by: Alex <a@tushynski.me>
Siddhant Rai authored on 2025-11-14 16:43:19 +05:30 · committed by GitHub
parent fbf7cf874b · commit 3f7de867cc
54 changed files with 1388 additions and 226 deletions


@@ -1,11 +1,14 @@
import sys
import types

import pytest


class _FakeCompletion:
    def __init__(self, text):
        self.completion = text


class _FakeCompletions:
    def __init__(self):
        self.last_kwargs = None
@@ -17,6 +20,7 @@ class _FakeCompletions:
            return self._stream
        return _FakeCompletion("final")


class _FakeAnthropic:
    def __init__(self, api_key=None):
        self.api_key = api_key
@@ -29,9 +33,19 @@ def patch_anthropic(monkeypatch):
    fake.Anthropic = _FakeAnthropic
    fake.HUMAN_PROMPT = "<HUMAN>"
    fake.AI_PROMPT = "<AI>"

    # Drop any previously imported anthropic modules so the fake is the one
    # picked up on the next import.
    modules_to_remove = [key for key in sys.modules if key.startswith("anthropic")]
    for key in modules_to_remove:
        sys.modules.pop(key, None)

    sys.modules["anthropic"] = fake
    if "application.llm.anthropic" in sys.modules:
        del sys.modules["application.llm.anthropic"]

    yield

    # Undo the patching so later tests import the real SDK again.
    sys.modules.pop("anthropic", None)
    if "application.llm.anthropic" in sys.modules:
        del sys.modules["application.llm.anthropic"]
def test_anthropic_raw_gen_builds_prompt_and_returns_completion():
@@ -42,7 +56,9 @@ def test_anthropic_raw_gen_builds_prompt_and_returns_completion():
{"content": "ctx"},
{"content": "q"},
]
out = llm._raw_gen(llm, model="claude-2", messages=msgs, stream=False, max_tokens=55)
out = llm._raw_gen(
llm, model="claude-2", messages=msgs, stream=False, max_tokens=55
)
assert out == "final"
last = llm.anthropic.completions.last_kwargs
assert last["model"] == "claude-2"
@@ -59,7 +75,8 @@ def test_anthropic_raw_gen_stream_yields_chunks():
{"content": "ctx"},
{"content": "q"},
]
gen = llm._raw_gen_stream(llm, model="claude", messages=msgs, stream=True, max_tokens=10)
gen = llm._raw_gen_stream(
llm, model="claude", messages=msgs, stream=True, max_tokens=10
)
chunks = list(gen)
assert chunks == ["s1", "s2"]
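
A note on the patch_anthropic fixture above: swapping entries in sys.modules before the module under test is imported is a standard way to stub out a third-party SDK in pytest. A minimal standalone version of the same pattern, using a hypothetical some_sdk module in place of anthropic:

import sys
import types

import pytest


@pytest.fixture
def fake_sdk(monkeypatch):
    # Build a stand-in module object and install it under the SDK's import
    # name so that `import some_sdk` resolves to the fake.
    fake = types.ModuleType("some_sdk")
    fake.Client = lambda api_key=None: None
    monkeypatch.setitem(sys.modules, "some_sdk", fake)
    yield fake
    # monkeypatch.setitem is undone automatically at teardown, restoring
    # whatever (if anything) was previously registered under "some_sdk".

Using monkeypatch.setitem gets the teardown for free; the fixture in this diff instead restores sys.modules by hand after yield, which also lets it evict the cached application.llm.anthropic module so the next import re-binds against the fake.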