feat: pass decoded_token to llm and retrievers

This commit is contained in:
Siddhant Rai
2025-03-18 23:46:02 +05:30
parent f4ab85a2bb
commit ab95d90284
9 changed files with 75 additions and 25 deletions

View File

@@ -5,7 +5,8 @@ from application.usage import gen_token_usage, stream_token_usage
class BaseLLM(ABC):
def __init__(self, decoded_token=None):
    """Initialize shared LLM state.

    Args:
        decoded_token: Decoded auth payload for the requesting user
            (presumably a decoded JWT — confirm against callers), stored
            so concrete LLMs can attribute requests/usage to the user.
            Defaults to None so pre-existing callers that pass nothing
            keep working.
    """
    self.decoded_token = decoded_token
    # Running counters; updated by the token-usage decorators applied
    # via _apply_decorator (gen_token_usage / stream_token_usage).
    self.token_usage = {"prompt_tokens": 0, "generated_tokens": 0}
def _apply_decorator(self, method, decorators, *args, **kwargs):

View File

@@ -9,6 +9,7 @@ from application.llm.premai import PremAILLM
from application.llm.google_ai import GoogleLLM
from application.llm.novita import NovitaLLM
class LLMCreator:
llms = {
"openai": OpenAILLM,
@@ -21,12 +22,14 @@ class LLMCreator:
"premai": PremAILLM,
"groq": GroqLLM,
"google": GoogleLLM,
"novita": NovitaLLM
"novita": NovitaLLM,
}
@classmethod
def create_llm(cls, type, api_key, user_api_key, decoded_token=None, *args, **kwargs):
    """Instantiate the LLM implementation registered for *type*.

    Args:
        type: Provider key (e.g. "openai", "groq"); looked up
            case-insensitively in cls.llms. (Name shadows the builtin but
            is kept for interface compatibility.)
        api_key: Provider API key forwarded to the LLM constructor.
        user_api_key: Per-user API key forwarded to the LLM constructor.
        decoded_token: Decoded auth payload forwarded to the LLM
            constructor. Defaults to None so callers not yet passing it
            keep working.
        *args, **kwargs: Passed through to the LLM constructor unchanged.

    Returns:
        An instance of the matching LLM class from cls.llms.

    Raises:
        ValueError: If no LLM class is registered for *type*.
    """
    llm_class = cls.llms.get(type.lower())
    if llm_class is None:
        raise ValueError(f"No LLM class found for type {type}")
    # decoded_token goes AFTER *args: the keyword-before-*args call form
    # (`f(a, kw=1, *args)`) is legal but confusing and flagged by
    # flake8-bugbear B026; this form passes the exact same values.
    return llm_class(api_key, user_api_key, *args, decoded_token=decoded_token, **kwargs)