(fix) avoid stringifying list

This commit is contained in:
ManishMadan2882
2024-12-19 17:58:55 +05:30
parent 9096013e13
commit b19c14787e
5 changed files with 16 additions and 10 deletions

View File

@@ -324,7 +324,7 @@ class Stream(Resource):
try:
question = data["question"]
-                history = str(limit_chat_history(json.loads(data.get("history", []))))
+                history = limit_chat_history(json.loads(data.get("history", [])), gpt_model=gpt_model)
conversation_id = data.get("conversation_id")
prompt_id = data.get("prompt_id", "default")
@@ -455,7 +455,7 @@ class Answer(Resource):
try:
question = data["question"]
-                history = str(limit_chat_history(json.loads(data.get("history", []))))
+                history = limit_chat_history(json.loads(data.get("history", [])), gpt_model=gpt_model)
conversation_id = data.get("conversation_id")
prompt_id = data.get("prompt_id", "default")
chunks = int(data.get("chunks", 2))

View File

@@ -73,6 +73,7 @@ class BraveRetSearch(BaseRetriever):
if len(self.chat_history) > 1:
for i in self.chat_history:
if "prompt" in i and "response" in i:
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)

View File

@@ -73,6 +73,7 @@ class ClassicRAG(BaseRetriever):
if len(self.chat_history) > 1:
for i in self.chat_history:
if "prompt" in i and "response" in i:
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)
@@ -80,7 +81,7 @@ class ClassicRAG(BaseRetriever):
{"role": "system", "content": i["response"]}
)
messages_combine.append({"role": "user", "content": self.question})
llm = LLMCreator.create_llm(
settings.LLM_NAME, api_key=settings.API_KEY, user_api_key=self.user_api_key
)

View File

@@ -90,6 +90,7 @@ class DuckDuckSearch(BaseRetriever):
if len(self.chat_history) > 1:
for i in self.chat_history:
if "prompt" in i and "response" in i:
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)

View File

@@ -54,13 +54,16 @@ def limit_chat_history(history, max_token_limit=None, gpt_model="docsgpt"):
from application.core.settings import settings
     max_token_limit = (
-        max_token_limit
-        if max_token_limit and
-        max_token_limit < settings.MODEL_TOKEN_LIMITS.get(
-            gpt_model, settings.DEFAULT_MAX_HISTORY
-        )
-        else settings.MODEL_TOKEN_LIMITS.get(gpt_model, settings.DEFAULT_MAX_HISTORY)
+        max_token_limit
+        if max_token_limit
+        and max_token_limit < settings.MODEL_TOKEN_LIMITS.get(
+            gpt_model, settings.DEFAULT_MAX_HISTORY
+        )
+        else settings.MODEL_TOKEN_LIMITS.get(
+            gpt_model, settings.DEFAULT_MAX_HISTORY
+        )
     )
if not history:
return []
@@ -78,5 +81,5 @@ def limit_chat_history(history, max_token_limit=None, gpt_model="docsgpt"):
trimmed_history.insert(0, message)
else:
break
return trimmed_history