From 74845aed64fa87f7819dae410c280c91022dca95 Mon Sep 17 00:00:00 2001
From: Alex
Date: Thu, 18 May 2023 14:27:13 +0100
Subject: [PATCH] history init

---
 application/app.py                            | 25 +++++++++++--------
 frontend/src/conversation/conversationApi.ts  |  3 ++-
 .../src/conversation/conversationSlice.ts     |  1 +
 3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/application/app.py b/application/app.py
index d68c5b93..fa56c1d4 100644
--- a/application/app.py
+++ b/application/app.py
@@ -23,6 +23,7 @@ from langchain.prompts.chat import (
     ChatPromptTemplate,
     SystemMessagePromptTemplate,
     HumanMessagePromptTemplate,
+    AIMessagePromptTemplate,
 )
 from pymongo import MongoClient
 from werkzeug.utils import secure_filename
@@ -107,6 +108,8 @@ def run_async_chain(chain, question, chat_history):
     result["answer"] = answer
     return result
 
+
+
 @celery.task(bind=True)
 def ingest(self, directory, formats, name_job, filename, user):
 
@@ -164,16 +167,6 @@ def api_answer():
         docsearch = FAISS.load_local(vectorstore, CohereEmbeddings(cohere_api_key=embeddings_key))
 
     # create a prompt template
-    if history:
-        history = json.loads(history)
-        template_temp = template_hist.replace("{historyquestion}", history[0]).replace("{historyanswer}",
-                                                                                       history[1])
-        c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template_temp,
-                                  template_format="jinja2")
-    else:
-        c_prompt = PromptTemplate(input_variables=["summaries", "question"], template=template,
-                                  template_format="jinja2")
-
     q_prompt = PromptTemplate(input_variables=["context", "question"], template=template_quest,
                               template_format="jinja2")
     if settings.LLM_NAME == "openai_chat":
@@ -182,6 +175,18 @@
             SystemMessagePromptTemplate.from_template(chat_combine_template),
             HumanMessagePromptTemplate.from_template("{question}")
         ]
+        if history:
+            tokens_current_history = 0
+            tokens_max_history = 1000
+            #count tokens in history
+            for i in history:
+                if "prompt" in i and "response" in i:
+                    tokens_batch = llm.get_num_tokens(i["prompt"]) + llm.get_num_tokens(i["response"])
+                    if tokens_current_history + tokens_batch < tokens_max_history:
+                        tokens_current_history += tokens_batch
+                        messages_combine.append(HumanMessagePromptTemplate.from_template(i["prompt"]))
+                        messages_combine.append(SystemMessagePromptTemplate.from_template(i["response"]))
+
         p_chat_combine = ChatPromptTemplate.from_messages(messages_combine)
     elif settings.LLM_NAME == "openai":
         llm = OpenAI(openai_api_key=api_key, temperature=0)
diff --git a/frontend/src/conversation/conversationApi.ts b/frontend/src/conversation/conversationApi.ts
index c7320342..4d5bdfb7 100644
--- a/frontend/src/conversation/conversationApi.ts
+++ b/frontend/src/conversation/conversationApi.ts
@@ -7,6 +7,7 @@ export function fetchAnswerApi(
   question: string,
   apiKey: string,
   selectedDocs: Doc,
+  history: Array<any> = [],
 ): Promise<Answer> {
   let namePath = selectedDocs.name;
   if (selectedDocs.language === namePath) {
@@ -37,7 +38,7 @@ export function fetchAnswerApi(
       question: question,
       api_key: apiKey,
       embeddings_key: apiKey,
-      history: localStorage.getItem('chatHistory'),
+      history: history,
       active_docs: docPath,
     }),
   })
diff --git a/frontend/src/conversation/conversationSlice.ts b/frontend/src/conversation/conversationSlice.ts
index c728b9e0..a822c9bd 100644
--- a/frontend/src/conversation/conversationSlice.ts
+++ b/frontend/src/conversation/conversationSlice.ts
@@ -19,6 +19,7 @@ export const fetchAnswer = createAsyncThunk<
       question,
       state.preference.apiKey,
       state.preference.selectedDocs!,
+      state.conversation.queries,
     );
     return answer;
   });