Mirror of https://github.com/arc53/DocsGPT.git
Change the caching method to save the whole conversation history instead of a single message
application/cache.py
@@ -1,7 +1,6 @@
 import sys
 import redis
 import time
 from datetime import datetime
 import json
 from application.core.settings import settings
 from application.utils import get_hash
@@ -19,7 +18,7 @@ def make_redis():
         db=settings.REDIS_DB,
     )

-def gen_cache_key(messages, model="docgpt"):
+def gen_cache_key(*messages, model="docgpt"):
     """
     Generate a unique cache key based on the latest user message and model.

@@ -40,17 +39,11 @@ def gen_cache_key(messages, model="docgpt"):
     Returns:
         str: A unique cache key generated by hashing the combined model name and latest user message.
     """
-    if not all(isinstance(msg, dict) for msg in messages):
-        raise ValueError("All messages must be dictionaries.")
-
+    if not messages:
+        raise ValueError("No messages found in the conversation to generate a cache key.")
+    if not isinstance(messages, list):
+        raise ValueError("Messages must be a list of dictionaries.")

-    latest_user_prompt = next((msg['content'] for msg in reversed(messages) if msg.get('role') == 'user'), None)
-    if latest_user_prompt is None:
-        raise ValueError("No user message found in the conversation to generate a cache key.")
-
-    combined = f"{model}_{latest_user_prompt}"
+    messages_str = json.dumps(list(messages), sort_keys=True)
+    combined = f"{model}_{messages_str}"
     cache_key = get_hash(combined)
     return cache_key
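The key derivation above no longer hashes only the latest user prompt; it hashes the JSON-serialized conversation, so every distinct history gets its own cache entry. A minimal standalone sketch of the same technique, using hashlib.sha256 as a stand-in for application.utils.get_hash (whose implementation is not shown in this diff):

import hashlib
import json

def conversation_cache_key(messages, model="docgpt"):
    # sort_keys makes the JSON, and therefore the hash, stable
    # regardless of dict key ordering inside each message.
    messages_str = json.dumps(list(messages), sort_keys=True)
    return hashlib.sha256(f"{model}_{messages_str}".encode("utf-8")).hexdigest()

history = [
    {"role": "user", "content": "What is DocsGPT?"},
    {"role": "assistant", "content": "An open-source RAG assistant."},
]
key_a = conversation_cache_key(history)
key_b = conversation_cache_key(history + [{"role": "user", "content": "How do I run it?"}])
assert key_a != key_b  # appending a message changes the key

One subtlety worth noting: because the new signature is gen_cache_key(*messages, model="docgpt"), the function body receives a tuple, so the isinstance(messages, list) guard would reject an unpacked call such as gen_cache_key(*messages), whereas json.dumps(list(messages), sort_keys=True) itself accepts either a list or a tuple.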
@@ -69,15 +62,16 @@ def gen_cache(func):

     def wrapper(self, model, messages, *args, **kwargs):
         try:
-            cache_key = gen_cache_key(messages=messages)
+            cache_key = gen_cache_key(*messages)
             redis_client = make_redis()
             cached_response = redis_client.get(cache_key)

             if cached_response:
                 print(f"Cache hit for key: {cache_key}")
                 return cached_response.decode('utf-8')

             result = func(self, model, messages, *args, **kwargs)
             redis_client.set(cache_key, result, ex=3600)
             print(f"Cache saved for key: {cache_key}")

             return result
         except ValueError as e:
             print(e)
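In use, gen_cache wraps a completion method whose signature matches wrapper(self, model, messages, ...). The sketch below is a simplified, self-contained re-implementation of the same GET/SET pattern, not the DocsGPT code itself: hashlib.sha256 stands in for get_hash, a localhost Redis instance is assumed, and the class and method names are illustrative.

import hashlib
import json
import redis

def cached_gen(func):
    # Simplified re-implementation of the gen_cache pattern above,
    # keyed on the full JSON-serialized history.
    def wrapper(self, model, messages, *args, **kwargs):
        key = hashlib.sha256(
            f"{model}_{json.dumps(list(messages), sort_keys=True)}".encode("utf-8")
        ).hexdigest()
        r = redis.Redis(host="localhost", port=6379, db=0)  # assumed local instance
        hit = r.get(key)
        if hit:
            return hit.decode("utf-8")  # cache hit: skip the model call
        result = func(self, model, messages, *args, **kwargs)
        r.set(key, result, ex=3600)  # same one-hour TTL as the diff
        return result
    return wrapper

class LLM:
    @cached_gen
    def gen(self, model, messages):
        return "Hi! How can I help?"  # stand-in for a real model call

llm = LLM()
history = [{"role": "user", "content": "Hello"}]
first = llm.gen("docgpt", history)   # miss: computes, then SET with ex=3600
second = llm.gen("docgpt", history)  # hit: served straight from Redis

Keying on the serialized history means the same question asked at different points in a conversation produces different keys, which is exactly what this commit is after.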
@@ -100,16 +94,17 @@ def stream_cache(func):
     (self._raw_gen, decorators=decorators, model=model, messages=messages, stream=stream, *args, **kwargs
     """
     def wrapper(self, model, messages, stream, *args, **kwargs):
-        cache_key = gen_cache_key(messages=messages)
+        cache_key = gen_cache_key(*messages)

         try:
-            # we are using lrange and rpush to simulate streaming
             redis_client = make_redis()
-            cached_response = redis_client.lrange(cache_key, 0, -1)
+            cached_response = redis_client.get(cache_key)
             if cached_response:
-                #print(f"Cache hit for stream key: {cache_key}")
+                print(f"Cache hit for stream key: {cache_key}")
+                cached_response = json.loads(cached_response.decode('utf-8'))
                 for chunk in cached_response:
-                    yield chunk.decode('utf-8')
+                    yield chunk
                     # need to slow down the response to simulate streaming
                     # because the cached response is instantaneous
                     # and redis is using in-memory storage
@@ -117,17 +112,17 @@ def stream_cache(func):
                 return

             result = func(self, model, messages, stream, *args, **kwargs)
+            stream_cache_data = []

             for chunk in result:
-                redis_client.rpush(cache_key, chunk)
+                stream_cache_data.append(chunk)
                 yield chunk

-            # expire the cache after 30 minutes
-            redis_client.expire(cache_key, 1800)
+            redis_client.set(cache_key, json.dumps(stream_cache_data), ex=1800)
             print(f"Stream cache saved for key: {cache_key}")
         except ValueError as e:
             print(e)
             yield "Error: No user message found in the conversation to generate a cache key."

     return wrapper
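The streaming path changes strategy here: instead of RPUSH-ing each chunk onto a Redis list and replaying it with LRANGE, the wrapper now buffers chunks in stream_cache_data and stores them as one JSON array under a single key. A minimal sketch of that save/replay round trip against a plain redis-py client (the key name and chunks are illustrative, and a local Redis instance is assumed):

import json
import redis

r = redis.Redis(host="localhost", port=6379, db=0)  # assumed local instance

cache_key = "docgpt_example_stream"
chunks = ["Doc", "sGPT ", "streams ", "answers."]

# Save: one JSON-encoded value with the same 30-minute TTL as the diff.
r.set(cache_key, json.dumps(chunks), ex=1800)

# Replay: a single GET, then yield chunk by chunk after json.loads.
cached = r.get(cache_key)
if cached:
    for chunk in json.loads(cached.decode("utf-8")):
        print(chunk, end="")

One consequence of the switch is that a cached stream becomes visible to later requests only after the generator has been fully consumed, whereas the old approach exposed partially written lists; it also collapses the per-chunk RPUSH plus EXPIRE round trips into a single SET.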
frontend/package-lock.json (generated; 2 changed lines)
@@ -1675,7 +1675,7 @@
       "version": "18.3.0",
       "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz",
       "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==",
-      "devOptional": true,
+      "dev": true,
       "dependencies": {
         "@types/react": "*"
       }
results.txt (new file; 12 lines)
@@ -0,0 +1,12 @@
+Base URL:http://petstore.swagger.io,https://api.example.com
+Path1: /pets
+description: None
+parameters: []
+methods:
+get=A paged array of pets
+post=Null response
+Path2: /pets/{petId}
+description: None
+parameters: []
+methods:
+get=Expected response to a valid request