diff --git a/application/api/answer/routes.py b/application/api/answer/routes.py
index 34e6abca..a2e2d1af 100644
--- a/application/api/answer/routes.py
+++ b/application/api/answer/routes.py
@@ -115,8 +115,9 @@ def is_azure_configured():
def save_conversation(
- conversation_id, question, response, source_log_docs, tool_calls, llm, index=None
+ conversation_id, question, response, source_log_docs, tool_calls, llm, index=None, api_key=None
):
+ current_time = datetime.datetime.now(datetime.timezone.utc)
if conversation_id is not None and index is not None:
conversations_collection.update_one(
{"_id": ObjectId(conversation_id), f"queries.{index}": {"$exists": True}},
@@ -126,6 +127,7 @@ def save_conversation(
f"queries.{index}.response": response,
f"queries.{index}.sources": source_log_docs,
f"queries.{index}.tool_calls": tool_calls,
+ f"queries.{index}.timestamp": current_time
}
},
)
@@ -144,6 +146,7 @@ def save_conversation(
"response": response,
"sources": source_log_docs,
"tool_calls": tool_calls,
+ "timestamp": current_time
}
}
},
@@ -168,21 +171,25 @@ def save_conversation(
]
completion = llm.gen(model=gpt_model, messages=messages_summary, max_tokens=30)
- conversation_id = conversations_collection.insert_one(
- {
- "user": "local",
- "date": datetime.datetime.utcnow(),
- "name": completion,
- "queries": [
- {
- "prompt": question,
- "response": response,
- "sources": source_log_docs,
- "tool_calls": tool_calls,
- }
- ],
- }
- ).inserted_id
+ conversation_data = {
+ "user": "local",
+                "date": current_time,
+ "name": completion,
+ "queries": [
+ {
+ "prompt": question,
+ "response": response,
+ "sources": source_log_docs,
+ "tool_calls": tool_calls,
+ "timestamp": current_time
+ }
+ ],
+ }
+ if api_key:
+ api_key_doc = api_key_collection.find_one({"key": api_key})
+ if api_key_doc:
+ conversation_data["api_key"] = api_key_doc["key"]
+ conversation_id = conversations_collection.insert_one(conversation_data).inserted_id
return conversation_id
@@ -197,11 +204,15 @@ def get_prompt(prompt_id):
prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
return prompt
-
def complete_stream(
- question, retriever, conversation_id, user_api_key, isNoneDoc=False, index=None
+ question,
+ retriever,
+ conversation_id,
+ user_api_key,
+ isNoneDoc=False,
+ index=None,
+ should_save_conversation=True
):
-
try:
response_full = ""
source_log_docs = []
@@ -232,9 +243,12 @@ def complete_stream(
doc["source"] = "None"
llm = LLMCreator.create_llm(
- settings.LLM_NAME, api_key=settings.API_KEY, user_api_key=user_api_key
+ settings.LLM_NAME,
+ api_key=settings.API_KEY,
+ user_api_key=user_api_key
)
- if user_api_key is None:
+
+ if should_save_conversation:
conversation_id = save_conversation(
conversation_id,
question,
@@ -243,10 +257,14 @@ def complete_stream(
tool_calls,
llm,
index,
+ api_key=user_api_key
)
- # send data.type = "end" to indicate that the stream has ended as json
- data = json.dumps({"type": "id", "id": str(conversation_id)})
- yield f"data: {data}\n\n"
+ else:
+ conversation_id = None
+
+            # emit data.type = "id" with the saved conversation id (or null when saving was skipped)
+            data = json.dumps({"type": "id", "id": str(conversation_id) if conversation_id else None})
+ yield f"data: {data}\n\n"
retriever_params = retriever.get_params()
user_logs_collection.insert_one(
@@ -309,6 +327,9 @@ class Stream(Resource):
"index": fields.Integer(
required=False, description="The position where query is to be updated"
),
+ "save_conversation": fields.Boolean(
+ required=False, default=True, description="Flag to save conversation"
+ ),
},
)
@@ -323,6 +344,8 @@ class Stream(Resource):
if missing_fields:
return missing_fields
+ save_conv = data.get("save_conversation", True)
+
try:
question = data["question"]
history = limit_chat_history(
@@ -381,6 +404,7 @@ class Stream(Resource):
user_api_key=user_api_key,
isNoneDoc=data.get("isNoneDoc"),
index=index,
+ should_save_conversation=save_conv,
),
mimetype="text/event-stream",
)
diff --git a/application/api/user/routes.py b/application/api/user/routes.py
index f71ab3dc..6204ada4 100644
--- a/application/api/user/routes.py
+++ b/application/api/user/routes.py
@@ -106,11 +106,14 @@ class DeleteAllConversations(Resource):
@user_ns.route("/api/get_conversations")
class GetConversations(Resource):
@api.doc(
- description="Retrieve a list of the latest 30 conversations",
+ description="Retrieve a list of the latest 30 conversations (excluding API key conversations)",
)
def get(self):
try:
- conversations = conversations_collection.find().sort("date", -1).limit(30)
+ conversations = conversations_collection.find(
+ {"api_key": {"$exists": False}}
+ ).sort("date", -1).limit(30)
+
list_conversations = [
{"id": str(conversation["_id"]), "name": conversation["name"]}
for conversation in conversations
@@ -213,17 +216,34 @@ class SubmitFeedback(Resource):
return missing_fields
try:
- conversations_collection.update_one(
- {
- "_id": ObjectId(data["conversation_id"]),
- f"queries.{data['question_index']}": {"$exists": True},
- },
- {
- "$set": {
- f"queries.{data['question_index']}.feedback": data["feedback"]
- }
- },
- )
+ if data["feedback"] is None:
+ # Remove feedback and feedback_timestamp if feedback is null
+ conversations_collection.update_one(
+ {
+ "_id": ObjectId(data["conversation_id"]),
+ f"queries.{data['question_index']}": {"$exists": True},
+ },
+ {
+ "$unset": {
+ f"queries.{data['question_index']}.feedback": "",
+ f"queries.{data['question_index']}.feedback_timestamp": ""
+ }
+ },
+ )
+ else:
+ # Set feedback and feedback_timestamp if feedback has a value
+ conversations_collection.update_one(
+ {
+ "_id": ObjectId(data["conversation_id"]),
+ f"queries.{data['question_index']}": {"$exists": True},
+ },
+ {
+ "$set": {
+ f"queries.{data['question_index']}.feedback": data["feedback"],
+ f"queries.{data['question_index']}.feedback_timestamp": datetime.datetime.now(datetime.timezone.utc)
+ }
+ },
+ )
except Exception as err:
current_app.logger.error(f"Error submitting feedback: {err}")
@@ -1186,21 +1206,12 @@ class GetMessageAnalytics(Resource):
get_message_analytics_model = api.model(
"GetMessageAnalyticsModel",
{
- "api_key_id": fields.String(
- required=False,
- description="API Key ID",
- ),
+ "api_key_id": fields.String(required=False, description="API Key ID"),
"filter_option": fields.String(
required=False,
description="Filter option for analytics",
default="last_30_days",
- enum=[
- "last_hour",
- "last_24_hour",
- "last_7_days",
- "last_15_days",
- "last_30_days",
- ],
+ enum=["last_hour", "last_24_hour", "last_7_days", "last_15_days", "last_30_days"],
),
},
)
@@ -1221,42 +1232,21 @@ class GetMessageAnalytics(Resource):
except Exception as err:
current_app.logger.error(f"Error getting API key: {err}")
return make_response(jsonify({"success": False}), 400)
+
end_date = datetime.datetime.now(datetime.timezone.utc)
if filter_option == "last_hour":
start_date = end_date - datetime.timedelta(hours=1)
group_format = "%Y-%m-%d %H:%M:00"
- group_stage = {
- "$group": {
- "_id": {
- "minute": {
- "$dateToString": {"format": group_format, "date": "$date"}
- }
- },
- "total_messages": {"$sum": 1},
- }
- }
-
elif filter_option == "last_24_hour":
start_date = end_date - datetime.timedelta(hours=24)
group_format = "%Y-%m-%d %H:00"
- group_stage = {
- "$group": {
- "_id": {
- "hour": {
- "$dateToString": {"format": group_format, "date": "$date"}
- }
- },
- "total_messages": {"$sum": 1},
- }
- }
-
else:
if filter_option in ["last_7_days", "last_15_days", "last_30_days"]:
filter_days = (
- 6
- if filter_option == "last_7_days"
- else (14 if filter_option == "last_15_days" else 29)
+ 6 if filter_option == "last_7_days"
+ else 14 if filter_option == "last_15_days"
+ else 29
)
else:
return make_response(
@@ -1264,36 +1254,44 @@ class GetMessageAnalytics(Resource):
)
start_date = end_date - datetime.timedelta(days=filter_days)
start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
- end_date = end_date.replace(
- hour=23, minute=59, second=59, microsecond=999999
- )
+ end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999)
group_format = "%Y-%m-%d"
- group_stage = {
- "$group": {
- "_id": {
- "day": {
- "$dateToString": {"format": group_format, "date": "$date"}
- }
- },
- "total_messages": {"$sum": 1},
- }
- }
try:
- match_stage = {
- "$match": {
- "date": {"$gte": start_date, "$lte": end_date},
- }
- }
- if api_key:
- match_stage["$match"]["api_key"] = api_key
- message_data = conversations_collection.aggregate(
- [
- match_stage,
- group_stage,
- {"$sort": {"_id": 1}},
- ]
- )
+ pipeline = [
+ # Initial match for API key if provided
+ {
+ "$match": {
+ "api_key": api_key if api_key else {"$exists": False}
+ }
+ },
+ {"$unwind": "$queries"},
+ # Match queries within the time range
+ {
+ "$match": {
+ "queries.timestamp": {
+ "$gte": start_date,
+ "$lte": end_date
+ }
+ }
+ },
+ # Group by formatted timestamp
+ {
+ "$group": {
+ "_id": {
+ "$dateToString": {
+ "format": group_format,
+ "date": "$queries.timestamp"
+ }
+ },
+ "count": {"$sum": 1}
+ }
+ },
+ # Sort by timestamp
+ {"$sort": {"_id": 1}}
+ ]
+
+ message_data = conversations_collection.aggregate(pipeline)
if filter_option == "last_hour":
intervals = generate_minute_range(start_date, end_date)
@@ -1305,12 +1303,7 @@ class GetMessageAnalytics(Resource):
daily_messages = {interval: 0 for interval in intervals}
for entry in message_data:
- if filter_option == "last_hour":
- daily_messages[entry["_id"]["minute"]] = entry["total_messages"]
- elif filter_option == "last_24_hour":
- daily_messages[entry["_id"]["hour"]] = entry["total_messages"]
- else:
- daily_messages[entry["_id"]["day"]] = entry["total_messages"]
+ daily_messages[entry["_id"]] = entry["count"]
except Exception as err:
current_app.logger.error(f"Error getting message analytics: {err}")
@@ -1358,6 +1351,7 @@ class GetTokenAnalytics(Resource):
except Exception as err:
current_app.logger.error(f"Error getting API key: {err}")
return make_response(jsonify({"success": False}), 400)
+
end_date = datetime.datetime.now(datetime.timezone.utc)
if filter_option == "last_hour":
@@ -1378,7 +1372,6 @@ class GetTokenAnalytics(Resource):
},
}
}
-
elif filter_option == "last_24_hour":
start_date = end_date - datetime.timedelta(hours=24)
group_format = "%Y-%m-%d %H:00"
@@ -1397,7 +1390,6 @@ class GetTokenAnalytics(Resource):
},
}
}
-
else:
if filter_option in ["last_7_days", "last_15_days", "last_30_days"]:
filter_days = (
@@ -1439,6 +1431,8 @@ class GetTokenAnalytics(Resource):
}
if api_key:
match_stage["$match"]["api_key"] = api_key
+ else:
+ match_stage["$match"]["api_key"] = {"$exists": False}
token_usage_data = token_usage_collection.aggregate(
[
@@ -1517,11 +1511,11 @@ class GetFeedbackAnalytics(Resource):
if filter_option == "last_hour":
start_date = end_date - datetime.timedelta(hours=1)
group_format = "%Y-%m-%d %H:%M:00"
- date_field = {"$dateToString": {"format": group_format, "date": "$date"}}
+ date_field = {"$dateToString": {"format": group_format, "date": "$queries.feedback_timestamp"}}
elif filter_option == "last_24_hour":
start_date = end_date - datetime.timedelta(hours=24)
group_format = "%Y-%m-%d %H:00"
- date_field = {"$dateToString": {"format": group_format, "date": "$date"}}
+ date_field = {"$dateToString": {"format": group_format, "date": "$queries.feedback_timestamp"}}
else:
if filter_option in ["last_7_days", "last_15_days", "last_30_days"]:
filter_days = (
@@ -1539,17 +1533,19 @@ class GetFeedbackAnalytics(Resource):
hour=23, minute=59, second=59, microsecond=999999
)
group_format = "%Y-%m-%d"
- date_field = {"$dateToString": {"format": group_format, "date": "$date"}}
+ date_field = {"$dateToString": {"format": group_format, "date": "$queries.feedback_timestamp"}}
try:
match_stage = {
"$match": {
- "date": {"$gte": start_date, "$lte": end_date},
- "queries": {"$exists": True, "$ne": []},
+ "queries.feedback_timestamp": {"$gte": start_date, "$lte": end_date},
+ "queries.feedback": {"$exists": True}
}
}
if api_key:
match_stage["$match"]["api_key"] = api_key
+ else:
+ match_stage["$match"]["api_key"] = {"$exists": False}
# Unwind the queries array to process each query separately
pipeline = [
diff --git a/extensions/react-widget/src/components/DocsGPTWidget.tsx b/extensions/react-widget/src/components/DocsGPTWidget.tsx
index 8aa2e0e6..142853e2 100644
--- a/extensions/react-widget/src/components/DocsGPTWidget.tsx
+++ b/extensions/react-widget/src/components/DocsGPTWidget.tsx
@@ -6,8 +6,8 @@ import { PaperPlaneIcon, RocketIcon, ExclamationTriangleIcon, Cross2Icon } from
import { FEEDBACK, MESSAGE_TYPE, Query, Status, WidgetCoreProps, WidgetProps } from '../types/index';
import { fetchAnswerStreaming, sendFeedback } from '../requests/streamingApi';
import { ThemeProvider } from 'styled-components';
-import Like from "../assets/like.svg"
-import Dislike from "../assets/dislike.svg"
+import Like from '../assets/like.svg';
+import Dislike from '../assets/dislike.svg';
import MarkdownIt from 'markdown-it';
const themes = {
@@ -592,8 +592,8 @@ export const DocsGPTWidget = (props: WidgetProps) => {
)
}
export const WidgetCore = ({
- apiHost = 'https://gptcloud.arc53.com',
- apiKey = "74039c6d-bff7-44ce-ae55-2973cbf13837",
+  apiHost = 'https://gptcloud.arc53.com',
+  apiKey = "74039c6d-bff7-44ce-ae55-2973cbf13837",
//apiKey = '82962c9a-aa77-4152-94e5-a4f84fd44c6a',
avatar = 'https://d3dg1063dc54p9.cloudfront.net/cute-docsgpt.png',
title = 'Get AI assistance',
@@ -655,32 +655,59 @@ export const WidgetCore = ({
}, [queries.length, queries[queries.length - 1]?.response]);
async function handleFeedback(feedback: FEEDBACK, index: number) {
- let query = queries[index]
- if (!query.response)
+ let query = queries[index];
+ if (!query.response || !conversationId) {
+ console.log("Cannot submit feedback: missing response or conversation ID");
return;
- if (query.feedback != feedback) {
- sendFeedback({
+ }
+
+ // If clicking the same feedback button that's already active, remove the feedback by sending null
+ if (query.feedback === feedback) {
+ try {
+ const response = await sendFeedback({
+ question: query.prompt,
+ answer: query.response,
+ feedback: null,
+ apikey: apiKey,
+ conversation_id: conversationId,
+ question_index: index,
+ }, apiHost);
+
+ if (response.status === 200) {
+ const updatedQuery = { ...query };
+ delete updatedQuery.feedback;
+ setQueries((prev: Query[]) =>
+ prev.map((q, i) => (i === index ? updatedQuery : q))
+ );
+ }
+ } catch (err) {
+ console.error("Failed to submit feedback:", err);
+ }
+ return;
+ }
+
+ try {
+ const response = await sendFeedback({
question: query.prompt,
answer: query.response,
feedback: feedback,
- apikey: apiKey
- }, apiHost)
- .then(res => {
- if (res.status == 200) {
- query.feedback = feedback;
- setQueries((prev: Query[]) => {
- return prev.map((q, i) => (i === index ? query : q));
- });
- }
- })
- .catch(err => console.log("Connection failed", err))
- }
- else {
- delete query.feedback;
- setQueries((prev: Query[]) => {
- return prev.map((q, i) => (i === index ? query : q));
- });
+ apikey: apiKey,
+ conversation_id: conversationId,
+ question_index: index,
+ }, apiHost);
+ if (response.status === 200) {
+ setQueries((prev: Query[]) => {
+ return prev.map((q, i) => {
+ if (i === index) {
+ return { ...q, feedback: feedback };
+ }
+ return q;
+ });
+ });
+ }
+ } catch (err) {
+ console.error("Failed to submit feedback:", err);
}
}
@@ -808,20 +835,34 @@ export const WidgetCore = ({
{collectFeedback &&