Mirror of https://github.com/arc53/DocsGPT.git, synced 2025-12-03 10:33:17 +00:00
feat: agents route replacing chatbots
- Removed API Keys tab from SettingsBar and adjusted tab layout.
- Improved styling for tab scrolling buttons and gradient indicators.
- Introduced AgentDetailsModal for displaying agent access details.
- Updated Analytics component to fetch agent data and handle analytics for the selected agent.
- Refactored Logs component to accept agentId as a prop for filtering logs.
- Enhanced type definitions for InputProps to include textSize.
- Cleaned up unused imports and optimized component structure across various files.
@@ -27,7 +27,7 @@ db = mongo["docsgpt"]
 conversations_collection = db["conversations"]
 sources_collection = db["sources"]
 prompts_collection = db["prompts"]
-api_key_collection = db["api_keys"]
+agents_collection = db["agents"]
 user_logs_collection = db["user_logs"]
 attachments_collection = db["attachments"]

@@ -87,18 +87,18 @@ def run_async_chain(chain, question, chat_history):


 def get_data_from_api_key(api_key):
-    data = api_key_collection.find_one({"key": api_key})
-    # # Raise custom exception if the API key is not found
-    if data is None:
-        raise Exception("Invalid API Key, please generate new key", 401)
-    if "source" in data and isinstance(data["source"], DBRef):
-        source_doc = db.dereference(data["source"])
+    data = agents_collection.find_one({"key": api_key})
+    if not data:
+        raise Exception("Invalid API Key, please generate a new key", 401)
+
+    source = data.get("source")
+    if isinstance(source, DBRef):
+        source_doc = db.dereference(source)
         data["source"] = str(source_doc["_id"])
-        if "retriever" in source_doc:
-            data["retriever"] = source_doc["retriever"]
+        data["retriever"] = source_doc.get("retriever", data.get("retriever"))
     else:
         data["source"] = {}

     return data


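For orientation, the sketch below shows the kind of document the rewritten lookup now expects to find in agents_collection. It is a hypothetical example: the field names are inferred from what get_data_from_api_key and the /stream handler read, and every value is illustrative.

from bson.dbref import DBRef
from bson.objectid import ObjectId

# Hypothetical agents_collection document (illustrative values only).
example_agent = {
    "key": "docsgpt-api-key-123",  # matched by get_data_from_api_key
    "user": "local",               # becomes decoded_token["sub"]
    "source": DBRef("sources", ObjectId("66b1f2c9e4b0a1a2b3c4d5e6")),  # dereferenced to a sources document
    "retriever": "classic",        # fallback when the source document carries no retriever
    "agent_type": "classic",       # later passed to AgentCreator.create_agent
}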
@@ -128,7 +128,7 @@ def save_conversation(
     llm,
     decoded_token,
     index=None,
-    api_key=None
+    api_key=None,
 ):
     current_time = datetime.datetime.now(datetime.timezone.utc)
     if conversation_id is not None and index is not None:
@@ -202,7 +202,7 @@ def save_conversation(
             ],
         }
         if api_key:
-            api_key_doc = api_key_collection.find_one({"key": api_key})
+            api_key_doc = agents_collection.find_one({"key": api_key})
             if api_key_doc:
                 conversation_data["api_key"] = api_key_doc["key"]
         conversation_id = conversations_collection.insert_one(
@@ -241,7 +241,9 @@ def complete_stream(

     if attachments:
         attachment_ids = [attachment["id"] for attachment in attachments]
-        logger.info(f"Processing request with {len(attachments)} attachments: {attachment_ids}")
+        logger.info(
+            f"Processing request with {len(attachments)} attachments: {attachment_ids}"
+        )

     answer = agent.gen(query=question, retriever=retriever)

@@ -294,7 +296,7 @@ def complete_stream(
             llm,
             decoded_token,
             index,
-            api_key=user_api_key
+            api_key=user_api_key,
         )
     else:
         conversation_id = None
@@ -366,7 +368,9 @@ class Stream(Resource):
                 required=False, description="Index of the query to update"
             ),
             "save_conversation": fields.Boolean(
-                required=False, default=True, description="Whether to save the conversation"
+                required=False,
+                default=True,
+                description="Whether to save the conversation",
             ),
             "attachments": fields.List(
                 fields.String, required=False, description="List of attachment IDs"
@@ -400,6 +404,7 @@ class Stream(Resource):
         chunks = int(data.get("chunks", 2))
         token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
         retriever_name = data.get("retriever", "classic")
+        agent_type = settings.AGENT_NAME

         if "api_key" in data:
             data_key = get_data_from_api_key(data["api_key"])
@@ -408,6 +413,7 @@ class Stream(Resource):
             source = {"active_docs": data_key.get("source")}
             retriever_name = data_key.get("retriever", retriever_name)
             user_api_key = data["api_key"]
+            agent_type = data_key.get("agent_type", agent_type)
             decoded_token = {"sub": data_key.get("user")}

         elif "active_docs" in data:
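To make the fallback behaviour concrete, here is a hypothetical /stream request body. Only api_key, active_docs, save_conversation, attachments, and index appear in this diff; the question field is an assumption about the existing API. When api_key resolves to an agent record, source, retriever, agent_type, and the user come from that record rather than from the request.

import json

# Hypothetical request body for POST /stream (illustrative values only).
payload = {
    "question": "How do I deploy DocsGPT?",  # assumed field, not part of this diff
    "api_key": "docsgpt-api-key-123",        # resolved via get_data_from_api_key
    "save_conversation": True,               # default documented in the request model above
    "attachments": [],                       # optional list of attachment IDs
}
print(json.dumps(payload, indent=2))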
@@ -423,8 +429,10 @@ class Stream(Resource):

         if not decoded_token:
             return make_response({"error": "Unauthorized"}, 401)

-        attachments = get_attachments_content(attachment_ids, decoded_token.get("sub"))
+        attachments = get_attachments_content(
+            attachment_ids, decoded_token.get("sub")
+        )

         logger.info(
             f"/stream - request_data: {data}, source: {source}, attachments: {len(attachments)}",
@@ -436,7 +444,7 @@ class Stream(Resource):
             chunks = 0

         agent = AgentCreator.create_agent(
-            settings.AGENT_NAME,
+            agent_type,
             endpoint="stream",
             llm_name=settings.LLM_NAME,
             gpt_model=gpt_model,
@@ -552,6 +560,7 @@ class Answer(Resource):
         chunks = int(data.get("chunks", 2))
         token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
         retriever_name = data.get("retriever", "classic")
+        agent_type = settings.AGENT_NAME

         if "api_key" in data:
             data_key = get_data_from_api_key(data["api_key"])
@@ -560,6 +569,7 @@ class Answer(Resource):
             source = {"active_docs": data_key.get("source")}
             retriever_name = data_key.get("retriever", retriever_name)
             user_api_key = data["api_key"]
+            agent_type = data_key.get("agent_type", agent_type)
             decoded_token = {"sub": data_key.get("user")}

         elif "active_docs" in data:
@@ -584,7 +594,7 @@ class Answer(Resource):
         )

         agent = AgentCreator.create_agent(
-            settings.AGENT_NAME,
+            agent_type,
             endpoint="api/answer",
             llm_name=settings.LLM_NAME,
             gpt_model=gpt_model,
@@ -811,33 +821,34 @@ class Search(Resource):
 def get_attachments_content(attachment_ids, user):
     """
     Retrieve content from attachment documents based on their IDs.

     Args:
         attachment_ids (list): List of attachment document IDs
         user (str): User identifier to verify ownership

     Returns:
         list: List of dictionaries containing attachment content and metadata
     """
     if not attachment_ids:
         return []

     attachments = []
     for attachment_id in attachment_ids:
         try:
-            attachment_doc = attachments_collection.find_one({
-                "_id": ObjectId(attachment_id),
-                "user": user
-            })
+            attachment_doc = attachments_collection.find_one(
+                {"_id": ObjectId(attachment_id), "user": user}
+            )

             if attachment_doc:
-                attachments.append({
-                    "id": str(attachment_doc["_id"]),
-                    "content": attachment_doc["content"],
-                    "token_count": attachment_doc.get("token_count", 0),
-                    "path": attachment_doc.get("path", "")
-                })
+                attachments.append(
+                    {
+                        "id": str(attachment_doc["_id"]),
+                        "content": attachment_doc["content"],
+                        "token_count": attachment_doc.get("token_count", 0),
+                        "path": attachment_doc.get("path", ""),
+                    }
+                )
         except Exception as e:
             logger.error(f"Error retrieving attachment {attachment_id}: {e}")

     return attachments