diff --git a/application/agents/base.py b/application/agents/base.py index f2cabdb7..77729fe6 100644 --- a/application/agents/base.py +++ b/application/agents/base.py @@ -140,28 +140,28 @@ class BaseAgent(ABC): tool_id, action_name, call_args = parser.parse_args(call) call_id = getattr(call, "id", None) or str(uuid.uuid4()) - + # Check if parsing failed if tool_id is None or action_name is None: error_message = f"Error: Failed to parse LLM tool call. Tool name: {getattr(call, 'name', 'unknown')}" logger.error(error_message) - + tool_call_data = { "tool_name": "unknown", "call_id": call_id, - "action_name": getattr(call, 'name', 'unknown'), + "action_name": getattr(call, "name", "unknown"), "arguments": call_args or {}, "result": f"Failed to parse tool call. Invalid tool name format: {getattr(call, 'name', 'unknown')}", } yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}} self.tool_calls.append(tool_call_data) return "Failed to parse tool call.", call_id - + # Check if tool_id exists in available tools if tool_id not in tools_dict: error_message = f"Error: Tool ID '{tool_id}' extracted from LLM call not found in available tools_dict. Available IDs: {list(tools_dict.keys())}" logger.error(error_message) - + # Return error result tool_call_data = { "tool_name": "unknown", @@ -173,7 +173,7 @@ class BaseAgent(ABC): yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}} self.tool_calls.append(tool_call_data) return f"Tool with ID {tool_id} not found.", call_id - + tool_call_data = { "tool_name": tools_dict[tool_id]["name"], "call_id": call_id, @@ -225,6 +225,7 @@ class BaseAgent(ABC): if tool_data["name"] == "api_tool" else tool_data["config"] ), + user_id=self.user, # Pass user ID for MCP tools credential decryption ) if tool_data["name"] == "api_tool": print( diff --git a/application/agents/tools/mcp_tool.py b/application/agents/tools/mcp_tool.py new file mode 100644 index 00000000..dc689367 --- /dev/null +++ b/application/agents/tools/mcp_tool.py @@ -0,0 +1,405 @@ +import json +import time +from typing import Any, Dict, List, Optional + +import requests + +from application.agents.tools.base import Tool +from application.security.encryption import decrypt_credentials + + +_mcp_session_cache = {} + + +class MCPTool(Tool): + """ + MCP Tool + Connect to remote Model Context Protocol (MCP) servers to access dynamic tools and resources. Supports various authentication methods and provides secure access to external services through the MCP protocol. + """ + + def __init__(self, config: Dict[str, Any], user_id: Optional[str] = None): + """ + Initialize the MCP Tool with configuration. 
+ + Args: + config: Dictionary containing MCP server configuration: + - server_url: URL of the remote MCP server + - auth_type: Type of authentication (api_key, bearer, basic, none) + - encrypted_credentials: Encrypted credentials (if available) + - timeout: Request timeout in seconds (default: 30) + user_id: User ID for decrypting credentials (required if encrypted_credentials exist) + """ + self.config = config + self.server_url = config.get("server_url", "") + self.auth_type = config.get("auth_type", "none") + self.timeout = config.get("timeout", 30) + + self.auth_credentials = {} + if config.get("encrypted_credentials") and user_id: + self.auth_credentials = decrypt_credentials( + config["encrypted_credentials"], user_id + ) + else: + self.auth_credentials = config.get("auth_credentials", {}) + self.available_tools = [] + self._session = requests.Session() + self._mcp_session_id = None + self._setup_authentication() + self._cache_key = self._generate_cache_key() + + def _setup_authentication(self): + """Setup authentication for the MCP server connection.""" + if self.auth_type == "api_key": + api_key = self.auth_credentials.get("api_key", "") + header_name = self.auth_credentials.get("api_key_header", "X-API-Key") + if api_key: + self._session.headers.update({header_name: api_key}) + elif self.auth_type == "bearer": + token = self.auth_credentials.get("bearer_token", "") + if token: + self._session.headers.update({"Authorization": f"Bearer {token}"}) + elif self.auth_type == "basic": + username = self.auth_credentials.get("username", "") + password = self.auth_credentials.get("password", "") + if username and password: + self._session.auth = (username, password) + + def _generate_cache_key(self) -> str: + """Generate a unique cache key for this MCP server configuration.""" + auth_key = "" + if self.auth_type == "bearer": + token = self.auth_credentials.get("bearer_token", "") + auth_key = f"bearer:{token[:10]}..." if token else "bearer:none" + elif self.auth_type == "api_key": + api_key = self.auth_credentials.get("api_key", "") + auth_key = f"apikey:{api_key[:10]}..." if api_key else "apikey:none" + elif self.auth_type == "basic": + username = self.auth_credentials.get("username", "") + auth_key = f"basic:{username}" + else: + auth_key = "none" + return f"{self.server_url}#{auth_key}" + + def _get_cached_session(self) -> Optional[str]: + """Get cached session ID if available and not expired.""" + global _mcp_session_cache + + if self._cache_key in _mcp_session_cache: + session_data = _mcp_session_cache[self._cache_key] + if time.time() - session_data["created_at"] < 1800: + return session_data["session_id"] + else: + del _mcp_session_cache[self._cache_key] + return None + + def _cache_session(self, session_id: str): + """Cache the session ID for reuse.""" + global _mcp_session_cache + _mcp_session_cache[self._cache_key] = { + "session_id": session_id, + "created_at": time.time(), + } + + def _initialize_mcp_connection(self) -> Dict: + """ + Initialize MCP connection with the server, using cached session if available. 
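# Illustrative sketch, not part of the patch: a config dict of the shape the
# __init__ docstring above describes, with hypothetical values. With
# auth_type="bearer", _setup_authentication places the token on the shared
# requests.Session as an Authorization header; no network call happens at init.
from application.agents.tools.mcp_tool import MCPTool

example_config = {
    "server_url": "https://mcp.example.com/mcp",  # hypothetical server
    "auth_type": "bearer",
    "auth_credentials": {"bearer_token": "example-token"},
    "timeout": 30,
}
tool = MCPTool(example_config)  # user_id is only needed when encrypted_credentials are present
# tool._session.headers["Authorization"] == "Bearer example-token"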
+ + Returns: + Server capabilities and information + """ + cached_session = self._get_cached_session() + if cached_session: + self._mcp_session_id = cached_session + return {"cached": True} + try: + init_params = { + "protocolVersion": "2024-11-05", + "capabilities": {"roots": {"listChanged": True}, "sampling": {}}, + "clientInfo": {"name": "DocsGPT", "version": "1.0.0"}, + } + response = self._make_mcp_request("initialize", init_params) + self._make_mcp_request("notifications/initialized") + + return response + except Exception as e: + return {"error": str(e), "fallback": True} + + def _ensure_valid_session(self): + """Ensure we have a valid MCP session, reinitializing if needed.""" + if not self._mcp_session_id: + self._initialize_mcp_connection() + + def _make_mcp_request(self, method: str, params: Optional[Dict] = None) -> Dict: + """ + Make an MCP protocol request to the server with automatic session recovery. + + Args: + method: MCP method name (e.g., "tools/list", "tools/call") + params: Parameters for the MCP method + + Returns: + Response data as dictionary + + Raises: + Exception: If request fails after retry + """ + mcp_message = {"jsonrpc": "2.0", "method": method} + + if not method.startswith("notifications/"): + mcp_message["id"] = 1 + if params: + mcp_message["params"] = params + return self._execute_mcp_request(mcp_message, method) + + def _execute_mcp_request( + self, mcp_message: Dict, method: str, is_retry: bool = False + ) -> Dict: + """Execute MCP request with optional retry on session failure.""" + try: + final_headers = self._session.headers.copy() + final_headers.update( + { + "Content-Type": "application/json", + "Accept": "application/json, text/event-stream", + } + ) + + if self._mcp_session_id: + final_headers["Mcp-Session-Id"] = self._mcp_session_id + response = self._session.post( + self.server_url.rstrip("/"), + json=mcp_message, + headers=final_headers, + timeout=self.timeout, + ) + + if "mcp-session-id" in response.headers: + self._mcp_session_id = response.headers["mcp-session-id"] + self._cache_session(self._mcp_session_id) + response.raise_for_status() + + if method.startswith("notifications/"): + return {} + response_text = response.text.strip() + if response_text.startswith("event:") and "data:" in response_text: + lines = response_text.split("\n") + data_line = None + for line in lines: + if line.startswith("data:"): + data_line = line[5:].strip() + break + if data_line: + try: + result = json.loads(data_line) + except json.JSONDecodeError: + raise Exception(f"Invalid JSON in SSE data: {data_line}") + else: + raise Exception(f"No data found in SSE response: {response_text}") + else: + try: + result = response.json() + except json.JSONDecodeError: + raise Exception(f"Invalid JSON response: {response.text}") + if "error" in result: + error_msg = result["error"] + if isinstance(error_msg, dict): + error_msg = error_msg.get("message", str(error_msg)) + raise Exception(f"MCP server error: {error_msg}") + return result.get("result", result) + except requests.exceptions.RequestException as e: + if not is_retry and self._should_retry_with_new_session(e): + self._invalidate_and_refresh_session() + return self._execute_mcp_request(mcp_message, method, is_retry=True) + raise Exception(f"MCP server request failed: {str(e)}") + + def _should_retry_with_new_session(self, error: Exception) -> bool: + """Check if error indicates session invalidation and retry is warranted.""" + error_str = str(error).lower() + return ( + any( + indicator in error_str + for indicator 
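# Illustrative sketch, not part of the patch: the JSON-RPC 2.0 envelope that
# _make_mcp_request builds for a non-notification call, and the SSE framing that
# _execute_mcp_request unwraps. The tool name, arguments, and result payload here
# are hypothetical.
import json

envelope = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {"name": "search", "arguments": {"query": "docs"}},
}
sse_body = 'event: message\ndata: {"jsonrpc": "2.0", "id": 1, "result": {"ok": true}}'
data_line = next(
    line[5:].strip() for line in sse_body.split("\n") if line.startswith("data:")
)
result = json.loads(data_line).get("result", {})  # -> {"ok": True}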
in [ + "invalid session", + "session expired", + "unauthorized", + "401", + "403", + ] + ) + and self._mcp_session_id is not None + ) + + def _invalidate_and_refresh_session(self) -> None: + """Invalidate current session and create a new one.""" + global _mcp_session_cache + if self._cache_key in _mcp_session_cache: + del _mcp_session_cache[self._cache_key] + self._mcp_session_id = None + self._initialize_mcp_connection() + + def discover_tools(self) -> List[Dict]: + """ + Discover available tools from the MCP server using MCP protocol. + + Returns: + List of tool definitions from the server + """ + try: + self._ensure_valid_session() + + response = self._make_mcp_request("tools/list") + + # Handle both formats: response with 'tools' key or response that IS the tools list + + if isinstance(response, dict): + if "tools" in response: + self.available_tools = response["tools"] + elif ( + "result" in response + and isinstance(response["result"], dict) + and "tools" in response["result"] + ): + self.available_tools = response["result"]["tools"] + else: + self.available_tools = [response] if response else [] + elif isinstance(response, list): + self.available_tools = response + else: + self.available_tools = [] + return self.available_tools + except Exception as e: + raise Exception(f"Failed to discover tools from MCP server: {str(e)}") + + def execute_action(self, action_name: str, **kwargs) -> Any: + """ + Execute an action on the remote MCP server using MCP protocol. + + Args: + action_name: Name of the action to execute + **kwargs: Parameters for the action + + Returns: + Result from the MCP server + """ + self._ensure_valid_session() + + # Skipping empty/None values - letting the server use defaults + + cleaned_kwargs = {} + for key, value in kwargs.items(): + if value == "" or value is None: + continue + cleaned_kwargs[key] = value + call_params = {"name": action_name, "arguments": cleaned_kwargs} + try: + result = self._make_mcp_request("tools/call", call_params) + return result + except Exception as e: + raise Exception(f"Failed to execute action '{action_name}': {str(e)}") + + def get_actions_metadata(self) -> List[Dict]: + """ + Get metadata for all available actions. + + Returns: + List of action metadata dictionaries + """ + actions = [] + for tool in self.available_tools: + input_schema = ( + tool.get("inputSchema") + or tool.get("input_schema") + or tool.get("schema") + or tool.get("parameters") + ) + + parameters_schema = { + "type": "object", + "properties": {}, + "required": [], + } + + if input_schema: + if isinstance(input_schema, dict): + if "properties" in input_schema: + parameters_schema = { + "type": input_schema.get("type", "object"), + "properties": input_schema.get("properties", {}), + "required": input_schema.get("required", []), + } + + for key in ["additionalProperties", "description"]: + if key in input_schema: + parameters_schema[key] = input_schema[key] + else: + parameters_schema["properties"] = input_schema + action = { + "name": tool.get("name", ""), + "description": tool.get("description", ""), + "parameters": parameters_schema, + } + actions.append(action) + return actions + + def test_connection(self) -> Dict: + """ + Test the connection to the MCP server and validate functionality. + + Returns: + Dictionary with connection test results including tool count + """ + try: + self._mcp_session_id = None + + init_result = self._initialize_mcp_connection() + + tools = self.discover_tools() + + message = f"Successfully connected to MCP server. 
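# Illustrative sketch, not part of the patch: how a discovered MCP tool definition
# (hypothetical) is reshaped by get_actions_metadata above into the action metadata
# consumed elsewhere in the app.
mcp_tool_def = {
    "name": "read_file",
    "description": "Read a file from the workspace",
    "inputSchema": {
        "type": "object",
        "properties": {"path": {"type": "string"}},
        "required": ["path"],
    },
}
action_metadata = {
    "name": "read_file",
    "description": "Read a file from the workspace",
    "parameters": {
        "type": "object",
        "properties": {"path": {"type": "string"}},
        "required": ["path"],
    },
}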
Found {len(tools)} tools." + if init_result.get("cached"): + message += " (Using cached session)" + elif init_result.get("fallback"): + message += " (No formal initialization required)" + return { + "success": True, + "message": message, + "tools_count": len(tools), + "session_id": self._mcp_session_id, + "tools": [tool.get("name", "unknown") for tool in tools[:5]], + } + except Exception as e: + return { + "success": False, + "message": f"Connection failed: {str(e)}", + "tools_count": 0, + "error_type": type(e).__name__, + } + + def get_config_requirements(self) -> Dict: + return { + "server_url": { + "type": "string", + "description": "URL of the remote MCP server (e.g., https://api.example.com)", + "required": True, + }, + "auth_type": { + "type": "string", + "description": "Authentication type", + "enum": ["none", "api_key", "bearer", "basic"], + "default": "none", + "required": True, + }, + "auth_credentials": { + "type": "object", + "description": "Authentication credentials (varies by auth_type)", + "required": False, + }, + "timeout": { + "type": "integer", + "description": "Request timeout in seconds", + "default": 30, + "minimum": 1, + "maximum": 300, + "required": False, + }, + } diff --git a/application/agents/tools/tool_manager.py b/application/agents/tools/tool_manager.py index ad71db28..d602b762 100644 --- a/application/agents/tools/tool_manager.py +++ b/application/agents/tools/tool_manager.py @@ -23,16 +23,23 @@ class ToolManager: tool_config = self.config.get(name, {}) self.tools[name] = obj(tool_config) - def load_tool(self, tool_name, tool_config): + def load_tool(self, tool_name, tool_config, user_id=None): self.config[tool_name] = tool_config module = importlib.import_module(f"application.agents.tools.{tool_name}") for member_name, obj in inspect.getmembers(module, inspect.isclass): if issubclass(obj, Tool) and obj is not Tool: - return obj(tool_config) + if tool_name == "mcp_tool" and user_id: + return obj(tool_config, user_id) + else: + return obj(tool_config) - def execute_action(self, tool_name, action_name, **kwargs): + def execute_action(self, tool_name, action_name, user_id=None, **kwargs): if tool_name not in self.tools: raise ValueError(f"Tool '{tool_name}' not loaded") + if tool_name == "mcp_tool" and user_id: + tool_config = self.config.get(tool_name, {}) + tool = self.load_tool(tool_name, tool_config, user_id) + return tool.execute_action(action_name, **kwargs) return self.tools[tool_name].execute_action(action_name, **kwargs) def get_all_actions_metadata(self): diff --git a/application/api/answer/services/stream_processor.py b/application/api/answer/services/stream_processor.py index 648d24f5..f6e639ef 100644 --- a/application/api/answer/services/stream_processor.py +++ b/application/api/answer/services/stream_processor.py @@ -69,11 +69,8 @@ class StreamProcessor: self.decoded_token.get("sub") if self.decoded_token is not None else None ) self.conversation_id = self.data.get("conversation_id") - self.source = ( - {"active_docs": self.data["active_docs"]} - if "active_docs" in self.data - else {} - ) + self.source = {} + self.all_sources = [] self.attachments = [] self.history = [] self.agent_config = {} @@ -85,6 +82,8 @@ class StreamProcessor: def initialize(self): """Initialize all required components for processing""" + self._configure_agent() + self._configure_source() self._configure_retriever() self._configure_agent() self._load_conversation_history() @@ -171,13 +170,77 @@ class StreamProcessor: source = data.get("source") if isinstance(source, 
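# Illustrative sketch, not part of the patch: with the ToolManager change above, an
# MCP tool is constructed with the requesting user's id so its encrypted credentials
# can be decrypted, while other tools keep the single-argument construction. The
# ToolManager constructor call and config values are assumptions for this example.
from application.agents.tools.tool_manager import ToolManager

manager = ToolManager(config={})  # assumed constructor signature
mcp = manager.load_tool(
    "mcp_tool",
    {"server_url": "https://mcp.example.com/mcp", "auth_type": "none"},  # hypothetical
    user_id="user-42",
)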
DBRef): source_doc = self.db.dereference(source) - data["source"] = str(source_doc["_id"]) - data["retriever"] = source_doc.get("retriever", data.get("retriever")) - data["chunks"] = source_doc.get("chunks", data.get("chunks")) + if source_doc: + data["source"] = str(source_doc["_id"]) + data["retriever"] = source_doc.get("retriever", data.get("retriever")) + data["chunks"] = source_doc.get("chunks", data.get("chunks")) + else: + data["source"] = None + elif source == "default": + data["source"] = "default" else: data["source"] = None + # Handle multiple sources + + sources = data.get("sources", []) + if sources and isinstance(sources, list): + sources_list = [] + for i, source_ref in enumerate(sources): + if source_ref == "default": + processed_source = { + "id": "default", + "retriever": "classic", + "chunks": data.get("chunks", "2"), + } + sources_list.append(processed_source) + elif isinstance(source_ref, DBRef): + source_doc = self.db.dereference(source_ref) + if source_doc: + processed_source = { + "id": str(source_doc["_id"]), + "retriever": source_doc.get("retriever", "classic"), + "chunks": source_doc.get("chunks", data.get("chunks", "2")), + } + sources_list.append(processed_source) + data["sources"] = sources_list + else: + data["sources"] = [] return data + def _configure_source(self): + """Configure the source based on agent data""" + api_key = self.data.get("api_key") or self.agent_key + + if api_key: + agent_data = self._get_data_from_api_key(api_key) + + if agent_data.get("sources") and len(agent_data["sources"]) > 0: + source_ids = [ + source["id"] for source in agent_data["sources"] if source.get("id") + ] + if source_ids: + self.source = {"active_docs": source_ids} + else: + self.source = {} + self.all_sources = agent_data["sources"] + elif agent_data.get("source"): + self.source = {"active_docs": agent_data["source"]} + self.all_sources = [ + { + "id": agent_data["source"], + "retriever": agent_data.get("retriever", "classic"), + } + ] + else: + self.source = {} + self.all_sources = [] + return + if "active_docs" in self.data: + self.source = {"active_docs": self.data["active_docs"]} + return + self.source = {} + self.all_sources = [] + def _configure_agent(self): """Configure the agent based on request data""" agent_id = self.data.get("agent_id") @@ -203,7 +266,13 @@ class StreamProcessor: if data_key.get("retriever"): self.retriever_config["retriever_name"] = data_key["retriever"] if data_key.get("chunks") is not None: - self.retriever_config["chunks"] = data_key["chunks"] + try: + self.retriever_config["chunks"] = int(data_key["chunks"]) + except (ValueError, TypeError): + logger.warning( + f"Invalid chunks value: {data_key['chunks']}, using default value 2" + ) + self.retriever_config["chunks"] = 2 elif self.agent_key: data_key = self._get_data_from_api_key(self.agent_key) self.agent_config.update( @@ -224,7 +293,13 @@ class StreamProcessor: if data_key.get("retriever"): self.retriever_config["retriever_name"] = data_key["retriever"] if data_key.get("chunks") is not None: - self.retriever_config["chunks"] = data_key["chunks"] + try: + self.retriever_config["chunks"] = int(data_key["chunks"]) + except (ValueError, TypeError): + logger.warning( + f"Invalid chunks value: {data_key['chunks']}, using default value 2" + ) + self.retriever_config["chunks"] = 2 else: self.agent_config.update( { @@ -243,7 +318,8 @@ class StreamProcessor: "token_limit": self.data.get("token_limit", settings.DEFAULT_MAX_HISTORY), } - if "isNoneDoc" in self.data and self.data["isNoneDoc"]: + 
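# Illustrative sketch, not part of the patch: the shapes _configure_source builds for
# an agent that lists multiple sources (ids are hypothetical). self.source keeps the
# retriever contract ({"active_docs": [...]}) while self.all_sources carries the
# per-source retriever/chunks settings resolved by _process_agent_key.
agent_data = {
    "sources": [
        {"id": "default", "retriever": "classic", "chunks": "2"},
        {"id": "64f0c2ab9d1e4a0001a1b2c3", "retriever": "classic", "chunks": "2"},
    ]
}
source = {"active_docs": [s["id"] for s in agent_data["sources"] if s.get("id")]}
all_sources = agent_data["sources"]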
api_key = self.data.get("api_key") or self.agent_key + if not api_key and "isNoneDoc" in self.data and self.data["isNoneDoc"]: self.retriever_config["chunks"] = 0 def create_agent(self): diff --git a/application/api/connector/routes.py b/application/api/connector/routes.py index f1ba247b..aca55b42 100644 --- a/application/api/connector/routes.py +++ b/application/api/connector/routes.py @@ -1,6 +1,5 @@ import datetime import json -import logging from bson.objectid import ObjectId diff --git a/application/api/user/routes.py b/application/api/user/routes.py index 7eae66f6..f0493c7c 100644 --- a/application/api/user/routes.py +++ b/application/api/user/routes.py @@ -3,11 +3,12 @@ import json import math import os import secrets +import tempfile import uuid +import zipfile from functools import wraps from typing import Optional, Tuple -import tempfile -import zipfile + from bson.binary import Binary, UuidRepresentation from bson.dbref import DBRef from bson.objectid import ObjectId @@ -24,7 +25,10 @@ from flask_restx import fields, inputs, Namespace, Resource from pymongo import ReturnDocument from werkzeug.utils import secure_filename +from application.agents.tools.mcp_tool import MCPTool + from application.agents.tools.tool_manager import ToolManager +from application.api import api from application.api.user.tasks import ( ingest, @@ -35,19 +39,19 @@ from application.api.user.tasks import ( ) from application.core.mongo_db import MongoDB from application.core.settings import settings -from application.api import api +from application.parser.connectors.connector_creator import ConnectorCreator +from application.security.encryption import decrypt_credentials, encrypt_credentials from application.storage.storage_creator import StorageCreator from application.tts.google_tts import GoogleTTS from application.utils import ( check_required_fields, generate_image_url, + num_tokens_from_string, safe_filename, validate_function_name, validate_required_fields, ) -from application.utils import num_tokens_from_string from application.vectorstore.vector_creator import VectorCreator -from application.parser.connectors.connector_creator import ConnectorCreator storage = StorageCreator.get_storage() @@ -74,7 +78,6 @@ try: users_collection.create_index("user_id", unique=True) except Exception as e: print("Error creating indexes:", e) - user = Blueprint("user", __name__) user_ns = Namespace("user", description="User related operations", path="/") api.add_namespace(user_ns) @@ -127,11 +130,9 @@ def ensure_user_doc(user_id): updates["agent_preferences.pinned"] = [] if "shared_with_me" not in prefs: updates["agent_preferences.shared_with_me"] = [] - if updates: users_collection.update_one({"user_id": user_id}, {"$set": updates}) user_doc = users_collection.find_one({"user_id": user_id}) - return user_doc @@ -183,7 +184,6 @@ def handle_image_upload( jsonify({"success": False, "message": "Image upload failed"}), 400, ) - return image_url, None @@ -297,8 +297,8 @@ class GetSingleConversation(Resource): ) if not conversation: return make_response(jsonify({"status": "not found"}), 404) - # Process queries to include attachment names + queries = conversation["queries"] for query in queries: if "attachments" in query and query["attachments"]: @@ -499,6 +499,7 @@ class DeleteOldIndexes(Resource): try: # Delete vector index + if settings.VECTOR_STORE == "faiss": index_path = f"indexes/{str(doc['_id'])}" if storage.file_exists(f"{index_path}/index.faiss"): @@ -569,6 +570,7 @@ class UploadFile(Resource): job_name = 
request.form["name"] # Create safe versions for filesystem operations + safe_user = safe_filename(user) dir_name = safe_filename(job_name) base_path = f"{settings.UPLOAD_FOLDER}/{safe_user}/{dir_name}" @@ -576,7 +578,6 @@ class UploadFile(Resource): try: storage = StorageCreator.get_storage() - for file in files: original_filename = file.filename safe_file = safe_filename(original_filename) @@ -587,43 +588,67 @@ class UploadFile(Resource): if zipfile.is_zipfile(temp_file_path): try: - with zipfile.ZipFile(temp_file_path, 'r') as zip_ref: + with zipfile.ZipFile(temp_file_path, "r") as zip_ref: zip_ref.extractall(path=temp_dir) # Walk through extracted files and upload them + for root, _, files in os.walk(temp_dir): for extracted_file in files: - if os.path.join(root, extracted_file) == temp_file_path: + if ( + os.path.join(root, extracted_file) + == temp_file_path + ): continue - rel_path = os.path.relpath(os.path.join(root, extracted_file), temp_dir) + rel_path = os.path.relpath( + os.path.join(root, extracted_file), temp_dir + ) storage_path = f"{base_path}/{rel_path}" - with open(os.path.join(root, extracted_file), 'rb') as f: + with open( + os.path.join(root, extracted_file), "rb" + ) as f: storage.save_file(f, storage_path) except Exception as e: - current_app.logger.error(f"Error extracting zip: {e}", exc_info=True) + current_app.logger.error( + f"Error extracting zip: {e}", exc_info=True + ) # If zip extraction fails, save the original zip file + file_path = f"{base_path}/{safe_file}" - with open(temp_file_path, 'rb') as f: + with open(temp_file_path, "rb") as f: storage.save_file(f, file_path) else: # For non-zip files, save directly + file_path = f"{base_path}/{safe_file}" - with open(temp_file_path, 'rb') as f: + with open(temp_file_path, "rb") as f: storage.save_file(f, file_path) task = ingest.delay( settings.UPLOAD_FOLDER, [ - ".rst", ".md", ".pdf", ".txt", ".docx", ".csv", ".epub", - ".html", ".mdx", ".json", ".xlsx", ".pptx", ".png", - ".jpg", ".jpeg", + ".rst", + ".md", + ".pdf", + ".txt", + ".docx", + ".csv", + ".epub", + ".html", + ".mdx", + ".json", + ".xlsx", + ".pptx", + ".png", + ".jpg", + ".jpeg", ], job_name, user, file_path=base_path, - filename=dir_name + filename=dir_name, ) except Exception as err: current_app.logger.error(f"Error uploading file: {err}", exc_info=True) @@ -637,12 +662,29 @@ class ManageSourceFiles(Resource): api.model( "ManageSourceFilesModel", { - "source_id": fields.String(required=True, description="Source ID to modify"), - "operation": fields.String(required=True, description="Operation: 'add', 'remove', or 'remove_directory'"), - "file_paths": fields.List(fields.String, required=False, description="File paths to remove (for remove operation)"), - "directory_path": fields.String(required=False, description="Directory path to remove (for remove_directory operation)"), - "file": fields.Raw(required=False, description="Files to add (for add operation)"), - "parent_dir": fields.String(required=False, description="Parent directory path relative to source root"), + "source_id": fields.String( + required=True, description="Source ID to modify" + ), + "operation": fields.String( + required=True, + description="Operation: 'add', 'remove', or 'remove_directory'", + ), + "file_paths": fields.List( + fields.String, + required=False, + description="File paths to remove (for remove operation)", + ), + "directory_path": fields.String( + required=False, + description="Directory path to remove (for remove_directory operation)", + ), + "file": fields.Raw( + 
required=False, description="Files to add (for add operation)" + ), + "parent_dir": fields.String( + required=False, + description="Parent directory path relative to source root", + ), }, ) ) @@ -652,7 +694,9 @@ class ManageSourceFiles(Resource): def post(self): decoded_token = request.decoded_token if not decoded_token: - return make_response(jsonify({"success": False, "message": "Unauthorized"}), 401) + return make_response( + jsonify({"success": False, "message": "Unauthorized"}), 401 + ) user = decoded_token.get("sub") source_id = request.form.get("source_id") @@ -660,30 +704,49 @@ class ManageSourceFiles(Resource): if not source_id or not operation: return make_response( - jsonify({"success": False, "message": "source_id and operation are required"}), 400 + jsonify( + { + "success": False, + "message": "source_id and operation are required", + } + ), + 400, ) - if operation not in ["add", "remove", "remove_directory"]: return make_response( - jsonify({"success": False, "message": "operation must be 'add', 'remove', or 'remove_directory'"}), 400 + jsonify( + { + "success": False, + "message": "operation must be 'add', 'remove', or 'remove_directory'", + } + ), + 400, ) - try: ObjectId(source_id) except Exception: return make_response( jsonify({"success": False, "message": "Invalid source ID format"}), 400 ) - try: - source = sources_collection.find_one({"_id": ObjectId(source_id), "user": user}) + source = sources_collection.find_one( + {"_id": ObjectId(source_id), "user": user} + ) if not source: return make_response( - jsonify({"success": False, "message": "Source not found or access denied"}), 404 + jsonify( + { + "success": False, + "message": "Source not found or access denied", + } + ), + 404, ) except Exception as err: current_app.logger.error(f"Error finding source: {err}", exc_info=True) - return make_response(jsonify({"success": False, "message": "Database error"}), 500) + return make_response( + jsonify({"success": False, "message": "Database error"}), 500 + ) try: storage = StorageCreator.get_storage() @@ -692,98 +755,141 @@ class ManageSourceFiles(Resource): if parent_dir and (parent_dir.startswith("/") or ".." 
in parent_dir): return make_response( - jsonify({"success": False, "message": "Invalid parent directory path"}), 400 + jsonify( + {"success": False, "message": "Invalid parent directory path"} + ), + 400, ) - if operation == "add": files = request.files.getlist("file") if not files or all(file.filename == "" for file in files): return make_response( - jsonify({"success": False, "message": "No files provided for add operation"}), 400 + jsonify( + { + "success": False, + "message": "No files provided for add operation", + } + ), + 400, ) - added_files = [] target_dir = source_file_path if parent_dir: target_dir = f"{source_file_path}/{parent_dir}" - for file in files: if file.filename: safe_filename_str = safe_filename(file.filename) file_path = f"{target_dir}/{safe_filename_str}" # Save file to storage + storage.save_file(file, file_path) added_files.append(safe_filename_str) - # Trigger re-ingestion pipeline + from application.api.user.tasks import reingest_source_task task = reingest_source_task.delay(source_id=source_id, user=user) - return make_response(jsonify({ - "success": True, - "message": f"Added {len(added_files)} files", - "added_files": added_files, - "parent_dir": parent_dir, - "reingest_task_id": task.id - }), 200) + return make_response( + jsonify( + { + "success": True, + "message": f"Added {len(added_files)} files", + "added_files": added_files, + "parent_dir": parent_dir, + "reingest_task_id": task.id, + } + ), + 200, + ) elif operation == "remove": file_paths_str = request.form.get("file_paths") if not file_paths_str: return make_response( - jsonify({"success": False, "message": "file_paths required for remove operation"}), 400 + jsonify( + { + "success": False, + "message": "file_paths required for remove operation", + } + ), + 400, ) - try: - file_paths = json.loads(file_paths_str) if isinstance(file_paths_str, str) else file_paths_str + file_paths = ( + json.loads(file_paths_str) + if isinstance(file_paths_str, str) + else file_paths_str + ) except Exception: return make_response( - jsonify({"success": False, "message": "Invalid file_paths format"}), 400 + jsonify( + {"success": False, "message": "Invalid file_paths format"} + ), + 400, ) - # Remove files from storage and directory structure + removed_files = [] for file_path in file_paths: full_path = f"{source_file_path}/{file_path}" # Remove from storage + if storage.file_exists(full_path): storage.delete_file(full_path) removed_files.append(file_path) - # Trigger re-ingestion pipeline + from application.api.user.tasks import reingest_source_task task = reingest_source_task.delay(source_id=source_id, user=user) - return make_response(jsonify({ - "success": True, - "message": f"Removed {len(removed_files)} files", - "removed_files": removed_files, - "reingest_task_id": task.id - }), 200) + return make_response( + jsonify( + { + "success": True, + "message": f"Removed {len(removed_files)} files", + "removed_files": removed_files, + "reingest_task_id": task.id, + } + ), + 200, + ) elif operation == "remove_directory": directory_path = request.form.get("directory_path") if not directory_path: return make_response( - jsonify({"success": False, "message": "directory_path required for remove_directory operation"}), 400 + jsonify( + { + "success": False, + "message": "directory_path required for remove_directory operation", + } + ), + 400, ) - # Validate directory path (prevent path traversal) + if directory_path.startswith("/") or ".." 
in directory_path: current_app.logger.warning( f"Invalid directory path attempted for removal. " f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}" ) return make_response( - jsonify({"success": False, "message": "Invalid directory path"}), 400 + jsonify( + {"success": False, "message": "Invalid directory path"} + ), + 400, ) - full_directory_path = f"{source_file_path}/{directory_path}" if directory_path else source_file_path + full_directory_path = ( + f"{source_file_path}/{directory_path}" + if directory_path + else source_file_path + ) if not storage.is_directory(full_directory_path): current_app.logger.warning( @@ -792,9 +898,14 @@ class ManageSourceFiles(Resource): f"Full path: {full_directory_path}" ) return make_response( - jsonify({"success": False, "message": "Directory not found or is not a directory"}), 404 + jsonify( + { + "success": False, + "message": "Directory not found or is not a directory", + } + ), + 404, ) - success = storage.remove_directory(full_directory_path) if not success: @@ -804,9 +915,11 @@ class ManageSourceFiles(Resource): f"Full path: {full_directory_path}" ) return make_response( - jsonify({"success": False, "message": "Failed to remove directory"}), 500 + jsonify( + {"success": False, "message": "Failed to remove directory"} + ), + 500, ) - current_app.logger.info( f"Successfully removed directory. " f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}, " @@ -814,16 +927,22 @@ class ManageSourceFiles(Resource): ) # Trigger re-ingestion pipeline + from application.api.user.tasks import reingest_source_task task = reingest_source_task.delay(source_id=source_id, user=user) - return make_response(jsonify({ - "success": True, - "message": f"Successfully removed directory: {directory_path}", - "removed_directory": directory_path, - "reingest_task_id": task.id - }), 200) + return make_response( + jsonify( + { + "success": True, + "message": f"Successfully removed directory: {directory_path}", + "removed_directory": directory_path, + "reingest_task_id": task.id, + } + ), + 200, + ) except Exception as err: error_context = f"operation={operation}, user={user}, source_id={source_id}" @@ -837,8 +956,12 @@ class ManageSourceFiles(Resource): parent_dir = request.form.get("parent_dir", "") error_context += f", parent_dir={parent_dir}" - current_app.logger.error(f"Error managing source files: {err} ({error_context})", exc_info=True) - return make_response(jsonify({"success": False, "message": "Operation failed"}), 500) + current_app.logger.error( + f"Error managing source files: {err} ({error_context})", exc_info=True + ) + return make_response( + jsonify({"success": False, "message": "Operation failed"}), 500 + ) @user_ns.route("/api/remote") @@ -882,25 +1005,31 @@ class UploadRemote(Resource): elif data["source"] in ConnectorCreator.get_supported_connectors(): session_token = config.get("session_token") if not session_token: - return make_response(jsonify({ - "success": False, - "error": f"Missing session_token in {data['source']} configuration" - }), 400) - + return make_response( + jsonify( + { + "success": False, + "error": f"Missing session_token in {data['source']} configuration", + } + ), + 400, + ) # Process file_ids + file_ids = config.get("file_ids", []) if isinstance(file_ids, str): - file_ids = [id.strip() for id in file_ids.split(',') if id.strip()] + file_ids = [id.strip() for id in file_ids.split(",") if id.strip()] elif not isinstance(file_ids, list): file_ids = [] - # Process folder_ids + folder_ids = 
config.get("folder_ids", []) if isinstance(folder_ids, str): - folder_ids = [id.strip() for id in folder_ids.split(',') if id.strip()] + folder_ids = [ + id.strip() for id in folder_ids.split(",") if id.strip() + ] elif not isinstance(folder_ids, list): folder_ids = [] - config["file_ids"] = file_ids config["folder_ids"] = folder_ids @@ -912,9 +1041,11 @@ class UploadRemote(Resource): file_ids=file_ids, folder_ids=folder_ids, recursive=config.get("recursive", False), - retriever=config.get("retriever", "classic") + retriever=config.get("retriever", "classic"), + ) + return make_response( + jsonify({"success": True, "task_id": task.id}), 200 ) - return make_response(jsonify({"success": True, "task_id": task.id}), 200) task = ingest_remote.delay( source_data=source_data, job_name=data["name"], @@ -1023,7 +1154,7 @@ class PaginatedSources(Resource): "retriever": doc.get("retriever", "classic"), "syncFrequency": doc.get("sync_frequency", ""), "isNested": bool(doc.get("directory_structure")), - "type": doc.get("type", "file") + "type": doc.get("type", "file"), } paginated_docs.append(doc_data) response = { @@ -1072,7 +1203,9 @@ class CombinedJson(Resource): "retriever": index.get("retriever", "classic"), "syncFrequency": index.get("sync_frequency", ""), "is_nested": bool(index.get("directory_structure")), - "type": index.get("type", "file") # Add type field with default "file" + "type": index.get( + "type", "file" + ), # Add type field with default "file" } ) except Exception as err: @@ -1288,17 +1421,14 @@ class GetAgent(Resource): def get(self): if not (decoded_token := request.decoded_token): return {"success": False}, 401 - if not (agent_id := request.args.get("id")): return {"success": False, "message": "ID required"}, 400 - try: agent = agents_collection.find_one( {"_id": ObjectId(agent_id), "user": decoded_token["sub"]} ) if not agent: return {"status": "Not found"}, 404 - data = { "id": str(agent["_id"]), "name": agent["name"], @@ -1312,6 +1442,16 @@ class GetAgent(Resource): and (source_doc := db.dereference(agent.get("source"))) else "" ), + "sources": [ + ( + str(db.dereference(source_ref)["_id"]) + if isinstance(source_ref, DBRef) and db.dereference(source_ref) + else source_ref + ) + for source_ref in agent.get("sources", []) + if (isinstance(source_ref, DBRef) and db.dereference(source_ref)) + or source_ref == "default" + ], "chunks": agent["chunks"], "retriever": agent.get("retriever", ""), "prompt_id": agent.get("prompt_id", ""), @@ -1334,7 +1474,6 @@ class GetAgent(Resource): "shared_token": agent.get("shared_token", ""), } return make_response(jsonify(data), 200) - except Exception as e: current_app.logger.error(f"Agent fetch error: {e}", exc_info=True) return {"success": False}, 400 @@ -1346,7 +1485,6 @@ class GetAgents(Resource): def get(self): if not (decoded_token := request.decoded_token): return {"success": False}, 401 - user = decoded_token.get("sub") try: user_doc = ensure_user_doc(user) @@ -1365,8 +1503,24 @@ class GetAgents(Resource): str(source_doc["_id"]) if isinstance(agent.get("source"), DBRef) and (source_doc := db.dereference(agent.get("source"))) - else "" + else ( + agent.get("source", "") + if agent.get("source") == "default" + else "" + ) ), + "sources": [ + ( + source_ref + if source_ref == "default" + else str(db.dereference(source_ref)["_id"]) + ) + for source_ref in agent.get("sources", []) + if source_ref == "default" + or ( + isinstance(source_ref, DBRef) and db.dereference(source_ref) + ) + ], "chunks": agent["chunks"], "retriever": 
agent.get("retriever", ""), "prompt_id": agent.get("prompt_id", ""), @@ -1409,7 +1563,14 @@ class CreateAgent(Resource): "image": fields.Raw( required=False, description="Image file upload", type="file" ), - "source": fields.String(required=True, description="Source ID"), + "source": fields.String( + required=False, description="Source ID (legacy single source)" + ), + "sources": fields.List( + fields.String, + required=False, + description="List of source identifiers for multiple sources", + ), "chunks": fields.Integer(required=True, description="Chunks count"), "retriever": fields.String(required=True, description="Retriever ID"), "prompt_id": fields.String(required=True, description="Prompt ID"), @@ -1421,7 +1582,8 @@ class CreateAgent(Resource): required=True, description="Status of the agent (draft or published)" ), "json_schema": fields.Raw( - required=False, description="JSON schema for enforcing structured output format" + required=False, + description="JSON schema for enforcing structured output format", ), }, ) @@ -1441,6 +1603,11 @@ class CreateAgent(Resource): data["tools"] = json.loads(data["tools"]) except json.JSONDecodeError: data["tools"] = [] + if "sources" in data: + try: + data["sources"] = json.loads(data["sources"]) + except json.JSONDecodeError: + data["sources"] = [] if "json_schema" in data: try: data["json_schema"] = json.loads(data["json_schema"]) @@ -1449,28 +1616,42 @@ class CreateAgent(Resource): print(f"Received data: {data}") # Validate JSON schema if provided + if data.get("json_schema"): try: # Basic validation - ensure it's a valid JSON structure + json_schema = data.get("json_schema") if not isinstance(json_schema, dict): return make_response( - jsonify({"success": False, "message": "JSON schema must be a valid JSON object"}), - 400 + jsonify( + { + "success": False, + "message": "JSON schema must be a valid JSON object", + } + ), + 400, ) # Validate that it has either a 'schema' property or is itself a schema + if "schema" not in json_schema and "type" not in json_schema: return make_response( - jsonify({"success": False, "message": "JSON schema must contain either a 'schema' property or be a valid JSON schema with 'type' property"}), - 400 + jsonify( + { + "success": False, + "message": "JSON schema must contain either a 'schema' property or be a valid JSON schema with 'type' property", + } + ), + 400, ) except Exception as e: return make_response( - jsonify({"success": False, "message": f"Invalid JSON schema: {str(e)}"}), - 400 + jsonify( + {"success": False, "message": f"Invalid JSON schema: {str(e)}"} + ), + 400, ) - if data.get("status") not in ["draft", "published"]: return make_response( jsonify( @@ -1481,17 +1662,27 @@ class CreateAgent(Resource): ), 400, ) - if data.get("status") == "published": required_fields = [ "name", "description", - "source", "chunks", "retriever", "prompt_id", "agent_type", ] + # Require either source or sources (but not both) + + if not data.get("source") and not data.get("sources"): + return make_response( + jsonify( + { + "success": False, + "message": "Either 'source' or 'sources' field is required for published agents", + } + ), + 400, + ) validate_fields = ["name", "description", "prompt_id", "agent_type"] else: required_fields = ["name"] @@ -1502,25 +1693,37 @@ class CreateAgent(Resource): return missing_fields if invalid_fields: return invalid_fields - image_url, error = handle_image_upload(request, "", user, storage) if error: return make_response( jsonify({"success": False, "message": "Image upload failed"}), 
400 ) - try: key = str(uuid.uuid4()) if data.get("status") == "published" else "" + + sources_list = [] + if data.get("sources") and len(data.get("sources", [])) > 0: + for source_id in data.get("sources", []): + if source_id == "default": + sources_list.append("default") + elif ObjectId.is_valid(source_id): + sources_list.append(DBRef("sources", ObjectId(source_id))) + source_field = "" + else: + source_value = data.get("source", "") + if source_value == "default": + source_field = "default" + elif ObjectId.is_valid(source_value): + source_field = DBRef("sources", ObjectId(source_value)) + else: + source_field = "" new_agent = { "user": user, "name": data.get("name"), "description": data.get("description", ""), "image": image_url, - "source": ( - DBRef("sources", ObjectId(data.get("source"))) - if ObjectId.is_valid(data.get("source")) - else "" - ), + "source": source_field, + "sources": sources_list, "chunks": data.get("chunks", ""), "retriever": data.get("retriever", ""), "prompt_id": data.get("prompt_id", ""), @@ -1535,7 +1738,11 @@ class CreateAgent(Resource): } if new_agent["chunks"] == "": new_agent["chunks"] = "0" - if new_agent["source"] == "" and new_agent["retriever"] == "": + if ( + new_agent["source"] == "" + and new_agent["retriever"] == "" + and not new_agent["sources"] + ): new_agent["retriever"] = "classic" resp = agents_collection.insert_one(new_agent) new_id = str(resp.inserted_id) @@ -1557,7 +1764,14 @@ class UpdateAgent(Resource): "image": fields.String( required=False, description="New image URL or identifier" ), - "source": fields.String(required=True, description="Source ID"), + "source": fields.String( + required=False, description="Source ID (legacy single source)" + ), + "sources": fields.List( + fields.String, + required=False, + description="List of source identifiers for multiple sources", + ), "chunks": fields.Integer(required=True, description="Chunks count"), "retriever": fields.String(required=True, description="Retriever ID"), "prompt_id": fields.String(required=True, description="Prompt ID"), @@ -1569,7 +1783,8 @@ class UpdateAgent(Resource): required=True, description="Status of the agent (draft or published)" ), "json_schema": fields.Raw( - required=False, description="JSON schema for enforcing structured output format" + required=False, + description="JSON schema for enforcing structured output format", ), }, ) @@ -1589,12 +1804,16 @@ class UpdateAgent(Resource): data["tools"] = json.loads(data["tools"]) except json.JSONDecodeError: data["tools"] = [] + if "sources" in data: + try: + data["sources"] = json.loads(data["sources"]) + except json.JSONDecodeError: + data["sources"] = [] if "json_schema" in data: try: data["json_schema"] = json.loads(data["json_schema"]) except json.JSONDecodeError: data["json_schema"] = None - if not ObjectId.is_valid(agent_id): return make_response( jsonify({"success": False, "message": "Invalid agent ID format"}), 400 @@ -1618,7 +1837,6 @@ class UpdateAgent(Resource): ), 404, ) - image_url, error = handle_image_upload( request, existing_agent.get("image", ""), user, storage ) @@ -1626,13 +1844,13 @@ class UpdateAgent(Resource): return make_response( jsonify({"success": False, "message": "Image upload failed"}), 400 ) - update_fields = {} allowed_fields = [ "name", "description", "image", "source", + "sources", "chunks", "retriever", "prompt_id", @@ -1656,7 +1874,11 @@ class UpdateAgent(Resource): update_fields[field] = new_status elif field == "source": source_id = data.get("source") - if source_id and 
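# Illustrative sketch, not part of the patch: how CreateAgent normalizes a "sources"
# payload above. "default" stays a literal marker, valid ObjectIds become DBRefs, and
# anything else is silently dropped (UpdateAgent instead rejects invalid ids with a
# 400). The hex id is hypothetical.
from bson.dbref import DBRef
from bson.objectid import ObjectId

payload_sources = ["default", "64f0c2ab9d1e4a0001a1b2c3", "not-an-id"]
sources_list = []
for source_id in payload_sources:
    if source_id == "default":
        sources_list.append("default")
    elif ObjectId.is_valid(source_id):
        sources_list.append(DBRef("sources", ObjectId(source_id)))
# -> ["default", DBRef("sources", ObjectId("64f0c2ab9d1e4a0001a1b2c3"))]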
ObjectId.is_valid(source_id): + if source_id == "default": + # Handle special "default" source + + update_fields[field] = "default" + elif source_id and ObjectId.is_valid(source_id): update_fields[field] = DBRef("sources", ObjectId(source_id)) elif source_id: return make_response( @@ -1670,6 +1892,30 @@ class UpdateAgent(Resource): ) else: update_fields[field] = "" + elif field == "sources": + sources_list = data.get("sources", []) + if sources_list and isinstance(sources_list, list): + valid_sources = [] + for source_id in sources_list: + if source_id == "default": + valid_sources.append("default") + elif ObjectId.is_valid(source_id): + valid_sources.append( + DBRef("sources", ObjectId(source_id)) + ) + else: + return make_response( + jsonify( + { + "success": False, + "message": f"Invalid source ID format: {source_id}", + } + ), + 400, + ) + update_fields[field] = valid_sources + else: + update_fields[field] = [] elif field == "chunks": chunks_value = data.get("chunks") if chunks_value == "": @@ -1735,7 +1981,6 @@ class UpdateAgent(Resource): ), 400, ) - if not existing_agent.get("key"): newly_generated_key = str(uuid.uuid4()) update_fields["key"] = newly_generated_key @@ -1822,7 +2067,6 @@ class PinnedAgents(Resource): decoded_token = request.decoded_token if not decoded_token: return make_response(jsonify({"success": False}), 401) - user_id = decoded_token.get("sub") try: @@ -1831,7 +2075,6 @@ class PinnedAgents(Resource): if not pinned_ids: return make_response(jsonify([]), 200) - pinned_object_ids = [ObjectId(agent_id) for agent_id in pinned_ids] pinned_agents_cursor = agents_collection.find( @@ -1841,6 +2084,7 @@ class PinnedAgents(Resource): existing_ids = {str(agent["_id"]) for agent in pinned_agents} # Clean up any stale pinned IDs + stale_ids = [ agent_id for agent_id in pinned_ids if agent_id not in existing_ids ] @@ -1849,7 +2093,6 @@ class PinnedAgents(Resource): {"user_id": user_id}, {"$pullAll": {"agent_preferences.pinned": stale_ids}}, ) - list_pinned_agents = [ { "id": str(agent["_id"]), @@ -1886,11 +2129,9 @@ class PinnedAgents(Resource): for agent in pinned_agents if "source" in agent or "retriever" in agent ] - except Exception as err: current_app.logger.error(f"Error retrieving pinned agents: {err}") return make_response(jsonify({"success": False}), 400) - return make_response(jsonify(list_pinned_agents), 200) @@ -1954,7 +2195,6 @@ class RemoveSharedAgent(Resource): return make_response( jsonify({"success": False, "message": "ID is required"}), 400 ) - try: agent = agents_collection.find_one( {"_id": ObjectId(agent_id), "shared_publicly": True} @@ -1964,7 +2204,6 @@ class RemoveSharedAgent(Resource): jsonify({"success": False, "message": "Shared agent not found"}), 404, ) - ensure_user_doc(user_id) users_collection.update_one( {"user_id": user_id}, @@ -1977,7 +2216,6 @@ class RemoveSharedAgent(Resource): ) return make_response(jsonify({"success": True, "action": "removed"}), 200) - except Exception as err: current_app.logger.error(f"Error removing shared agent: {err}") return make_response( @@ -2000,7 +2238,6 @@ class SharedAgent(Resource): return make_response( jsonify({"success": False, "message": "Token or ID is required"}), 400 ) - try: query = { "shared_publicly": True, @@ -2012,7 +2249,6 @@ class SharedAgent(Resource): jsonify({"success": False, "message": "Shared agent not found"}), 404, ) - agent_id = str(shared_agent["_id"]) data = { "id": agent_id, @@ -2052,7 +2288,6 @@ class SharedAgent(Resource): if tool_data: enriched_tools.append(tool_data.get("name", 
"")) data["tools"] = enriched_tools - decoded_token = getattr(request, "decoded_token", None) if decoded_token: user_id = decoded_token.get("sub") @@ -2064,9 +2299,7 @@ class SharedAgent(Resource): {"user_id": user_id}, {"$addToSet": {"agent_preferences.shared_with_me": agent_id}}, ) - return make_response(jsonify(data), 200) - except Exception as err: current_app.logger.error(f"Error retrieving shared agent: {err}") return make_response(jsonify({"success": False}), 400) @@ -2100,7 +2333,6 @@ class SharedAgents(Resource): {"user_id": user_id}, {"$pullAll": {"agent_preferences.shared_with_me": stale_ids}}, ) - pinned_ids = set(user_doc.get("agent_preferences", {}).get("pinned", [])) list_shared_agents = [ @@ -2127,7 +2359,6 @@ class SharedAgents(Resource): ] return make_response(jsonify(list_shared_agents), 200) - except Exception as err: current_app.logger.error(f"Error retrieving shared agents: {err}") return make_response(jsonify({"success": False}), 400) @@ -3411,7 +3642,60 @@ class UpdateTool(Resource): ), 400, ) - update_data["config"] = data["config"] + tool_doc = user_tools_collection.find_one( + {"_id": ObjectId(data["id"]), "user": user} + ) + if tool_doc and tool_doc.get("name") == "mcp_tool": + config = data["config"] + existing_config = tool_doc.get("config", {}) + storage_config = existing_config.copy() + + storage_config.update(config) + existing_credentials = {} + if "encrypted_credentials" in existing_config: + existing_credentials = decrypt_credentials( + existing_config["encrypted_credentials"], user + ) + auth_credentials = existing_credentials.copy() + auth_type = storage_config.get("auth_type", "none") + if auth_type == "api_key": + if "api_key" in config and config["api_key"]: + auth_credentials["api_key"] = config["api_key"] + if "api_key_header" in config: + auth_credentials["api_key_header"] = config[ + "api_key_header" + ] + elif auth_type == "bearer": + if "bearer_token" in config and config["bearer_token"]: + auth_credentials["bearer_token"] = config["bearer_token"] + elif "encrypted_token" in config and config["encrypted_token"]: + auth_credentials["bearer_token"] = config["encrypted_token"] + elif auth_type == "basic": + if "username" in config and config["username"]: + auth_credentials["username"] = config["username"] + if "password" in config and config["password"]: + auth_credentials["password"] = config["password"] + if auth_type != "none" and auth_credentials: + encrypted_credentials_string = encrypt_credentials( + auth_credentials, user + ) + storage_config["encrypted_credentials"] = ( + encrypted_credentials_string + ) + elif auth_type == "none": + storage_config.pop("encrypted_credentials", None) + for field in [ + "api_key", + "bearer_token", + "encrypted_token", + "username", + "password", + "api_key_header", + ]: + storage_config.pop(field, None) + update_data["config"] = storage_config + else: + update_data["config"] = data["config"] if "status" in data: update_data["status"] = data["status"] user_tools_collection.update_one( @@ -3577,7 +3861,7 @@ class GetChunks(Resource): "page": "Page number for pagination", "per_page": "Number of chunks per page", "path": "Optional: Filter chunks by relative file path", - "search": "Optional: Search term to filter chunks by title or content" + "search": "Optional: Search term to filter chunks by title or content", }, ) def get(self): @@ -3607,20 +3891,21 @@ class GetChunks(Resource): metadata = chunk.get("metadata", {}) # Filter by path if provided + if path: chunk_source = metadata.get("source", "") # Check if 
the chunk's source matches the requested path + if not chunk_source or not chunk_source.endswith(path): continue - # Filter by search term if provided + if search_term: text_match = search_term in chunk.get("text", "").lower() title_match = search_term in metadata.get("title", "").lower() if not (text_match or title_match): continue - filtered_chunks.append(chunk) chunks = filtered_chunks @@ -3638,7 +3923,7 @@ class GetChunks(Resource): "total": total_chunks, "chunks": paginated_chunks, "path": path if path else None, - "search": search_term if search_term else None + "search": search_term if search_term else None, } ), 200, @@ -3647,6 +3932,7 @@ class GetChunks(Resource): current_app.logger.error(f"Error getting chunks: {e}", exc_info=True) return make_response(jsonify({"success": False}), 500) + @user_ns.route("/api/add_chunk") class AddChunk(Resource): @api.expect( @@ -3781,7 +4067,6 @@ class UpdateChunk(Resource): if metadata is None: metadata = {} metadata["token_count"] = token_count - if not ObjectId.is_valid(doc_id): return make_response(jsonify({"error": "Invalid doc_id"}), 400) doc = sources_collection.find_one({"_id": ObjectId(doc_id), "user": user}) @@ -3796,7 +4081,6 @@ class UpdateChunk(Resource): existing_chunk = next((c for c in chunks if c["doc_id"] == chunk_id), None) if not existing_chunk: return make_response(jsonify({"error": "Chunk not found"}), 404) - new_text = text if text is not None else existing_chunk["text"] if metadata is not None: @@ -3804,16 +4088,16 @@ class UpdateChunk(Resource): new_metadata.update(metadata) else: new_metadata = existing_chunk["metadata"].copy() - if text is not None: new_metadata["token_count"] = num_tokens_from_string(new_text) - try: new_chunk_id = store.add_chunk(new_text, new_metadata) deleted = store.delete_chunk(chunk_id) if not deleted: - current_app.logger.warning(f"Failed to delete old chunk {chunk_id}, but new chunk {new_chunk_id} was created") + current_app.logger.warning( + f"Failed to delete old chunk {chunk_id}, but new chunk {new_chunk_id} was created" + ) return make_response( jsonify( @@ -3861,7 +4145,6 @@ class StoreAttachment(Resource): jsonify({"status": "error", "message": "Missing file"}), 400, ) - user = None if decoded_token: user = safe_filename(decoded_token.get("sub")) @@ -3876,7 +4159,6 @@ class StoreAttachment(Resource): return make_response( jsonify({"success": False, "message": "Authentication required"}), 401 ) - try: attachment_id = ObjectId() original_filename = safe_filename(os.path.basename(file.filename)) @@ -3918,7 +4200,6 @@ class ServeImage(Resource): content_type = f"image/{extension}" if extension == "jpg": content_type = "image/jpeg" - response = make_response(file_obj.read()) response.headers.set("Content-Type", content_type) response.headers.set("Cache-Control", "max-age=86400") @@ -3950,9 +4231,7 @@ class DirectoryStructure(Resource): doc_id = request.args.get("id") if not doc_id: - return make_response( - jsonify({"error": "Document ID is required"}), 400 - ) + return make_response(jsonify({"error": "Document ID is required"}), 400) if not ObjectId.is_valid(doc_id): return make_response(jsonify({"error": "Invalid document ID"}), 400) @@ -3975,24 +4254,222 @@ class DirectoryStructure(Resource): provider = remote_data_obj.get("provider") except Exception as e: current_app.logger.warning( - f"Failed to parse remote_data for doc {doc_id}: {e}") - + f"Failed to parse remote_data for doc {doc_id}: {e}" + ) return make_response( - jsonify({ - "success": True, - "directory_structure": 
directory_structure, - "base_path": base_path, - "provider": provider, - }), 200 + jsonify( + { + "success": True, + "directory_structure": directory_structure, + "base_path": base_path, + "provider": provider, + } + ), + 200, ) - except Exception as e: current_app.logger.error( f"Error retrieving directory structure: {e}", exc_info=True ) + return make_response(jsonify({"success": False, "error": str(e)}), 500) + + +@user_ns.route("/api/mcp_server/test") +class TestMCPServerConfig(Resource): + @api.expect( + api.model( + "MCPServerTestModel", + { + "config": fields.Raw( + required=True, description="MCP server configuration to test" + ), + }, + ) + ) + @api.doc(description="Test MCP server connection with provided configuration") + def post(self): + decoded_token = request.decoded_token + if not decoded_token: + return make_response(jsonify({"success": False}), 401) + user = decoded_token.get("sub") + data = request.get_json() + + required_fields = ["config"] + missing_fields = check_required_fields(data, required_fields) + if missing_fields: + return missing_fields + try: + config = data["config"] + + auth_credentials = {} + auth_type = config.get("auth_type", "none") + + if auth_type == "api_key" and "api_key" in config: + auth_credentials["api_key"] = config["api_key"] + if "api_key_header" in config: + auth_credentials["api_key_header"] = config["api_key_header"] + elif auth_type == "bearer" and "bearer_token" in config: + auth_credentials["bearer_token"] = config["bearer_token"] + elif auth_type == "basic": + if "username" in config: + auth_credentials["username"] = config["username"] + if "password" in config: + auth_credentials["password"] = config["password"] + + test_config = config.copy() + test_config["auth_credentials"] = auth_credentials + + mcp_tool = MCPTool(test_config, user) + result = mcp_tool.test_connection() + + return make_response(jsonify(result), 200) + except Exception as e: + current_app.logger.error(f"Error testing MCP server: {e}", exc_info=True) return make_response( - jsonify({"success": False, "error": str(e)}), 500 + jsonify( + {"success": False, "error": f"Connection test failed: {str(e)}"} + ), + 500, ) +@user_ns.route("/api/mcp_server/save") +class MCPServerSave(Resource): + @api.expect( + api.model( + "MCPServerSaveModel", + { + "id": fields.String( + required=False, description="Tool ID for updates (optional)" + ), + "displayName": fields.String( + required=True, description="Display name for the MCP server" + ), + "config": fields.Raw( + required=True, description="MCP server configuration" + ), + "status": fields.Boolean( + required=False, default=True, description="Tool status" + ), + }, + ) + ) + @api.doc(description="Create or update MCP server with automatic tool discovery") + def post(self): + decoded_token = request.decoded_token + if not decoded_token: + return make_response(jsonify({"success": False}), 401) + user = decoded_token.get("sub") + data = request.get_json() + required_fields = ["displayName", "config"] + missing_fields = check_required_fields(data, required_fields) + if missing_fields: + return missing_fields + try: + config = data["config"] + + auth_credentials = {} + auth_type = config.get("auth_type", "none") + if auth_type == "api_key": + if "api_key" in config and config["api_key"]: + auth_credentials["api_key"] = config["api_key"] + if "api_key_header" in config: + auth_credentials["api_key_header"] = config["api_key_header"] + elif auth_type == "bearer": + if "bearer_token" in config and config["bearer_token"]: + 
auth_credentials["bearer_token"] = config["bearer_token"] + elif auth_type == "basic": + if "username" in config and config["username"]: + auth_credentials["username"] = config["username"] + if "password" in config and config["password"]: + auth_credentials["password"] = config["password"] + mcp_config = config.copy() + mcp_config["auth_credentials"] = auth_credentials + + if auth_type == "none" or auth_credentials: + mcp_tool = MCPTool(mcp_config, user) + mcp_tool.discover_tools() + actions_metadata = mcp_tool.get_actions_metadata() + else: + raise Exception( + "No valid credentials provided for the selected authentication type" + ) + + storage_config = config.copy() + if auth_credentials: + encrypted_credentials_string = encrypt_credentials( + auth_credentials, user + ) + storage_config["encrypted_credentials"] = encrypted_credentials_string + + for field in [ + "api_key", + "bearer_token", + "username", + "password", + "api_key_header", + ]: + storage_config.pop(field, None) + transformed_actions = [] + for action in actions_metadata: + action["active"] = True + if "parameters" in action: + if "properties" in action["parameters"]: + for param_name, param_details in action["parameters"][ + "properties" + ].items(): + param_details["filled_by_llm"] = True + param_details["value"] = "" + transformed_actions.append(action) + tool_data = { + "name": "mcp_tool", + "displayName": data["displayName"], + "customName": data["displayName"], + "description": f"MCP Server: {storage_config.get('server_url', 'Unknown')}", + "config": storage_config, + "actions": transformed_actions, + "status": data.get("status", True), + "user": user, + } + + tool_id = data.get("id") + if tool_id: + result = user_tools_collection.update_one( + {"_id": ObjectId(tool_id), "user": user, "name": "mcp_tool"}, + {"$set": {k: v for k, v in tool_data.items() if k != "user"}}, + ) + if result.matched_count == 0: + return make_response( + jsonify( + { + "success": False, + "error": "Tool not found or access denied", + } + ), + 404, + ) + response_data = { + "success": True, + "id": tool_id, + "message": f"MCP server updated successfully! Discovered {len(transformed_actions)} tools.", + "tools_count": len(transformed_actions), + } + else: + result = user_tools_collection.insert_one(tool_data) + tool_id = str(result.inserted_id) + response_data = { + "success": True, + "id": tool_id, + "message": f"MCP server created successfully! 
Discovered {len(transformed_actions)} tools.", + "tools_count": len(transformed_actions), + } + return make_response(jsonify(response_data), 200) + except Exception as e: + current_app.logger.error(f"Error saving MCP server: {e}", exc_info=True) + return make_response( + jsonify( + {"success": False, "error": f"Failed to save MCP server: {str(e)}"} + ), + 500, + ) diff --git a/application/core/settings.py b/application/core/settings.py index cb7d75e3..7ede4e86 100644 --- a/application/core/settings.py +++ b/application/core/settings.py @@ -26,7 +26,7 @@ class Settings(BaseSettings): "gpt-4o-mini": 128000, "gpt-3.5-turbo": 4096, "claude-2": 1e5, - "gemini-2.0-flash-exp": 1e6, + "gemini-2.5-flash": 1e6, } UPLOAD_FOLDER: str = "inputs" PARSE_PDF_AS_IMAGE: bool = False @@ -96,7 +96,7 @@ class Settings(BaseSettings): QDRANT_HOST: Optional[str] = None QDRANT_PATH: Optional[str] = None QDRANT_DISTANCE_FUNC: str = "Cosine" - + # PGVector vectorstore config PGVECTOR_CONNECTION_STRING: Optional[str] = None # Milvus vectorstore config @@ -116,6 +116,9 @@ class Settings(BaseSettings): JWT_SECRET_KEY: str = "" + # Encryption settings + ENCRYPTION_SECRET_KEY: str = "default-docsgpt-encryption-key" + path = Path(__file__).parent.parent.absolute() settings = Settings(_env_file=path.joinpath(".env"), _env_file_encoding="utf-8") diff --git a/application/llm/google_ai.py b/application/llm/google_ai.py index 91065b74..b88e1d9f 100644 --- a/application/llm/google_ai.py +++ b/application/llm/google_ai.py @@ -143,6 +143,7 @@ class GoogleLLM(BaseLLM): raise def _clean_messages_google(self, messages): + """Convert OpenAI format messages to Google AI format.""" cleaned_messages = [] for message in messages: role = message.get("role") @@ -150,6 +151,8 @@ class GoogleLLM(BaseLLM): if role == "assistant": role = "model" + elif role == "tool": + role = "model" parts = [] if role and content is not None: @@ -188,11 +191,63 @@ class GoogleLLM(BaseLLM): else: raise ValueError(f"Unexpected content type: {type(content)}") - cleaned_messages.append(types.Content(role=role, parts=parts)) + if parts: + cleaned_messages.append(types.Content(role=role, parts=parts)) return cleaned_messages + def _clean_schema(self, schema_obj): + """ + Recursively remove unsupported fields from schema objects + and validate required properties. 
+ """ + if not isinstance(schema_obj, dict): + return schema_obj + allowed_fields = { + "type", + "description", + "items", + "properties", + "required", + "enum", + "pattern", + "minimum", + "maximum", + "nullable", + "default", + } + + cleaned = {} + for key, value in schema_obj.items(): + if key not in allowed_fields: + continue + elif key == "type" and isinstance(value, str): + cleaned[key] = value.upper() + elif isinstance(value, dict): + cleaned[key] = self._clean_schema(value) + elif isinstance(value, list): + cleaned[key] = [self._clean_schema(item) for item in value] + else: + cleaned[key] = value + + # Validate that required properties actually exist in properties + if "required" in cleaned and "properties" in cleaned: + valid_required = [] + properties_keys = set(cleaned["properties"].keys()) + for required_prop in cleaned["required"]: + if required_prop in properties_keys: + valid_required.append(required_prop) + if valid_required: + cleaned["required"] = valid_required + else: + cleaned.pop("required", None) + elif "required" in cleaned and "properties" not in cleaned: + cleaned.pop("required", None) + + return cleaned + def _clean_tools_format(self, tools_list): + """Convert OpenAI format tools to Google AI format.""" genai_tools = [] for tool_data in tools_list: if tool_data["type"] == "function": @@ -201,18 +256,16 @@ class GoogleLLM(BaseLLM): properties = parameters.get("properties", {}) if properties: + cleaned_properties = {} + for k, v in properties.items(): + cleaned_properties[k] = self._clean_schema(v) + genai_function = dict( name=function["name"], description=function["description"], parameters={ "type": "OBJECT", - "properties": { - k: { - **v, - "type": v["type"].upper() if v["type"] else None, - } - for k, v in properties.items() - }, + "properties": cleaned_properties, "required": ( parameters["required"] if "required" in parameters @@ -242,6 +295,7 @@ class GoogleLLM(BaseLLM): response_schema=None, **kwargs, ): + """Generate content using Google AI API without streaming.""" client = genai.Client(api_key=self.api_key) if formatting == "openai": messages = self._clean_messages_google(messages) @@ -281,6 +335,7 @@ class GoogleLLM(BaseLLM): response_schema=None, **kwargs, ): + """Generate content using Google AI API with streaming.""" client = genai.Client(api_key=self.api_key) if formatting == "openai": messages = self._clean_messages_google(messages) @@ -331,12 +386,15 @@ class GoogleLLM(BaseLLM): yield chunk.text def _supports_tools(self): + """Return whether this LLM supports function calling.""" return True def _supports_structured_output(self): + """Return whether this LLM supports structured JSON output.""" return True def prepare_structured_output_format(self, json_schema): + """Convert JSON schema to Google AI structured output format.""" if not json_schema: return None diff --git a/application/llm/handlers/base.py b/application/llm/handlers/base.py index 43205472..96ed4c00 100644 --- a/application/llm/handlers/base.py +++ b/application/llm/handlers/base.py @@ -205,7 +205,6 @@ class LLMHandler(ABC): except StopIteration as e: tool_response, call_id = e.value break - updated_messages.append( { "role": "assistant", @@ -222,17 +221,36 @@ class LLMHandler(ABC): ) updated_messages.append(self.create_tool_message(call, tool_response)) - except Exception as e: logger.error(f"Error executing tool: {str(e)}", exc_info=True) - updated_messages.append( - { - "role": "tool", - "content": f"Error executing tool: {str(e)}", - "tool_call_id": call.id, - } + error_call = 
ToolCall( + id=call.id, name=call.name, arguments=call.arguments ) + error_response = f"Error executing tool: {str(e)}" + error_message = self.create_tool_message(error_call, error_response) + updated_messages.append(error_message) + call_parts = call.name.split("_") + if len(call_parts) >= 2: + tool_id = call_parts[-1] # Last part is tool ID (e.g., "1") + action_name = "_".join(call_parts[:-1]) + tool_name = tools_dict.get(tool_id, {}).get("name", "unknown_tool") + full_action_name = f"{action_name}_{tool_id}" + else: + tool_name = "unknown_tool" + action_name = call.name + full_action_name = call.name + yield { + "type": "tool_call", + "data": { + "tool_name": tool_name, + "call_id": call.id, + "action_name": full_action_name, + "arguments": call.arguments, + "error": error_response, + "status": "error", + }, + } return updated_messages def handle_non_streaming( @@ -263,13 +281,11 @@ class LLMHandler(ABC): except StopIteration as e: messages = e.value break - response = agent.llm.gen( model=agent.gpt_model, messages=messages, tools=agent.tools ) parsed = self.parse_response(response) self.llm_calls.append(build_stack_data(agent.llm)) - return parsed.content def handle_streaming( diff --git a/application/llm/handlers/google.py b/application/llm/handlers/google.py index b43f2a16..7fa44cb6 100644 --- a/application/llm/handlers/google.py +++ b/application/llm/handlers/google.py @@ -17,7 +17,6 @@ class GoogleLLMHandler(LLMHandler): finish_reason="stop", raw_response=response, ) - if hasattr(response, "candidates"): parts = response.candidates[0].content.parts if response.candidates else [] tool_calls = [ @@ -41,7 +40,6 @@ class GoogleLLMHandler(LLMHandler): finish_reason="tool_calls" if tool_calls else "stop", raw_response=response, ) - else: tool_calls = [] if hasattr(response, "function_call"): @@ -61,14 +59,16 @@ class GoogleLLMHandler(LLMHandler): def create_tool_message(self, tool_call: ToolCall, result: Any) -> Dict: """Create Google-style tool message.""" - from google.genai import types return { - "role": "tool", + "role": "model", "content": [ - types.Part.from_function_response( - name=tool_call.name, response={"result": result} - ).to_json_dict() + { + "function_response": { + "name": tool_call.name, + "response": {"result": result}, + } + } ], } diff --git a/application/requirements.txt b/application/requirements.txt index b7076ed8..80564689 100644 --- a/application/requirements.txt +++ b/application/requirements.txt @@ -2,6 +2,7 @@ anthropic==0.49.0 boto3==1.38.18 beautifulsoup4==4.13.4 celery==5.4.0 +cryptography==42.0.8 dataclasses-json==0.6.7 docx2txt==0.8 duckduckgo-search==7.5.2 diff --git a/application/retriever/base.py b/application/retriever/base.py index fd99dbdd..36ac2e93 100644 --- a/application/retriever/base.py +++ b/application/retriever/base.py @@ -5,10 +5,6 @@ class BaseRetriever(ABC): def __init__(self): pass - @abstractmethod - def gen(self, *args, **kwargs): - pass - @abstractmethod def search(self, *args, **kwargs): pass diff --git a/application/retriever/classic_rag.py b/application/retriever/classic_rag.py index 9416b4f7..2ce863c2 100644 --- a/application/retriever/classic_rag.py +++ b/application/retriever/classic_rag.py @@ -1,4 +1,5 @@ import logging + from application.core.settings import settings from application.llm.llm_creator import LLMCreator from application.retriever.base import BaseRetriever @@ -20,10 +21,20 @@ class ClassicRAG(BaseRetriever): api_key=settings.API_KEY, decoded_token=None, ): - self.original_question = "" + """Initialize ClassicRAG 
retriever with vectorstore sources and LLM configuration""" + self.original_question = source.get("question", "") self.chat_history = chat_history if chat_history is not None else [] self.prompt = prompt - self.chunks = chunks + if isinstance(chunks, str): + try: + self.chunks = int(chunks) + except ValueError: + logging.warning( + f"Invalid chunks value '{chunks}', using default value 2" + ) + self.chunks = 2 + else: + self.chunks = chunks self.gpt_model = gpt_model self.token_limit = ( token_limit @@ -44,25 +55,52 @@ class ClassicRAG(BaseRetriever): user_api_key=self.user_api_key, decoded_token=decoded_token, ) - self.vectorstore = source["active_docs"] if "active_docs" in source else None + + if "active_docs" in source and source["active_docs"] is not None: + if isinstance(source["active_docs"], list): + self.vectorstores = source["active_docs"] + else: + self.vectorstores = [source["active_docs"]] + else: + self.vectorstores = [] self.question = self._rephrase_query() self.decoded_token = decoded_token + self._validate_vectorstore_config() + + def _validate_vectorstore_config(self): + """Validate vectorstore IDs and remove any empty/invalid entries""" + if not self.vectorstores: + logging.warning("No vectorstores configured for retrieval") + return + invalid_ids = [ + vs_id for vs_id in self.vectorstores if not vs_id or not vs_id.strip() + ] + if invalid_ids: + logging.warning(f"Found invalid vectorstore IDs: {invalid_ids}") + self.vectorstores = [ + vs_id for vs_id in self.vectorstores if vs_id and vs_id.strip() + ] def _rephrase_query(self): + """Rephrase user query with chat history context for better retrieval""" if ( not self.original_question or not self.chat_history or self.chat_history == [] or self.chunks == 0 - or self.vectorstore is None + or not self.vectorstores ): return self.original_question - prompt = f"""Given the following conversation history: + {self.chat_history} + + Rephrase the following user question to be a standalone search query + that captures all relevant context from the conversation: + """ messages = [ @@ -79,44 +117,62 @@ class ClassicRAG(BaseRetriever): return self.original_question def _get_data(self): - if self.chunks == 0 or self.vectorstore is None: - docs = [] - else: - docsearch = VectorCreator.create_vectorstore( - settings.VECTOR_STORE, self.vectorstore, settings.EMBEDDINGS_KEY - ) - docs_temp = docsearch.search(self.question, k=self.chunks) - docs = [ - { - "title": i.metadata.get( - "title", i.metadata.get("post_title", i.page_content) - ).split("/")[-1], - "text": i.page_content, - "source": ( - i.metadata.get("source") - if i.metadata.get("source") - else "local" - ), - } - for i in docs_temp - ] + """Retrieve relevant documents from configured vectorstores""" + if self.chunks == 0 or not self.vectorstores: + return [] + all_docs = [] + chunks_per_source = max(1, self.chunks // len(self.vectorstores)) - return docs + for vectorstore_id in self.vectorstores: + if vectorstore_id: + try: + docsearch = VectorCreator.create_vectorstore( + settings.VECTOR_STORE, vectorstore_id, settings.EMBEDDINGS_KEY + ) + docs_temp = docsearch.search(self.question, k=chunks_per_source) - def gen(): - pass + for doc in docs_temp: + if hasattr(doc, "page_content") and hasattr(doc, "metadata"): + page_content = doc.page_content + metadata = doc.metadata + else: + page_content = doc.get("text", doc.get("page_content", "")) + metadata = doc.get("metadata", {}) + title = metadata.get( + "title", metadata.get("post_title", page_content) + ) + if isinstance(title, 
str): + title = title.split("/")[-1] + else: + title = str(title).split("/")[-1] + all_docs.append( + { + "title": title, + "text": page_content, + "source": metadata.get("source") or vectorstore_id, + } + ) + except Exception as e: + logging.error( + f"Error searching vectorstore {vectorstore_id}: {e}", + exc_info=True, + ) + continue + return all_docs def search(self, query: str = ""): + """Search for documents using optional query override""" if query: self.original_question = query self.question = self._rephrase_query() return self._get_data() def get_params(self): + """Return current retriever configuration parameters""" return { "question": self.original_question, "rephrased_question": self.question, - "source": self.vectorstore, + "sources": self.vectorstores, "chunks": self.chunks, "token_limit": self.token_limit, "gpt_model": self.gpt_model, diff --git a/application/security/__init__.py b/application/security/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/application/security/encryption.py b/application/security/encryption.py new file mode 100644 index 00000000..4cb3a4d5 --- /dev/null +++ b/application/security/encryption.py @@ -0,0 +1,85 @@ +import base64 +import json +import os + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC + +from application.core.settings import settings + + +def _derive_key(user_id: str, salt: bytes) -> bytes: + app_secret = settings.ENCRYPTION_SECRET_KEY + + password = f"{app_secret}#{user_id}".encode() + + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=100000, + backend=default_backend(), + ) + + return kdf.derive(password) + + +def encrypt_credentials(credentials: dict, user_id: str) -> str: + if not credentials: + return "" + try: + salt = os.urandom(16) + iv = os.urandom(16) + key = _derive_key(user_id, salt) + + json_str = json.dumps(credentials) + + cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) + encryptor = cipher.encryptor() + + padded_data = _pad_data(json_str.encode()) + encrypted_data = encryptor.update(padded_data) + encryptor.finalize() + + result = salt + iv + encrypted_data + return base64.b64encode(result).decode() + except Exception as e: + print(f"Warning: Failed to encrypt credentials: {e}") + return "" + + +def decrypt_credentials(encrypted_data: str, user_id: str) -> dict: + if not encrypted_data: + return {} + try: + data = base64.b64decode(encrypted_data.encode()) + + salt = data[:16] + iv = data[16:32] + encrypted_content = data[32:] + + key = _derive_key(user_id, salt) + + cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend()) + decryptor = cipher.decryptor() + + decrypted_padded = decryptor.update(encrypted_content) + decryptor.finalize() + decrypted_data = _unpad_data(decrypted_padded) + + return json.loads(decrypted_data.decode()) + except Exception as e: + print(f"Warning: Failed to decrypt credentials: {e}") + return {} + + +def _pad_data(data: bytes) -> bytes: + block_size = 16 + padding_len = block_size - (len(data) % block_size) + padding = bytes([padding_len]) * padding_len + return data + padding + + +def _unpad_data(data: bytes) -> bytes: + padding_len = data[-1] + return data[:-padding_len] diff --git a/application/vectorstore/base.py b/application/vectorstore/base.py index a6b206c9..ea4885cd 100644 
--- a/application/vectorstore/base.py +++ b/application/vectorstore/base.py @@ -1,20 +1,28 @@ -from abc import ABC, abstractmethod import os -from sentence_transformers import SentenceTransformer +from abc import ABC, abstractmethod + from langchain_openai import OpenAIEmbeddings +from sentence_transformers import SentenceTransformer + from application.core.settings import settings + class EmbeddingsWrapper: def __init__(self, model_name, *args, **kwargs): - self.model = SentenceTransformer(model_name, config_kwargs={'allow_dangerous_deserialization': True}, *args, **kwargs) + self.model = SentenceTransformer( + model_name, + config_kwargs={"allow_dangerous_deserialization": True}, + *args, + **kwargs + ) self.dimension = self.model.get_sentence_embedding_dimension() def embed_query(self, query: str): return self.model.encode(query).tolist() - + def embed_documents(self, documents: list): return self.model.encode(documents).tolist() - + def __call__(self, text): if isinstance(text, str): return self.embed_query(text) @@ -24,15 +32,14 @@ class EmbeddingsWrapper: raise ValueError("Input must be a string or a list of strings") - class EmbeddingsSingleton: _instances = {} @staticmethod def get_instance(embeddings_name, *args, **kwargs): if embeddings_name not in EmbeddingsSingleton._instances: - EmbeddingsSingleton._instances[embeddings_name] = EmbeddingsSingleton._create_instance( - embeddings_name, *args, **kwargs + EmbeddingsSingleton._instances[embeddings_name] = ( + EmbeddingsSingleton._create_instance(embeddings_name, *args, **kwargs) ) return EmbeddingsSingleton._instances[embeddings_name] @@ -40,9 +47,15 @@ class EmbeddingsSingleton: def _create_instance(embeddings_name, *args, **kwargs): embeddings_factory = { "openai_text-embedding-ada-002": OpenAIEmbeddings, - "huggingface_sentence-transformers/all-mpnet-base-v2": lambda: EmbeddingsWrapper("sentence-transformers/all-mpnet-base-v2"), - "huggingface_sentence-transformers-all-mpnet-base-v2": lambda: EmbeddingsWrapper("sentence-transformers/all-mpnet-base-v2"), - "huggingface_hkunlp/instructor-large": lambda: EmbeddingsWrapper("hkunlp/instructor-large"), + "huggingface_sentence-transformers/all-mpnet-base-v2": lambda: EmbeddingsWrapper( + "sentence-transformers/all-mpnet-base-v2" + ), + "huggingface_sentence-transformers-all-mpnet-base-v2": lambda: EmbeddingsWrapper( + "sentence-transformers/all-mpnet-base-v2" + ), + "huggingface_hkunlp/instructor-large": lambda: EmbeddingsWrapper( + "hkunlp/instructor-large" + ), } if embeddings_name in embeddings_factory: @@ -50,34 +63,63 @@ class EmbeddingsSingleton: else: return EmbeddingsWrapper(embeddings_name, *args, **kwargs) + class BaseVectorStore(ABC): def __init__(self): pass @abstractmethod def search(self, *args, **kwargs): + """Search for similar documents/chunks in the vectorstore""" + pass + + @abstractmethod + def add_texts(self, texts, metadatas=None, *args, **kwargs): + """Add texts with their embeddings to the vectorstore""" + pass + + def delete_index(self, *args, **kwargs): + """Delete the entire index/collection""" + pass + + def save_local(self, *args, **kwargs): + """Save vectorstore to local storage""" + pass + + def get_chunks(self, *args, **kwargs): + """Get all chunks from the vectorstore""" + pass + + def add_chunk(self, text, metadata=None, *args, **kwargs): + """Add a single chunk to the vectorstore""" + pass + + def delete_chunk(self, chunk_id, *args, **kwargs): + """Delete a specific chunk from the vectorstore""" pass def is_azure_configured(self): - return 
settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME + return ( + settings.OPENAI_API_BASE + and settings.OPENAI_API_VERSION + and settings.AZURE_DEPLOYMENT_NAME + ) def _get_embeddings(self, embeddings_name, embeddings_key=None): if embeddings_name == "openai_text-embedding-ada-002": if self.is_azure_configured(): os.environ["OPENAI_API_TYPE"] = "azure" embedding_instance = EmbeddingsSingleton.get_instance( - embeddings_name, - model=settings.AZURE_EMBEDDINGS_DEPLOYMENT_NAME + embeddings_name, model=settings.AZURE_EMBEDDINGS_DEPLOYMENT_NAME ) else: embedding_instance = EmbeddingsSingleton.get_instance( - embeddings_name, - openai_api_key=embeddings_key + embeddings_name, openai_api_key=embeddings_key ) elif embeddings_name == "huggingface_sentence-transformers/all-mpnet-base-v2": if os.path.exists("./models/all-mpnet-base-v2"): embedding_instance = EmbeddingsSingleton.get_instance( - embeddings_name = "./models/all-mpnet-base-v2", + embeddings_name="./models/all-mpnet-base-v2", ) else: embedding_instance = EmbeddingsSingleton.get_instance( @@ -87,4 +129,3 @@ class BaseVectorStore(ABC): embedding_instance = EmbeddingsSingleton.get_instance(embeddings_name) return embedding_instance - diff --git a/frontend/public/toolIcons/tool_mcp_tool.svg b/frontend/public/toolIcons/tool_mcp_tool.svg new file mode 100644 index 00000000..22c980e3 --- /dev/null +++ b/frontend/public/toolIcons/tool_mcp_tool.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/frontend/src/agents/NewAgent.tsx b/frontend/src/agents/NewAgent.tsx index af7f109e..92e8b961 100644 --- a/frontend/src/agents/NewAgent.tsx +++ b/frontend/src/agents/NewAgent.tsx @@ -45,6 +45,7 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { description: '', image: '', source: '', + sources: [], chunks: '', retriever: '', prompt_id: 'default', @@ -150,7 +151,41 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { const formData = new FormData(); formData.append('name', agent.name); formData.append('description', agent.description); - formData.append('source', agent.source); + + if (selectedSourceIds.size > 1) { + const sourcesArray = Array.from(selectedSourceIds) + .map((id) => { + const sourceDoc = sourceDocs?.find( + (source) => + source.id === id || source.retriever === id || source.name === id, + ); + if (sourceDoc?.name === 'Default' && !sourceDoc?.id) { + return 'default'; + } + return sourceDoc?.id || id; + }) + .filter(Boolean); + formData.append('sources', JSON.stringify(sourcesArray)); + formData.append('source', ''); + } else if (selectedSourceIds.size === 1) { + const singleSourceId = Array.from(selectedSourceIds)[0]; + const sourceDoc = sourceDocs?.find( + (source) => + source.id === singleSourceId || + source.retriever === singleSourceId || + source.name === singleSourceId, + ); + let finalSourceId; + if (sourceDoc?.name === 'Default' && !sourceDoc?.id) + finalSourceId = 'default'; + else finalSourceId = sourceDoc?.id || singleSourceId; + formData.append('source', String(finalSourceId)); + formData.append('sources', JSON.stringify([])); + } else { + formData.append('source', ''); + formData.append('sources', JSON.stringify([])); + } + formData.append('chunks', agent.chunks); formData.append('retriever', agent.retriever); formData.append('prompt_id', agent.prompt_id); @@ -196,7 +231,41 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { const formData = new FormData(); 
formData.append('name', agent.name); formData.append('description', agent.description); - formData.append('source', agent.source); + + if (selectedSourceIds.size > 1) { + const sourcesArray = Array.from(selectedSourceIds) + .map((id) => { + const sourceDoc = sourceDocs?.find( + (source) => + source.id === id || source.retriever === id || source.name === id, + ); + if (sourceDoc?.name === 'Default' && !sourceDoc?.id) { + return 'default'; + } + return sourceDoc?.id || id; + }) + .filter(Boolean); + formData.append('sources', JSON.stringify(sourcesArray)); + formData.append('source', ''); + } else if (selectedSourceIds.size === 1) { + const singleSourceId = Array.from(selectedSourceIds)[0]; + const sourceDoc = sourceDocs?.find( + (source) => + source.id === singleSourceId || + source.retriever === singleSourceId || + source.name === singleSourceId, + ); + let finalSourceId; + if (sourceDoc?.name === 'Default' && !sourceDoc?.id) + finalSourceId = 'default'; + else finalSourceId = sourceDoc?.id || singleSourceId; + formData.append('source', String(finalSourceId)); + formData.append('sources', JSON.stringify([])); + } else { + formData.append('source', ''); + formData.append('sources', JSON.stringify([])); + } + formData.append('chunks', agent.chunks); formData.append('retriever', agent.retriever); formData.append('prompt_id', agent.prompt_id); @@ -293,9 +362,33 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { throw new Error('Failed to fetch agent'); } const data = await response.json(); - if (data.source) setSelectedSourceIds(new Set([data.source])); - else if (data.retriever) + + if (data.sources && data.sources.length > 0) { + const mappedSources = data.sources.map((sourceId: string) => { + if (sourceId === 'default') { + const defaultSource = sourceDocs?.find( + (source) => source.name === 'Default', + ); + return defaultSource?.retriever || 'classic'; + } + return sourceId; + }); + setSelectedSourceIds(new Set(mappedSources)); + } else if (data.source) { + if (data.source === 'default') { + const defaultSource = sourceDocs?.find( + (source) => source.name === 'Default', + ); + setSelectedSourceIds( + new Set([defaultSource?.retriever || 'classic']), + ); + } else { + setSelectedSourceIds(new Set([data.source])); + } + } else if (data.retriever) { setSelectedSourceIds(new Set([data.retriever])); + } + if (data.tools) setSelectedToolIds(new Set(data.tools)); if (data.status === 'draft') setEffectiveMode('draft'); if (data.json_schema) { @@ -311,25 +404,57 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { }, [agentId, mode, token]); useEffect(() => { - const selectedSource = Array.from(selectedSourceIds).map((id) => - sourceDocs?.find( - (source) => - source.id === id || source.retriever === id || source.name === id, - ), - ); - if (selectedSource[0]?.model === embeddingsName) { - if (selectedSource[0] && 'id' in selectedSource[0]) { + const selectedSources = Array.from(selectedSourceIds) + .map((id) => + sourceDocs?.find( + (source) => + source.id === id || source.retriever === id || source.name === id, + ), + ) + .filter(Boolean); + + if (selectedSources.length > 0) { + // Handle multiple sources + if (selectedSources.length > 1) { + // Multiple sources selected - store in sources array + const sourceIds = selectedSources + .map((source) => source?.id) + .filter((id): id is string => Boolean(id)); setAgent((prev) => ({ ...prev, - source: selectedSource[0]?.id || 'default', + sources: sourceIds, + source: '', // Clear 
single source for multiple sources retriever: '', })); - } else - setAgent((prev) => ({ - ...prev, - source: '', - retriever: selectedSource[0]?.retriever || 'classic', - })); + } else { + // Single source selected - maintain backward compatibility + const selectedSource = selectedSources[0]; + if (selectedSource?.model === embeddingsName) { + if (selectedSource && 'id' in selectedSource) { + setAgent((prev) => ({ + ...prev, + source: selectedSource?.id || 'default', + sources: [], // Clear sources array for single source + retriever: '', + })); + } else { + setAgent((prev) => ({ + ...prev, + source: '', + sources: [], // Clear sources array + retriever: selectedSource?.retriever || 'classic', + })); + } + } + } + } else { + // No sources selected + setAgent((prev) => ({ + ...prev, + source: '', + sources: [], + retriever: '', + })); } }, [selectedSourceIds]); @@ -510,7 +635,7 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) { ) .filter(Boolean) .join(', ') - : 'Select source'} + : 'Select sources'} ) => { setSelectedSourceIds(newSelectedIds); - setIsSourcePopupOpen(false); }} - title="Select Source" + title="Select Sources" searchPlaceholder="Search sources..." - noOptionsMessage="No source available" - singleSelect={true} + noOptionsMessage="No sources available" />
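Note: the single-vs-multiple source FormData logic above is repeated verbatim in both submit handlers of NewAgent.tsx (create and update). A minimal sketch of a shared helper that both paths could call; the helper name and the `SourceDoc` shape are illustrative assumptions, not code from this PR.

```ts
// Hypothetical helper (not part of this PR): mirrors the duplicated
// source/sources FormData logic from the two submit handlers in NewAgent.tsx.
type SourceDoc = {
  id?: string;
  name?: string;
  retriever?: string;
};

function appendSourceFields(
  formData: FormData,
  selectedSourceIds: Set<string>,
  sourceDocs: SourceDoc[] | undefined,
): void {
  // Resolve a selected id to the stored source id, falling back to 'default'
  // for the built-in "Default" source that has no id.
  const resolve = (id: string): string => {
    const doc = sourceDocs?.find(
      (source) =>
        source.id === id || source.retriever === id || source.name === id,
    );
    if (doc?.name === 'Default' && !doc?.id) return 'default';
    return doc?.id || id;
  };

  const ids = Array.from(selectedSourceIds);
  if (ids.length > 1) {
    // Multiple sources: send a JSON array and clear the legacy field.
    formData.append('sources', JSON.stringify(ids.map(resolve).filter(Boolean)));
    formData.append('source', '');
  } else if (ids.length === 1) {
    // Single source: keep backward compatibility with the legacy 'source' field.
    formData.append('source', String(resolve(ids[0])));
    formData.append('sources', JSON.stringify([]));
  } else {
    // Nothing selected.
    formData.append('source', '');
    formData.append('sources', JSON.stringify([]));
  }
}
```

Both handlers could then call `appendSourceFields(formData, selectedSourceIds, sourceDocs)` instead of repeating the block, keeping the create and update paths in sync.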
diff --git a/frontend/src/agents/types/index.ts b/frontend/src/agents/types/index.ts index e841cb0a..442097a1 100644 --- a/frontend/src/agents/types/index.ts +++ b/frontend/src/agents/types/index.ts @@ -10,6 +10,7 @@ export type Agent = { description: string; image: string; source: string; + sources?: string[]; chunks: string; retriever: string; prompt_id: string; diff --git a/frontend/src/api/endpoints.ts b/frontend/src/api/endpoints.ts index 955f43ee..dad008da 100644 --- a/frontend/src/api/endpoints.ts +++ b/frontend/src/api/endpoints.ts @@ -57,6 +57,8 @@ const endpoints = { DIRECTORY_STRUCTURE: (docId: string) => `/api/directory_structure?id=${docId}`, MANAGE_SOURCE_FILES: '/api/manage_source_files', + MCP_TEST_CONNECTION: '/api/mcp_server/test', + MCP_SAVE_SERVER: '/api/mcp_server/save', }, CONVERSATION: { ANSWER: '/api/answer', diff --git a/frontend/src/api/services/userService.ts b/frontend/src/api/services/userService.ts index 6e375951..5dda8ddf 100644 --- a/frontend/src/api/services/userService.ts +++ b/frontend/src/api/services/userService.ts @@ -90,7 +90,10 @@ const userService = { path?: string, search?: string, ): Promise => - apiClient.get(endpoints.USER.GET_CHUNKS(docId, page, perPage, path, search), token), + apiClient.get( + endpoints.USER.GET_CHUNKS(docId, page, perPage, path, search), + token, + ), addChunk: (data: any, token: string | null): Promise => apiClient.post(endpoints.USER.ADD_CHUNK, data, token), deleteChunk: ( @@ -105,16 +108,24 @@ const userService = { apiClient.get(endpoints.USER.DIRECTORY_STRUCTURE(docId), token), manageSourceFiles: (data: FormData, token: string | null): Promise => apiClient.postFormData(endpoints.USER.MANAGE_SOURCE_FILES, data, token), - syncConnector: (docId: string, provider: string, token: string | null): Promise => { + testMCPConnection: (data: any, token: string | null): Promise => + apiClient.post(endpoints.USER.MCP_TEST_CONNECTION, data, token), + saveMCPServer: (data: any, token: string | null): Promise => + apiClient.post(endpoints.USER.MCP_SAVE_SERVER, data, token), + syncConnector: ( + docId: string, + provider: string, + token: string | null, + ): Promise => { const sessionToken = getSessionToken(provider); return apiClient.post( endpoints.USER.SYNC_CONNECTOR, { source_id: docId, session_token: sessionToken, - provider: provider + provider: provider, }, - token + token, ); }, }; diff --git a/frontend/src/assets/server.svg b/frontend/src/assets/server.svg new file mode 100644 index 00000000..e69de29b diff --git a/frontend/src/components/ConnectorAuth.tsx b/frontend/src/components/ConnectorAuth.tsx index 22566521..61b6e895 100644 --- a/frontend/src/components/ConnectorAuth.tsx +++ b/frontend/src/components/ConnectorAuth.tsx @@ -16,7 +16,12 @@ const providerLabel = (provider: string) => { return map[provider] || provider.replace(/_/g, ' '); }; -const ConnectorAuth: React.FC = ({ provider, onSuccess, onError, label }) => { +const ConnectorAuth: React.FC = ({ + provider, + onSuccess, + onError, + label, +}) => { const token = useSelector(selectToken); const completedRef = useRef(false); const intervalRef = useRef(null); @@ -31,8 +36,12 @@ const ConnectorAuth: React.FC = ({ provider, onSuccess, onEr const handleAuthMessage = (event: MessageEvent) => { const successGeneric = event.data?.type === 'connector_auth_success'; - const successProvider = event.data?.type === `${provider}_auth_success` || event.data?.type === 'google_drive_auth_success'; - const errorProvider = event.data?.type === `${provider}_auth_error` || event.data?.type 
=== 'google_drive_auth_error'; + const successProvider = + event.data?.type === `${provider}_auth_success` || + event.data?.type === 'google_drive_auth_success'; + const errorProvider = + event.data?.type === `${provider}_auth_error` || + event.data?.type === 'google_drive_auth_error'; if (successGeneric || successProvider) { completedRef.current = true; @@ -54,12 +63,17 @@ const ConnectorAuth: React.FC = ({ provider, onSuccess, onEr cleanup(); const apiHost = import.meta.env.VITE_API_HOST; - const authResponse = await fetch(`${apiHost}/api/connectors/auth?provider=${provider}`, { - headers: { Authorization: `Bearer ${token}` }, - }); + const authResponse = await fetch( + `${apiHost}/api/connectors/auth?provider=${provider}`, + { + headers: { Authorization: `Bearer ${token}` }, + }, + ); if (!authResponse.ok) { - throw new Error(`Failed to get authorization URL: ${authResponse.status}`); + throw new Error( + `Failed to get authorization URL: ${authResponse.status}`, + ); } const authData = await authResponse.json(); @@ -70,10 +84,12 @@ const ConnectorAuth: React.FC = ({ provider, onSuccess, onEr const authWindow = window.open( authData.authorization_url, `${provider}-auth`, - 'width=500,height=600,scrollbars=yes,resizable=yes' + 'width=500,height=600,scrollbars=yes,resizable=yes', ); if (!authWindow) { - throw new Error('Failed to open authentication window. Please allow popups.'); + throw new Error( + 'Failed to open authentication window. Please allow popups.', + ); } window.addEventListener('message', handleAuthMessage as any); @@ -98,10 +114,13 @@ const ConnectorAuth: React.FC = ({ provider, onSuccess, onEr return ( @@ -109,4 +128,3 @@ const ConnectorAuth: React.FC = ({ provider, onSuccess, onEr }; export default ConnectorAuth; - diff --git a/frontend/src/components/ConnectorTreeComponent.tsx b/frontend/src/components/ConnectorTreeComponent.tsx index dc0f937b..73cf6ae0 100644 --- a/frontend/src/components/ConnectorTreeComponent.tsx +++ b/frontend/src/components/ConnectorTreeComponent.tsx @@ -240,8 +240,6 @@ const ConnectorTreeComponent: React.FC = ({ return current; }; - - const getMenuRef = (id: string) => { if (!menuRefs.current[id]) { menuRefs.current[id] = React.createRef(); diff --git a/frontend/src/components/FileTreeComponent.tsx b/frontend/src/components/FileTreeComponent.tsx index 7f124fa7..32b0839b 100644 --- a/frontend/src/components/FileTreeComponent.tsx +++ b/frontend/src/components/FileTreeComponent.tsx @@ -136,8 +136,6 @@ const FileTreeComponent: React.FC = ({ } }, [docId, token]); - - const navigateToDirectory = (dirName: string) => { setCurrentPath((prev) => [...prev, dirName]); }; @@ -445,18 +443,18 @@ const FileTreeComponent: React.FC = ({ const renderPathNavigation = () => { return ( -
+
{/* Left side with path navigation */}
- + {sourceName} {currentPath.length > 0 && ( @@ -487,8 +485,7 @@ const FileTreeComponent: React.FC = ({
-
- +
{processingRef.current && (
{currentOpRef.current === 'add' @@ -497,13 +494,13 @@ const FileTreeComponent: React.FC = ({
)} - {renderFileSearch()} + {renderFileSearch()} {/* Add file button */} {!processingRef.current && ( + +
+ + +
+
+
+
+ + ) + ); +} diff --git a/frontend/src/modals/ShareConversationModal.tsx b/frontend/src/modals/ShareConversationModal.tsx index 99262f01..624d64f5 100644 --- a/frontend/src/modals/ShareConversationModal.tsx +++ b/frontend/src/modals/ShareConversationModal.tsx @@ -60,7 +60,7 @@ export const ShareConversationModal = ({ const [sourcePath, setSourcePath] = useState<{ label: string; value: string; - } | null>(preSelectedDoc ? extractDocPaths([preSelectedDoc])[0] : null); + } | null>(preSelectedDoc ? extractDocPaths(preSelectedDoc)[0] : null); const handleCopyKey = (url: string) => { navigator.clipboard.writeText(url); @@ -105,14 +105,14 @@ export const ShareConversationModal = ({ return (
-

+

{t('modals.shareConv.label')}

-

+

{t('modals.shareConv.note')}

- + {t('modals.shareConv.option')} )}
- + {`${domain}/share/${identifier ?? '....'}`} {status === 'fetched' ? ( ) : (
@@ -336,7 +335,7 @@ export default function Sources({
{loading ? ( -
+
) : !currentDocuments?.length ? ( @@ -351,17 +350,18 @@ export default function Sources({

) : ( -
+
{currentDocuments.map((document, index) => { const docId = document.id ? document.id.toString() : ''; return (
@@ -426,7 +426,7 @@ export default function Sources({ {document.date ? formatDate(document.date) : ''} @@ -436,7 +436,7 @@ export default function Sources({ {document.tokens diff --git a/frontend/src/settings/ToolConfig.tsx b/frontend/src/settings/ToolConfig.tsx index 61a1d850..bca5c6ce 100644 --- a/frontend/src/settings/ToolConfig.tsx +++ b/frontend/src/settings/ToolConfig.tsx @@ -30,9 +30,22 @@ export default function ToolConfig({ handleGoBack: () => void; }) { const token = useSelector(selectToken); - const [authKey, setAuthKey] = React.useState( - 'token' in tool.config ? tool.config.token : '', - ); + const [authKey, setAuthKey] = React.useState(() => { + if (tool.name === 'mcp_tool') { + const config = tool.config as any; + if (config.auth_type === 'api_key') { + return config.api_key || ''; + } else if (config.auth_type === 'bearer') { + return config.encrypted_token || ''; + } else if (config.auth_type === 'basic') { + return config.password || ''; + } + return ''; + } else if ('token' in tool.config) { + return tool.config.token; + } + return ''; + }); const [customName, setCustomName] = React.useState( tool.customName || '', ); @@ -97,6 +110,26 @@ export default function ToolConfig({ }; const handleSaveChanges = () => { + let configToSave; + if (tool.name === 'api_tool') { + configToSave = tool.config; + } else if (tool.name === 'mcp_tool') { + configToSave = { ...tool.config } as any; + const mcpConfig = tool.config as any; + + if (authKey.trim()) { + if (mcpConfig.auth_type === 'api_key') { + configToSave.api_key = authKey; + } else if (mcpConfig.auth_type === 'bearer') { + configToSave.encrypted_token = authKey; + } else if (mcpConfig.auth_type === 'basic') { + configToSave.password = authKey; + } + } + } else { + configToSave = { token: authKey }; + } + userService .updateTool( { @@ -105,7 +138,7 @@ export default function ToolConfig({ displayName: tool.displayName, customName: customName, description: tool.description, - config: tool.name === 'api_tool' ? tool.config : { token: authKey }, + config: configToSave, actions: 'actions' in tool ? tool.actions : [], status: tool.status, }, @@ -196,7 +229,15 @@ export default function ToolConfig({
{Object.keys(tool?.config).length !== 0 && tool.name !== 'api_tool' && (

- {t('settings.tools.authentication')} + {tool.name === 'mcp_tool' + ? (tool.config as any)?.auth_type === 'bearer' + ? 'Bearer Token' + : (tool.config as any)?.auth_type === 'api_key' + ? 'API Key' + : (tool.config as any)?.auth_type === 'basic' + ? 'Password' + : t('settings.tools.authentication') + : t('settings.tools.authentication')}

)}
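Note: the mcp_tool auth handling in ToolConfig.tsx repeats the same `auth_type` ternaries in several places (initial state, the label above, the placeholder below, and both save paths). A small lookup could centralize that mapping; this is a sketch only, and the helper names are assumptions rather than code from this PR.

```ts
// Hypothetical helper (not part of this PR): centralizes the auth_type
// mapping that the ToolConfig.tsx changes repeat for mcp_tool.
type MCPAuthType = 'api_key' | 'bearer' | 'basic' | 'none';

interface MCPAuthField {
  label: string; // shown as the field label and input placeholder
  configKey: string; // key written back into the tool config on save
}

const MCP_AUTH_FIELDS: Record<MCPAuthType, MCPAuthField | null> = {
  api_key: { label: 'API Key', configKey: 'api_key' },
  bearer: { label: 'Bearer Token', configKey: 'encrypted_token' },
  basic: { label: 'Password', configKey: 'password' },
  none: null,
};

function mcpAuthField(config: { auth_type?: string }): MCPAuthField | null {
  return MCP_AUTH_FIELDS[(config.auth_type as MCPAuthType) ?? 'none'] ?? null;
}

// Save-time sketch: write the entered key into the matching config field.
function applyMcpAuthKey(
  config: Record<string, unknown>,
  authKey: string,
): Record<string, unknown> {
  const field = mcpAuthField(config as { auth_type?: string });
  if (!field || !authKey.trim()) return { ...config };
  return { ...config, [field.configKey]: authKey };
}
```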
@@ -208,7 +249,17 @@ export default function ToolConfig({ value={authKey} onChange={(e) => setAuthKey(e.target.value)} borderVariant="thin" - placeholder={t('modals.configTool.apiKeyPlaceholder')} + placeholder={ + tool.name === 'mcp_tool' + ? (tool.config as any)?.auth_type === 'bearer' + ? 'Bearer Token' + : (tool.config as any)?.auth_type === 'api_key' + ? 'API Key' + : (tool.config as any)?.auth_type === 'basic' + ? 'Password' + : t('modals.configTool.apiKeyPlaceholder') + : t('modals.configTool.apiKeyPlaceholder') + } />
)} @@ -450,6 +501,26 @@ export default function ToolConfig({ setModalState={(state) => setShowUnsavedModal(state === 'ACTIVE')} submitLabel={t('settings.tools.saveAndLeave')} handleSubmit={() => { + let configToSave; + if (tool.name === 'api_tool') { + configToSave = tool.config; + } else if (tool.name === 'mcp_tool') { + configToSave = { ...tool.config } as any; + const mcpConfig = tool.config as any; + + if (authKey.trim()) { + if (mcpConfig.auth_type === 'api_key') { + configToSave.api_key = authKey; + } else if (mcpConfig.auth_type === 'bearer') { + configToSave.encrypted_token = authKey; + } else if (mcpConfig.auth_type === 'basic') { + configToSave.password = authKey; + } + } + } else { + configToSave = { token: authKey }; + } + userService .updateTool( { @@ -458,10 +529,7 @@ export default function ToolConfig({ displayName: tool.displayName, customName: customName, description: tool.description, - config: - tool.name === 'api_tool' - ? tool.config - : { token: authKey }, + config: configToSave, actions: 'actions' in tool ? tool.actions : [], status: tool.status, }, diff --git a/frontend/src/upload/Upload.tsx b/frontend/src/upload/Upload.tsx index fe675330..a6db7e56 100644 --- a/frontend/src/upload/Upload.tsx +++ b/frontend/src/upload/Upload.tsx @@ -4,8 +4,15 @@ import { useTranslation } from 'react-i18next'; import { useDispatch, useSelector } from 'react-redux'; import userService from '../api/services/userService'; -import { getSessionToken } from '../utils/providerUtils'; - +import { + getSessionToken, + setSessionToken, + removeSessionToken, +} from '../utils/providerUtils'; +import { formatDate } from '../utils/dateTimeUtils'; +import { formatBytes } from '../utils/stringUtils'; +import FileUpload from '../assets/file_upload.svg'; +import WebsiteCollect from '../assets/website_collect.svg'; import Dropdown from '../components/Dropdown'; import Input from '../components/Input'; import ToggleSwitch from '../components/ToggleSwitch'; @@ -377,7 +384,8 @@ function Upload({ data?.find( (d: Doc) => d.type?.toLowerCase() === 'local', ), - )); + ), + ); }); setProgress( (progress) => diff --git a/frontend/src/utils/providerUtils.ts b/frontend/src/utils/providerUtils.ts index b200928b..25236ad2 100644 --- a/frontend/src/utils/providerUtils.ts +++ b/frontend/src/utils/providerUtils.ts @@ -3,7 +3,6 @@ * Follows the convention: {provider}_session_token */ - export const getSessionToken = (provider: string): string | null => { return localStorage.getItem(`${provider}_session_token`); }; @@ -14,4 +13,4 @@ export const setSessionToken = (provider: string, token: string): void => { export const removeSessionToken = (provider: string): void => { localStorage.removeItem(`${provider}_session_token`); -}; \ No newline at end of file +};
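For reference, the new `/api/mcp_server/test` and `/api/mcp_server/save` endpoints are exposed on the frontend through `userService.testMCPConnection` and `userService.saveMCPServer`. A minimal usage sketch follows; the component flow, example server URL, and credential value are assumptions, and it assumes `apiClient.post` resolves to a fetch `Response` (hence the `.json()` calls). The `config` keys follow what the backend reads (`server_url`, `auth_type`, `bearer_token`, `timeout`); plaintext credential fields are encrypted server-side and stripped from the stored config.

```ts
import userService from '../api/services/userService';

// Hypothetical usage of the MCP endpoints added in this PR.
async function addMcpServer(token: string | null) {
  const config = {
    server_url: 'https://mcp.example.com', // assumption: example URL
    auth_type: 'bearer',
    bearer_token: 'my-token', // assumption: example credential
    timeout: 30,
  };

  // 1. Test the connection before saving (backend expects { config }).
  const testResponse = await userService.testMCPConnection({ config }, token);
  const testResult = await testResponse.json();
  if (!testResult.success) {
    throw new Error(testResult.error || 'MCP connection test failed');
  }

  // 2. Save the server; the backend discovers tools and returns their count.
  const saveResponse = await userService.saveMCPServer(
    { displayName: 'Example MCP Server', config, status: true },
    token,
  );
  return saveResponse.json(); // { success, id, message, tools_count }
}
```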