Mirror of https://github.com/arc53/DocsGPT.git, synced 2025-11-29 08:33:20 +00:00

Compare commits: copilot/fi...dependabot (165 commits)
Commit list (165 commits; author and date columns not captured). SHA1s, in the order listed:

37c672b891, f7db22edff, 5a9bc6d2bf, f7f6042579, c4a598f3d3, 7e2cbdd88c, 3b3a04a249, f9b2c95695, c2c18e8319, 384ad3e0ac, 8c986aaa7f, bb4ea76d30, 2868e47cf8, e0adc3e5d5, e55d1a5865, 018273c6b2, 44b8a11c04, 56e5aba559, 46904ccd54, 5b7c7a4471, 9da4215d1f, f39ac9945f, a0cc2e4d46, 4065041a9f, f08067a161, 545caacfa3, a06f646637, 578c68205a, f09f1433a9, 15a9e97a1e, b3af4ee50b, e25b988dc8, 2410bd8654, 44d21ab703, e283957c8f, b1210c4902, e7430f0fbc, 92d6ae54c3, f82be23ca9, 8c3f75e3e2, 193d59f193, c2bebbaefa, 7ae5a9c5a5, 3b69bea23d, ab05726b99, b2b04268e9, 927d10d66e, b67329623c, 6a02bcf15b, cd0fbf79a3, 15d2d0115b, d1a0fe6e91, 1db80d140f, 896dcf1f9e, 819a12fb49, c68273706c, 6bb0cd535a, cb9ec69cf6, 143854fa81, 2f48a3d7d5, ec95dafe1e, 3d1fe724e5, 5c615d6f2d, d72558eb36, 65c33ad915, 9be128a963, eb05132008, f94a093e8c, 0d0c2daf64, 823d948b25, 56831fbcf2, bf49b9cb88, e01adffbad, 08a5d52d82, fdae235742, 9903fad1e9, 14bbd5338d, 4a236c2f6f, 0a8cdbd7f1, 94c49843be, 9281fac898, 0b2736f454, ae116b0d0d, ba260e3382, 1282e7687f, b1d8266eef, 7acae6935b, 092c01cae7, 56a1066c30, 1356d71839, 1eb011e8c3, e349eb28b0, b000b235a2, 16fe92282e, e218e88cf4, 888ea81a32, 735fab7640, 45745c2a47, 4caff0fcf6, 762ea6ce7f, 8b4f6553f3, a61e44d175, e1b1558fc9, 53225bda4e, 5212769848, d5ded3c9f4, c92d778894, 829abd1ad6, 266d256a07, 8380cac3e7, a24652f901, 2d203d3c70, 48d21600da, 2508d0fbb3, e90e80c289, 5e4748f9d9, 212952f3e9, f99b6496c5, 67423d51b9, 58465ece65, 8ede3a0173, ad2f0f8950, 76973a4b4c, b198e2e029, 4d6ea401b5, b00c4cc3b6, 4185e64c65, 6eb2c884a2, 6c0362a4cf, 50b1755a63, ff3c7eb5fb, 3755316d49, f952046847, 969cdb4a63, f336d44595, a53f93c195, fcb334ce33, 8ddf04a904, 29698ca169, a9baf7436a, 99a8962183, afc5b15a6b, b6ab508e27, 789e65557a, 8a7806ab2d, 493303e103, 1d9af05e9e, 5b07c5f2e8, a38d71bbfb, a24a3f868c, f60c516185, 26f4646304, 3a351f67e6, e7c09cb91e, c62040e232, d3b592bffc, 4fcbdae5bf, ca95d7275a, 61baf3701c, bbce872ac5, 0f7ebcd8e4, 82fc19e7b7, 2ef23fe1b3, fd905b1a06, ade704d065
.github/workflows/bandit.yaml (vendored, 2 changes)

@@ -21,7 +21,7 @@ jobs:
       - uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: '3.12'
       - name: Install dependencies
.github/workflows/pytest.yml (vendored, 2 changes)

@@ -10,7 +10,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install dependencies
README.md (26 changes)

@@ -3,11 +3,11 @@
 </h1>

 <p align="center">
-  <strong>Open-Source RAG Assistant</strong>
+  <strong>Private AI for agents, assistants and enterprise search</strong>
 </p>

 <p align="left">
-  <strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is an open-source genAI tool that helps users get reliable answers from any knowledge source, while avoiding hallucinations. It enables quick and reliable information retrieval, with tooling and agentic system capability built in.
+  <strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is an open-source AI platform for building intelligent agents and assistants. Features Agent Builder, deep research tools, document analysis (PDF, Office, web content), Multi-model support (choose your provider or run locally), and rich API connectivity for agents with actionable tools and integrations. Deploy anywhere with complete privacy control.
 </p>

 <div align="center">

@@ -19,10 +19,10 @@
 <a href="https://discord.gg/n5BX8dh8rU"></a>
 <a href="https://twitter.com/docsgptai"></a>

-<a href="https://docs.docsgpt.cloud/quickstart">⚡️ Quickstart</a> • <a href="https://app.docsgpt.cloud/">☁️ Cloud Version</a> • <a href="https://discord.gg/n5BX8dh8rU">💬 Discord</a>
-<br>
-<a href="https://docs.docsgpt.cloud/">📖 Documentation</a> • <a href="https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md">👫 Contribute</a> • <a href="https://blog.docsgpt.cloud/">🗞 Blog</a>
-<br>
+<a href="https://docs.docsgpt.cloud/quickstart">⚡️ Quickstart</a> • <a href="https://app.docsgpt.cloud/">☁️ Cloud Version</a> • <a href="https://discord.gg/n5BX8dh8rU">💬 Discord</a>
+<br>
+<a href="https://docs.docsgpt.cloud/">📖 Documentation</a> • <a href="https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md">👫 Contribute</a> • <a href="https://blog.docsgpt.cloud/">🗞 Blog</a>
+<br>

 </div>
 <div align="center">

@@ -53,9 +53,10 @@
 - [x] New input box in the conversation menu (April 2025)
 - [x] Add triggerable actions / tools (webhook) (April 2025)
 - [x] Agent optimisations (May 2025)
-- [ ] Filesystem sources update (July 2025)
-- [ ] Anthropic Tool compatibility (July 2025)
-- [ ] MCP support (July 2025)
+- [x] Filesystem sources update (July 2025)
+- [x] Json Responses (August 2025)
+- [ ] Sharepoint integration (August 2025)
+- [ ] MCP support (August 2025)
+- [ ] Add OAuth 2.0 authentication for tools and sources (August 2025)
 - [ ] Agent scheduling

@@ -71,11 +72,10 @@ We're eager to provide personalized assistance when deploying your DocsGPT to a

 ## Join the Lighthouse Program 🌟

-Calling all developers and GenAI innovators! The **DocsGPT Lighthouse Program** connects technical leaders actively deploying or extending DocsGPT in real-world scenarios. Collaborate directly with our team to shape the roadmap, access priority support, and build enterprise-ready solutions with exclusive community insights.
+Calling all developers and GenAI innovators! The **DocsGPT Lighthouse Program** connects technical leaders actively deploying or extending DocsGPT in real-world scenarios. Collaborate directly with our team to shape the roadmap, access priority support, and build enterprise-ready solutions with exclusive community insights.

 [Learn More & Apply →](https://docs.google.com/forms/d/1KAADiJinUJ8EMQyfTXUIGyFbqINNClNR3jBNWq7DgTE)

 ## QuickStart

 > [!Note]

@@ -106,7 +106,7 @@ A more detailed [Quickstart](https://docs.docsgpt.cloud/quickstart) is available
 PowerShell -ExecutionPolicy Bypass -File .\setup.ps1
 ```

-Either script will guide you through setting up DocsGPT. Four options available: using the public API, running locally, connecting to a local inference engine, or using a cloud API provider. Scripts will automatically configure your `.env` file and handle necessary downloads and installations based on your chosen option.
+Either script will guide you through setting up DocsGPT. Four options available: using the public API, running locally, connecting to a local inference engine, or using a cloud API provider. Scripts will automatically configure your `.env` file and handle necessary downloads and installations based on your chosen option.

 **Navigate to http://localhost:5173/**

@@ -115,6 +115,7 @@ To stop DocsGPT, open a terminal in the `DocsGPT` directory and run:
 ```bash
 docker compose -f deployment/docker-compose.yaml down
 ```

+(or use the specific `docker compose down` command shown after running the setup script).

 > [!Note]

@@ -142,7 +143,6 @@ Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information abou

 We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing.
-

 ## Many Thanks To Our Contributors⚡

 <a href="https://github.com/arc53/DocsGPT/graphs/contributors" alt="View Contributors">
application/agents/base.py (file path not shown in the capture; inferred from the `BaseAgent` class and the `application.agents` imports)

@@ -1,3 +1,4 @@
 import logging
 import uuid
 from abc import ABC, abstractmethod
 from typing import Dict, Generator, List, Optional

@@ -6,15 +7,15 @@ from bson.objectid import ObjectId
 from application.agents.tools.tool_action_parser import ToolActionParser
 from application.agents.tools.tool_manager import ToolManager

 from application.core.mongo_db import MongoDB
 from application.core.settings import settings

 from application.llm.handlers.handler_creator import LLMHandlerCreator
 from application.llm.llm_creator import LLMCreator
 from application.logging import build_stack_data, log_activity, LogContext
 from application.retriever.base import BaseRetriever

 logger = logging.getLogger(__name__)


 class BaseAgent(ABC):
     def __init__(

@@ -28,6 +29,7 @@ class BaseAgent(ABC):
         chat_history: Optional[List[Dict]] = None,
         decoded_token: Optional[Dict] = None,
         attachments: Optional[List[Dict]] = None,
+        json_schema: Optional[Dict] = None,
     ):
         self.endpoint = endpoint
         self.llm_name = llm_name

@@ -51,6 +53,7 @@ class BaseAgent(ABC):
             llm_name if llm_name else "default"
         )
         self.attachments = attachments or []
+        self.json_schema = json_schema

     @log_activity()
     def gen(

@@ -137,6 +140,40 @@ class BaseAgent(ABC):
         tool_id, action_name, call_args = parser.parse_args(call)

+        call_id = getattr(call, "id", None) or str(uuid.uuid4())
+
+        # Check if parsing failed
+        if tool_id is None or action_name is None:
+            error_message = f"Error: Failed to parse LLM tool call. Tool name: {getattr(call, 'name', 'unknown')}"
+            logger.error(error_message)
+
+            tool_call_data = {
+                "tool_name": "unknown",
+                "call_id": call_id,
+                "action_name": getattr(call, 'name', 'unknown'),
+                "arguments": call_args or {},
+                "result": f"Failed to parse tool call. Invalid tool name format: {getattr(call, 'name', 'unknown')}",
+            }
+            yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
+            self.tool_calls.append(tool_call_data)
+            return "Failed to parse tool call.", call_id
+
+        # Check if tool_id exists in available tools
+        if tool_id not in tools_dict:
+            error_message = f"Error: Tool ID '{tool_id}' extracted from LLM call not found in available tools_dict. Available IDs: {list(tools_dict.keys())}"
+            logger.error(error_message)
+
+            # Return error result
+            tool_call_data = {
+                "tool_name": "unknown",
+                "call_id": call_id,
+                "action_name": f"{action_name}_{tool_id}",
+                "arguments": call_args,
+                "result": f"Tool with ID {tool_id} not found. Available tools: {list(tools_dict.keys())}",
+            }
+            yield {"type": "tool_call", "data": {**tool_call_data, "status": "error"}}
+            self.tool_calls.append(tool_call_data)
+            return f"Tool with ID {tool_id} not found.", call_id

         tool_call_data = {
             "tool_name": tools_dict[tool_id]["name"],
             "call_id": call_id,

@@ -283,6 +320,21 @@ class BaseAgent(ABC):
             and self.tools
         ):
             gen_kwargs["tools"] = self.tools

+        if (
+            self.json_schema
+            and hasattr(self.llm, "_supports_structured_output")
+            and self.llm._supports_structured_output()
+        ):
+            structured_format = self.llm.prepare_structured_output_format(
+                self.json_schema
+            )
+            if structured_format:
+                if self.llm_name == "openai":
+                    gen_kwargs["response_format"] = structured_format
+                elif self.llm_name == "google":
+                    gen_kwargs["response_schema"] = structured_format

         resp = self.llm.gen_stream(**gen_kwargs)

         if log_context:

@@ -307,11 +359,25 @@ class BaseAgent(ABC):
         return resp

     def _handle_response(self, response, tools_dict, messages, log_context):
+        is_structured_output = (
+            self.json_schema is not None
+            and hasattr(self.llm, "_supports_structured_output")
+            and self.llm._supports_structured_output()
+        )
+
         if isinstance(response, str):
-            yield {"answer": response}
+            answer_data = {"answer": response}
+            if is_structured_output:
+                answer_data["structured"] = True
+                answer_data["schema"] = self.json_schema
+            yield answer_data
             return
         if hasattr(response, "message") and getattr(response.message, "content", None):
-            yield {"answer": response.message.content}
+            answer_data = {"answer": response.message.content}
+            if is_structured_output:
+                answer_data["structured"] = True
+                answer_data["schema"] = self.json_schema
+            yield answer_data
             return
         processed_response_gen = self._llm_handler(
             response, tools_dict, messages, log_context, self.attachments

@@ -319,8 +385,16 @@ class BaseAgent(ABC):

         for event in processed_response_gen:
             if isinstance(event, str):
-                yield {"answer": event}
+                answer_data = {"answer": event}
+                if is_structured_output:
+                    answer_data["structured"] = True
+                    answer_data["schema"] = self.json_schema
+                yield answer_data
             elif hasattr(event, "message") and getattr(event.message, "content", None):
-                yield {"answer": event.message.content}
+                answer_data = {"answer": event.message.content}
+                if is_structured_output:
+                    answer_data["structured"] = True
+                    answer_data["schema"] = self.json_schema
+                yield answer_data
             elif isinstance(event, dict) and "type" in event:
                 yield event
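The provider split above is the core of the new JSON-responses feature: the same `json_schema` travels to the LLM under a different keyword depending on the provider. A minimal sketch of that selection (the schema and the helper are illustrative, not code from the patch):

```python
# Illustrative helper mirroring the patch's provider-specific kwarg routing.
json_schema = {
    "type": "object",
    "properties": {"title": {"type": "string"}},
    "required": ["title"],
}


def build_gen_kwargs(llm_name: str, structured_format: dict) -> dict:
    kwargs = {}
    if llm_name == "openai":
        kwargs["response_format"] = structured_format  # OpenAI-style keyword
    elif llm_name == "google":
        kwargs["response_schema"] = structured_format  # Gemini-style keyword
    return kwargs


print(build_gen_kwargs("openai", json_schema))
print(build_gen_kwargs("google", json_schema))
```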
application/agents/tools/tool_action_parser.py (path not shown in the capture; inferred from the `ToolActionParser` hunk context and the import in base.py above)

@@ -19,8 +19,20 @@ class ToolActionParser:
     def _parse_openai_llm(self, call):
         try:
             call_args = json.loads(call.arguments)
-            tool_id = call.name.split("_")[-1]
-            action_name = call.name.rsplit("_", 1)[0]
+            tool_parts = call.name.split("_")
+
+            # If the tool name doesn't contain an underscore, it's likely a hallucinated tool
+            if len(tool_parts) < 2:
+                logger.warning(f"Invalid tool name format: {call.name}. Expected format: action_name_tool_id")
+                return None, None, None
+
+            tool_id = tool_parts[-1]
+            action_name = "_".join(tool_parts[:-1])
+
+            # Validate that tool_id looks like a numerical ID
+            if not tool_id.isdigit():
+                logger.warning(f"Tool ID '{tool_id}' is not numerical. This might be a hallucinated tool call.")
+
         except (AttributeError, TypeError) as e:
             logger.error(f"Error parsing OpenAI LLM call: {e}")
             return None, None, None

@@ -29,8 +41,20 @@ class ToolActionParser:
     def _parse_google_llm(self, call):
         try:
             call_args = call.arguments
-            tool_id = call.name.split("_")[-1]
-            action_name = call.name.rsplit("_", 1)[0]
+            tool_parts = call.name.split("_")
+
+            # If the tool name doesn't contain an underscore, it's likely a hallucinated tool
+            if len(tool_parts) < 2:
+                logger.warning(f"Invalid tool name format: {call.name}. Expected format: action_name_tool_id")
+                return None, None, None
+
+            tool_id = tool_parts[-1]
+            action_name = "_".join(tool_parts[:-1])
+
+            # Validate that tool_id looks like a numerical ID
+            if not tool_id.isdigit():
+                logger.warning(f"Tool ID '{tool_id}' is not numerical. This might be a hallucinated tool call.")
+
         except (AttributeError, TypeError) as e:
             logger.error(f"Error parsing Google LLM call: {e}")
             return None, None, None
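Both parsers now enforce the same `action_name_tool_id` naming convention before trusting a tool call. A standalone sketch of that rule (the helper and sample names are illustrative):

```python
# Illustrative: the "action_name_tool_id" split both parsers now apply.
def split_tool_name(name: str):
    parts = name.split("_")
    if len(parts) < 2:  # no underscore at all -> likely a hallucinated tool name
        return None, None
    action_name = "_".join(parts[:-1])  # everything before the last underscore
    tool_id = parts[-1]                 # trailing segment; expected to be numeric
    return action_name, tool_id


print(split_tool_name("get_weather_42"))  # -> ('get_weather', '42')
print(split_tool_name("badname"))         # -> (None, None)
```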
New file (path not shown in the capture; likely application/api/__init__.py, since the files below import `api` from `application.api`):

@@ -0,0 +1,7 @@
from flask_restx import Api

api = Api(
    version="1.0",
    title="DocsGPT API",
    description="API for DocsGPT",
)
New file (path not shown in the capture; likely application/api/answer/__init__.py, registering the answer blueprint and routes):

@@ -0,0 +1,19 @@
from flask import Blueprint

from application.api import api
from application.api.answer.routes.answer import AnswerResource
from application.api.answer.routes.base import answer_ns
from application.api.answer.routes.stream import StreamResource


answer = Blueprint("answer", __name__)

api.add_namespace(answer_ns)


def init_answer_routes():
    api.add_resource(StreamResource, "/stream")
    api.add_resource(AnswerResource, "/api/answer")


init_answer_routes()
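For context, a hypothetical sketch of how a `flask_restx` `Api` and namespace like the ones above attach to a Flask app; none of this wiring appears in the diff itself, so treat every name here as an assumption:

```python
# Hypothetical wiring sketch; DocsGPT's actual app factory is not part of this diff.
from flask import Flask
from flask_restx import Api, Namespace, Resource

app = Flask(__name__)
api = Api(app, version="1.0", title="DocsGPT API", description="API for DocsGPT")

answer_ns = Namespace("answer", description="Answer related operations", path="/")
api.add_namespace(answer_ns)


@answer_ns.route("/ping")
class Ping(Resource):
    def get(self):
        return {"status": "ok"}  # GET /ping -> {"status": "ok"}


if __name__ == "__main__":
    app.run(port=5000)
```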
Deleted file: 914 lines (path not shown in the capture; this is the old monolithic answer routes module that the new routes/ package below replaces):

@@ -1,914 +0,0 @@
import asyncio
import datetime
import json
import logging
import os
import traceback

from bson.dbref import DBRef
from bson.objectid import ObjectId
from flask import Blueprint, make_response, request, Response
from flask_restx import fields, Namespace, Resource

from application.agents.agent_creator import AgentCreator

from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.error import bad_request
from application.extensions import api
from application.llm.llm_creator import LLMCreator
from application.retriever.retriever_creator import RetrieverCreator
from application.utils import check_required_fields, limit_chat_history

logger = logging.getLogger(__name__)

mongo = MongoDB.get_client()
db = mongo[settings.MONGO_DB_NAME]
conversations_collection = db["conversations"]
sources_collection = db["sources"]
prompts_collection = db["prompts"]
agents_collection = db["agents"]
user_logs_collection = db["user_logs"]
attachments_collection = db["attachments"]

answer = Blueprint("answer", __name__)
answer_ns = Namespace("answer", description="Answer related operations", path="/")
api.add_namespace(answer_ns)

gpt_model = ""
# to have some kind of default behaviour
if settings.LLM_PROVIDER == "openai":
    gpt_model = "gpt-4o-mini"
elif settings.LLM_PROVIDER == "anthropic":
    gpt_model = "claude-2"
elif settings.LLM_PROVIDER == "groq":
    gpt_model = "llama3-8b-8192"
elif settings.LLM_PROVIDER == "novita":
    gpt_model = "deepseek/deepseek-r1"

if settings.LLM_NAME:  # in case there is particular model name configured
    gpt_model = settings.LLM_NAME

# load the prompts
current_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
    chat_combine_template = f.read()

with open(os.path.join(current_dir, "prompts", "chat_reduce_prompt.txt"), "r") as f:
    chat_reduce_template = f.read()

with open(os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r") as f:
    chat_combine_creative = f.read()

with open(os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r") as f:
    chat_combine_strict = f.read()

api_key_set = settings.API_KEY is not None
embeddings_key_set = settings.EMBEDDINGS_KEY is not None


async def async_generate(chain, question, chat_history):
    result = await chain.arun({"question": question, "chat_history": chat_history})
    return result


def run_async_chain(chain, question, chat_history):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    result = {}
    try:
        answer = loop.run_until_complete(async_generate(chain, question, chat_history))
    finally:
        loop.close()
    result["answer"] = answer
    return result


def get_agent_key(agent_id, user_id):
    if not agent_id:
        return None, False, None

    try:
        agent = agents_collection.find_one({"_id": ObjectId(agent_id)})
        if agent is None:
            raise Exception("Agent not found", 404)

        is_owner = agent.get("user") == user_id

        if is_owner:
            agents_collection.update_one(
                {"_id": ObjectId(agent_id)},
                {"$set": {"lastUsedAt": datetime.datetime.now(datetime.timezone.utc)}},
            )
            return str(agent["key"]), False, None

        is_shared_with_user = agent.get(
            "shared_publicly", False
        ) or user_id in agent.get("shared_with", [])

        if is_shared_with_user:
            return str(agent["key"]), True, agent.get("shared_token")

        raise Exception("Unauthorized access to the agent", 403)

    except Exception as e:
        logger.error(f"Error in get_agent_key: {str(e)}", exc_info=True)
        raise


def get_data_from_api_key(api_key):
    data = agents_collection.find_one({"key": api_key})
    if not data:
        raise Exception("Invalid API Key, please generate a new key", 401)

    source = data.get("source")
    if isinstance(source, DBRef):
        source_doc = db.dereference(source)
        data["source"] = str(source_doc["_id"])
        data["retriever"] = source_doc.get("retriever", data.get("retriever"))
    else:
        data["source"] = {}

    return data


def get_retriever(source_id: str):
    doc = sources_collection.find_one({"_id": ObjectId(source_id)})
    if doc is None:
        raise Exception("Source document does not exist", 404)
    retriever_name = None if "retriever" not in doc else doc["retriever"]
    return retriever_name


def is_azure_configured():
    return (
        settings.OPENAI_API_BASE
        and settings.OPENAI_API_VERSION
        and settings.AZURE_DEPLOYMENT_NAME
    )


def save_conversation(
    conversation_id,
    question,
    response,
    thought,
    source_log_docs,
    tool_calls,
    llm,
    decoded_token,
    index=None,
    api_key=None,
    agent_id=None,
    is_shared_usage=False,
    shared_token=None,
    attachment_ids=None,
):
    current_time = datetime.datetime.now(datetime.timezone.utc)
    if conversation_id is not None and index is not None:
        conversations_collection.update_one(
            {"_id": ObjectId(conversation_id), f"queries.{index}": {"$exists": True}},
            {
                "$set": {
                    f"queries.{index}.prompt": question,
                    f"queries.{index}.response": response,
                    f"queries.{index}.thought": thought,
                    f"queries.{index}.sources": source_log_docs,
                    f"queries.{index}.tool_calls": tool_calls,
                    f"queries.{index}.timestamp": current_time,
                    f"queries.{index}.attachments": attachment_ids,
                }
            },
        )
        # remove following queries from the array
        conversations_collection.update_one(
            {"_id": ObjectId(conversation_id), f"queries.{index}": {"$exists": True}},
            {"$push": {"queries": {"$each": [], "$slice": index + 1}}},
        )
    elif conversation_id is not None and conversation_id != "None":
        conversations_collection.update_one(
            {"_id": ObjectId(conversation_id)},
            {
                "$push": {
                    "queries": {
                        "prompt": question,
                        "response": response,
                        "thought": thought,
                        "sources": source_log_docs,
                        "tool_calls": tool_calls,
                        "timestamp": current_time,
                        "attachments": attachment_ids,
                    }
                }
            },
        )

    else:
        # create new conversation
        # generate summary
        messages_summary = [
            {
                "role": "assistant",
                "content": "Summarise following conversation in no more than 3 "
                "words, respond ONLY with the summary, use the same "
                "language as the user query",
            },
            {
                "role": "user",
                "content": "Summarise following conversation in no more than 3 words, "
                "respond ONLY with the summary, use the same language as the "
                "user query \n\nUser: " + question + "\n\n" + "AI: " + response,
            },
        ]

        completion = llm.gen(model=gpt_model, messages=messages_summary, max_tokens=30)
        conversation_data = {
            "user": decoded_token.get("sub"),
            "date": datetime.datetime.utcnow(),
            "name": completion,
            "queries": [
                {
                    "prompt": question,
                    "response": response,
                    "thought": thought,
                    "sources": source_log_docs,
                    "tool_calls": tool_calls,
                    "timestamp": current_time,
                    "attachments": attachment_ids,
                }
            ],
        }
        if api_key:
            if agent_id:
                conversation_data["agent_id"] = agent_id
            if is_shared_usage:
                conversation_data["is_shared_usage"] = is_shared_usage
                conversation_data["shared_token"] = shared_token
            api_key_doc = agents_collection.find_one({"key": api_key})
            if api_key_doc:
                conversation_data["api_key"] = api_key_doc["key"]
        conversation_id = conversations_collection.insert_one(
            conversation_data
        ).inserted_id
    return conversation_id
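The `$push` with an empty `$each` plus `$slice` in `save_conversation` above is a standard MongoDB idiom for truncating an array in place. A minimal standalone sketch (connection string, database, and collection names are illustrative):

```python
# Illustrative: truncate a document's "queries" array to its first (index + 1)
# entries. $each: [] pushes nothing; $slice then keeps only the first N items.
from bson.objectid import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")   # assumed local instance
conversations = client["docsgpt"]["conversations"]  # assumed db/collection names

conversation_id = ObjectId()  # placeholder; use a real conversation _id
index = 2                     # keep queries 0..2, drop any later entries
conversations.update_one(
    {"_id": conversation_id},
    {"$push": {"queries": {"$each": [], "$slice": index + 1}}},
)
```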
def get_prompt(prompt_id):
    if prompt_id == "default":
        prompt = chat_combine_template
    elif prompt_id == "creative":
        prompt = chat_combine_creative
    elif prompt_id == "strict":
        prompt = chat_combine_strict
    else:
        prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]
    return prompt


def complete_stream(
    question,
    agent,
    retriever,
    conversation_id,
    user_api_key,
    decoded_token,
    isNoneDoc=False,
    index=None,
    should_save_conversation=True,
    attachment_ids=None,
    agent_id=None,
    is_shared_usage=False,
    shared_token=None,
):
    try:
        response_full, thought, source_log_docs, tool_calls = "", "", [], []

        answer = agent.gen(query=question, retriever=retriever)

        for line in answer:
            if "answer" in line:
                response_full += str(line["answer"])
                data = json.dumps({"type": "answer", "answer": line["answer"]})
                yield f"data: {data}\n\n"
            elif "sources" in line:
                truncated_sources = []
                source_log_docs = line["sources"]
                for source in line["sources"]:
                    truncated_source = source.copy()
                    if "text" in truncated_source:
                        truncated_source["text"] = (
                            truncated_source["text"][:100].strip() + "..."
                        )
                    truncated_sources.append(truncated_source)
                if len(truncated_sources) > 0:
                    data = json.dumps({"type": "source", "source": truncated_sources})
                    yield f"data: {data}\n\n"
            elif "tool_calls" in line:
                tool_calls = line["tool_calls"]
            elif "thought" in line:
                thought += line["thought"]
                data = json.dumps({"type": "thought", "thought": line["thought"]})
                yield f"data: {data}\n\n"
            elif "type" in line:
                data = json.dumps(line)
                yield f"data: {data}\n\n"

        if isNoneDoc:
            for doc in source_log_docs:
                doc["source"] = "None"

        llm = LLMCreator.create_llm(
            settings.LLM_PROVIDER,
            api_key=settings.API_KEY,
            user_api_key=user_api_key,
            decoded_token=decoded_token,
        )

        if should_save_conversation:
            conversation_id = save_conversation(
                conversation_id,
                question,
                response_full,
                thought,
                source_log_docs,
                tool_calls,
                llm,
                decoded_token,
                index,
                api_key=user_api_key,
                attachment_ids=attachment_ids,
                agent_id=agent_id,
                is_shared_usage=is_shared_usage,
                shared_token=shared_token,
            )
        else:
            conversation_id = None

        # send the conversation id, then a data.type = "end" event so the client
        # knows the stream has finished
        data = json.dumps({"type": "id", "id": str(conversation_id)})
        yield f"data: {data}\n\n"

        retriever_params = retriever.get_params()
        user_logs_collection.insert_one(
            {
                "action": "stream_answer",
                "level": "info",
                "user": decoded_token.get("sub"),
                "api_key": user_api_key,
                "question": question,
                "response": response_full,
                "sources": source_log_docs,
                "retriever_params": retriever_params,
                "attachments": attachment_ids,
                "timestamp": datetime.datetime.now(datetime.timezone.utc),
            }
        )
        data = json.dumps({"type": "end"})
        yield f"data: {data}\n\n"
    except Exception as e:
        logger.error(f"Error in stream: {str(e)}", exc_info=True)
        data = json.dumps(
            {
                "type": "error",
                "error": "Please try again later. We apologize for any inconvenience.",
            }
        )
        yield f"data: {data}\n\n"
        return
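The `data: …\n\n` lines yielded by `complete_stream` are ordinary server-sent events. A minimal consumer sketch for the `/stream` route defined below (host, port, and payload are assumptions, not part of the diff):

```python
# Illustrative SSE consumer for the /stream endpoint.
import json

import requests

resp = requests.post(
    "http://localhost:7091/stream",  # assumed backend address
    json={"question": "What is DocsGPT?"},
    stream=True,
)
for raw in resp.iter_lines():
    if not raw.startswith(b"data: "):
        continue  # skip blank separator lines
    event = json.loads(raw[len(b"data: "):])
    if event["type"] == "answer":
        print(event["answer"], end="")  # incremental answer tokens
    elif event["type"] == "id":
        conversation_id = event["id"]   # id of the saved conversation, if any
    elif event["type"] == "end":
        break                           # stream finished
```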
@answer_ns.route("/stream")
|
||||
class Stream(Resource):
|
||||
stream_model = api.model(
|
||||
"StreamModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="Question to be asked"
|
||||
),
|
||||
"history": fields.List(
|
||||
fields.String, required=False, description="Chat history"
|
||||
),
|
||||
"conversation_id": fields.String(
|
||||
required=False, description="Conversation ID"
|
||||
),
|
||||
"prompt_id": fields.String(
|
||||
required=False, default="default", description="Prompt ID"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=2, description="Number of chunks"
|
||||
),
|
||||
"token_limit": fields.Integer(required=False, description="Token limit"),
|
||||
"retriever": fields.String(required=False, description="Retriever type"),
|
||||
"api_key": fields.String(required=False, description="API key"),
|
||||
"active_docs": fields.String(
|
||||
required=False, description="Active documents"
|
||||
),
|
||||
"isNoneDoc": fields.Boolean(
|
||||
required=False, description="Flag indicating if no document is used"
|
||||
),
|
||||
"index": fields.Integer(
|
||||
required=False, description="Index of the query to update"
|
||||
),
|
||||
"save_conversation": fields.Boolean(
|
||||
required=False,
|
||||
default=True,
|
||||
description="Whether to save the conversation",
|
||||
),
|
||||
"attachments": fields.List(
|
||||
fields.String, required=False, description="List of attachment IDs"
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@api.expect(stream_model)
|
||||
@api.doc(description="Stream a response based on the question and retriever")
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
required_fields = ["question"]
|
||||
if "index" in data:
|
||||
required_fields = ["question", "conversation_id"]
|
||||
missing_fields = check_required_fields(data, required_fields)
|
||||
if missing_fields:
|
||||
return missing_fields
|
||||
|
||||
save_conv = data.get("save_conversation", True)
|
||||
|
||||
try:
|
||||
question = data["question"]
|
||||
history = limit_chat_history(
|
||||
json.loads(data.get("history", "[]")), gpt_model=gpt_model
|
||||
)
|
||||
conversation_id = data.get("conversation_id")
|
||||
prompt_id = data.get("prompt_id", "default")
|
||||
attachment_ids = data.get("attachments", [])
|
||||
|
||||
index = data.get("index", None)
|
||||
chunks = int(data.get("chunks", 2))
|
||||
token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
|
||||
retriever_name = data.get("retriever", "classic")
|
||||
agent_id = data.get("agent_id", None)
|
||||
agent_type = settings.AGENT_NAME
|
||||
decoded_token = getattr(request, "decoded_token", None)
|
||||
user_sub = decoded_token.get("sub") if decoded_token else None
|
||||
agent_key, is_shared_usage, shared_token = get_agent_key(agent_id, user_sub)
|
||||
|
||||
if agent_key:
|
||||
data.update({"api_key": agent_key})
|
||||
else:
|
||||
agent_id = None
|
||||
|
||||
if "api_key" in data:
|
||||
data_key = get_data_from_api_key(data["api_key"])
|
||||
chunks = int(data_key.get("chunks", 2))
|
||||
prompt_id = data_key.get("prompt_id", "default")
|
||||
source = {"active_docs": data_key.get("source")}
|
||||
retriever_name = data_key.get("retriever", retriever_name)
|
||||
user_api_key = data["api_key"]
|
||||
agent_type = data_key.get("agent_type", agent_type)
|
||||
if is_shared_usage:
|
||||
decoded_token = request.decoded_token
|
||||
else:
|
||||
decoded_token = {"sub": data_key.get("user")}
|
||||
is_shared_usage = False
|
||||
|
||||
elif "active_docs" in data:
|
||||
source = {"active_docs": data["active_docs"]}
|
||||
retriever_name = get_retriever(data["active_docs"]) or retriever_name
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
else:
|
||||
source = {}
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
if not decoded_token:
|
||||
return make_response({"error": "Unauthorized"}, 401)
|
||||
|
||||
attachments = get_attachments_content(
|
||||
attachment_ids, decoded_token.get("sub")
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"/stream - request_data: {data}, source: {source}, attachments: {len(attachments)}",
|
||||
extra={"data": json.dumps({"request_data": data, "source": source})},
|
||||
)
|
||||
|
||||
prompt = get_prompt(prompt_id)
|
||||
if "isNoneDoc" in data and data["isNoneDoc"] is True:
|
||||
chunks = 0
|
||||
|
||||
agent = AgentCreator.create_agent(
|
||||
agent_type,
|
||||
endpoint="stream",
|
||||
llm_name=settings.LLM_PROVIDER,
|
||||
gpt_model=gpt_model,
|
||||
api_key=settings.API_KEY,
|
||||
user_api_key=user_api_key,
|
||||
prompt=prompt,
|
||||
chat_history=history,
|
||||
decoded_token=decoded_token,
|
||||
attachments=attachments,
|
||||
)
|
||||
|
||||
retriever = RetrieverCreator.create_retriever(
|
||||
retriever_name,
|
||||
source=source,
|
||||
chat_history=history,
|
||||
prompt=prompt,
|
||||
chunks=chunks,
|
||||
token_limit=token_limit,
|
||||
gpt_model=gpt_model,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
return Response(
|
||||
complete_stream(
|
||||
question=question,
|
||||
agent=agent,
|
||||
retriever=retriever,
|
||||
conversation_id=conversation_id,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
isNoneDoc=data.get("isNoneDoc"),
|
||||
index=index,
|
||||
should_save_conversation=save_conv,
|
||||
attachment_ids=attachment_ids,
|
||||
agent_id=agent_id,
|
||||
is_shared_usage=is_shared_usage,
|
||||
shared_token=shared_token,
|
||||
),
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
|
||||
except ValueError:
|
||||
message = "Malformed request body"
|
||||
logger.error(f"/stream - error: {message}")
|
||||
return Response(
|
||||
error_stream_generate(message),
|
||||
status=400,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
status_code = 400
|
||||
return Response(
|
||||
error_stream_generate("Unknown error occurred"),
|
||||
status=status_code,
|
||||
mimetype="text/event-stream",
|
||||
)
|
||||
|
||||
|
||||
def error_stream_generate(err_response):
|
||||
data = json.dumps({"type": "error", "error": err_response})
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
|
||||
@answer_ns.route("/api/answer")
|
||||
class Answer(Resource):
|
||||
answer_model = api.model(
|
||||
"AnswerModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="The question to answer"
|
||||
),
|
||||
"history": fields.List(
|
||||
fields.String, required=False, description="Conversation history"
|
||||
),
|
||||
"conversation_id": fields.String(
|
||||
required=False, description="Conversation ID"
|
||||
),
|
||||
"prompt_id": fields.String(
|
||||
required=False, default="default", description="Prompt ID"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=2, description="Number of chunks"
|
||||
),
|
||||
"token_limit": fields.Integer(required=False, description="Token limit"),
|
||||
"retriever": fields.String(required=False, description="Retriever type"),
|
||||
"api_key": fields.String(required=False, description="API key"),
|
||||
"active_docs": fields.String(
|
||||
required=False, description="Active documents"
|
||||
),
|
||||
"isNoneDoc": fields.Boolean(
|
||||
required=False, description="Flag indicating if no document is used"
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@api.expect(answer_model)
|
||||
@api.doc(description="Provide an answer based on the question and retriever")
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
required_fields = ["question"]
|
||||
missing_fields = check_required_fields(data, required_fields)
|
||||
if missing_fields:
|
||||
return missing_fields
|
||||
|
||||
try:
|
||||
question = data["question"]
|
||||
history = limit_chat_history(
|
||||
json.loads(data.get("history", "[]")), gpt_model=gpt_model
|
||||
)
|
||||
conversation_id = data.get("conversation_id")
|
||||
prompt_id = data.get("prompt_id", "default")
|
||||
chunks = int(data.get("chunks", 2))
|
||||
token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
|
||||
retriever_name = data.get("retriever", "classic")
|
||||
agent_type = settings.AGENT_NAME
|
||||
|
||||
if "api_key" in data:
|
||||
data_key = get_data_from_api_key(data["api_key"])
|
||||
chunks = int(data_key.get("chunks", 2))
|
||||
prompt_id = data_key.get("prompt_id", "default")
|
||||
source = {"active_docs": data_key.get("source")}
|
||||
retriever_name = data_key.get("retriever", retriever_name)
|
||||
user_api_key = data["api_key"]
|
||||
agent_type = data_key.get("agent_type", agent_type)
|
||||
decoded_token = {"sub": data_key.get("user")}
|
||||
|
||||
elif "active_docs" in data:
|
||||
source = {"active_docs": data["active_docs"]}
|
||||
retriever_name = get_retriever(data["active_docs"]) or retriever_name
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
else:
|
||||
source = {}
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
if not decoded_token:
|
||||
return make_response({"error": "Unauthorized"}, 401)
|
||||
|
||||
prompt = get_prompt(prompt_id)
|
||||
|
||||
logger.info(
|
||||
f"/api/answer - request_data: {data}, source: {source}",
|
||||
extra={"data": json.dumps({"request_data": data, "source": source})},
|
||||
)
|
||||
|
||||
agent = AgentCreator.create_agent(
|
||||
agent_type,
|
||||
endpoint="api/answer",
|
||||
llm_name=settings.LLM_PROVIDER,
|
||||
gpt_model=gpt_model,
|
||||
api_key=settings.API_KEY,
|
||||
user_api_key=user_api_key,
|
||||
prompt=prompt,
|
||||
chat_history=history,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
retriever = RetrieverCreator.create_retriever(
|
||||
retriever_name,
|
||||
source=source,
|
||||
chat_history=history,
|
||||
prompt=prompt,
|
||||
chunks=chunks,
|
||||
token_limit=token_limit,
|
||||
gpt_model=gpt_model,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
response_full = ""
|
||||
source_log_docs = []
|
||||
tool_calls = []
|
||||
stream_ended = False
|
||||
thought = ""
|
||||
|
||||
for line in complete_stream(
|
||||
question=question,
|
||||
agent=agent,
|
||||
retriever=retriever,
|
||||
conversation_id=conversation_id,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
isNoneDoc=data.get("isNoneDoc"),
|
||||
index=None,
|
||||
should_save_conversation=False,
|
||||
):
|
||||
try:
|
||||
event_data = line.replace("data: ", "").strip()
|
||||
event = json.loads(event_data)
|
||||
|
||||
if event["type"] == "answer":
|
||||
response_full += event["answer"]
|
||||
elif event["type"] == "source":
|
||||
source_log_docs = event["source"]
|
||||
elif event["type"] == "tool_calls":
|
||||
tool_calls = event["tool_calls"]
|
||||
elif event["type"] == "thought":
|
||||
thought = event["thought"]
|
||||
elif event["type"] == "error":
|
||||
logger.error(f"Error from stream: {event['error']}")
|
||||
return bad_request(500, event["error"])
|
||||
elif event["type"] == "end":
|
||||
stream_ended = True
|
||||
|
||||
except (json.JSONDecodeError, KeyError) as e:
|
||||
logger.warning(f"Error parsing stream event: {e}, line: {line}")
|
||||
continue
|
||||
|
||||
if not stream_ended:
|
||||
logger.error("Stream ended unexpectedly without an 'end' event.")
|
||||
return bad_request(500, "Stream ended unexpectedly.")
|
||||
|
||||
if data.get("isNoneDoc"):
|
||||
for doc in source_log_docs:
|
||||
doc["source"] = "None"
|
||||
|
||||
llm = LLMCreator.create_llm(
|
||||
settings.LLM_PROVIDER,
|
||||
api_key=settings.API_KEY,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
result = {"answer": response_full, "sources": source_log_docs}
|
||||
result["conversation_id"] = str(
|
||||
save_conversation(
|
||||
conversation_id,
|
||||
question,
|
||||
response_full,
|
||||
thought,
|
||||
source_log_docs,
|
||||
tool_calls,
|
||||
llm,
|
||||
decoded_token,
|
||||
api_key=user_api_key,
|
||||
)
|
||||
)
|
||||
|
||||
retriever_params = retriever.get_params()
|
||||
user_logs_collection.insert_one(
|
||||
{
|
||||
"action": "api_answer",
|
||||
"level": "info",
|
||||
"user": decoded_token.get("sub"),
|
||||
"api_key": user_api_key,
|
||||
"question": question,
|
||||
"response": response_full,
|
||||
"sources": source_log_docs,
|
||||
"retriever_params": retriever_params,
|
||||
"timestamp": datetime.datetime.now(datetime.timezone.utc),
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
return bad_request(500, str(e))
|
||||
|
||||
return make_response(result, 200)
|
||||
|
||||
|
||||
@answer_ns.route("/api/search")
|
||||
class Search(Resource):
|
||||
search_model = api.model(
|
||||
"SearchModel",
|
||||
{
|
||||
"question": fields.String(
|
||||
required=True, description="The question to search"
|
||||
),
|
||||
"chunks": fields.Integer(
|
||||
required=False, default=2, description="Number of chunks"
|
||||
),
|
||||
"api_key": fields.String(
|
||||
required=False, description="API key for authentication"
|
||||
),
|
||||
"active_docs": fields.String(
|
||||
required=False, description="Active documents for retrieval"
|
||||
),
|
||||
"retriever": fields.String(required=False, description="Retriever type"),
|
||||
"token_limit": fields.Integer(
|
||||
required=False, description="Limit for tokens"
|
||||
),
|
||||
"isNoneDoc": fields.Boolean(
|
||||
required=False, description="Flag indicating if no document is used"
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
@api.expect(search_model)
|
||||
@api.doc(
|
||||
description="Search for relevant documents based on the question and retriever"
|
||||
)
|
||||
def post(self):
|
||||
data = request.get_json()
|
||||
required_fields = ["question"]
|
||||
missing_fields = check_required_fields(data, required_fields)
|
||||
if missing_fields:
|
||||
return missing_fields
|
||||
|
||||
try:
|
||||
question = data["question"]
|
||||
chunks = int(data.get("chunks", 2))
|
||||
token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
|
||||
retriever_name = data.get("retriever", "classic")
|
||||
|
||||
if "api_key" in data:
|
||||
data_key = get_data_from_api_key(data["api_key"])
|
||||
chunks = int(data_key.get("chunks", 2))
|
||||
source = {"active_docs": data_key.get("source")}
|
||||
user_api_key = data["api_key"]
|
||||
decoded_token = {"sub": data_key.get("user")}
|
||||
|
||||
elif "active_docs" in data:
|
||||
source = {"active_docs": data["active_docs"]}
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
else:
|
||||
source = {}
|
||||
user_api_key = None
|
||||
decoded_token = request.decoded_token
|
||||
|
||||
if not decoded_token:
|
||||
return make_response({"error": "Unauthorized"}, 401)
|
||||
|
||||
logger.info(
|
||||
f"/api/answer - request_data: {data}, source: {source}",
|
||||
extra={"data": json.dumps({"request_data": data, "source": source})},
|
||||
)
|
||||
|
||||
retriever = RetrieverCreator.create_retriever(
|
||||
retriever_name,
|
||||
source=source,
|
||||
chat_history=[],
|
||||
prompt="default",
|
||||
chunks=chunks,
|
||||
token_limit=token_limit,
|
||||
gpt_model=gpt_model,
|
||||
user_api_key=user_api_key,
|
||||
decoded_token=decoded_token,
|
||||
)
|
||||
|
||||
docs = retriever.search(question)
|
||||
retriever_params = retriever.get_params()
|
||||
|
||||
user_logs_collection.insert_one(
|
||||
{
|
||||
"action": "api_search",
|
||||
"level": "info",
|
||||
"user": decoded_token.get("sub"),
|
||||
"api_key": user_api_key,
|
||||
"question": question,
|
||||
"sources": docs,
|
||||
"retriever_params": retriever_params,
|
||||
"timestamp": datetime.datetime.now(datetime.timezone.utc),
|
||||
}
|
||||
)
|
||||
|
||||
if data.get("isNoneDoc"):
|
||||
for doc in docs:
|
||||
doc["source"] = "None"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"/api/search - error: {str(e)} - traceback: {traceback.format_exc()}",
|
||||
extra={"error": str(e), "traceback": traceback.format_exc()},
|
||||
)
|
||||
return bad_request(500, str(e))
|
||||
|
||||
return make_response(docs, 200)
|
||||
|
||||
|
||||
def get_attachments_content(attachment_ids, user):
|
||||
"""
|
||||
Retrieve content from attachment documents based on their IDs.
|
||||
|
||||
Args:
|
||||
attachment_ids (list): List of attachment document IDs
|
||||
user (str): User identifier to verify ownership
|
||||
|
||||
Returns:
|
||||
list: List of dictionaries containing attachment content and metadata
|
||||
"""
|
||||
if not attachment_ids:
|
||||
return []
|
||||
|
||||
attachments = []
|
||||
for attachment_id in attachment_ids:
|
||||
try:
|
||||
attachment_doc = attachments_collection.find_one(
|
||||
{"_id": ObjectId(attachment_id), "user": user}
|
||||
)
|
||||
|
||||
if attachment_doc:
|
||||
attachments.append(attachment_doc)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"Error retrieving attachment {attachment_id}: {e}", exc_info=True
|
||||
)
|
||||
|
||||
return attachments
|
||||
application/api/answer/routes/__init__.py (new, empty file)

application/api/answer/routes/answer.py (new file, 122 lines)
@@ -0,0 +1,122 @@
import logging
import traceback

from flask import make_response, request
from flask_restx import fields, Resource

from application.api import api

from application.api.answer.routes.base import answer_ns, BaseAnswerResource

from application.api.answer.services.stream_processor import StreamProcessor

logger = logging.getLogger(__name__)


@answer_ns.route("/api/answer")
class AnswerResource(Resource, BaseAnswerResource):
    def __init__(self, *args, **kwargs):
        Resource.__init__(self, *args, **kwargs)
        BaseAnswerResource.__init__(self)

    answer_model = answer_ns.model(
        "AnswerModel",
        {
            "question": fields.String(
                required=True, description="Question to be asked"
            ),
            "history": fields.List(
                fields.String,
                required=False,
                description="Conversation history (only for new conversations)",
            ),
            "conversation_id": fields.String(
                required=False,
                description="Existing conversation ID (loads history)",
            ),
            "prompt_id": fields.String(
                required=False, default="default", description="Prompt ID"
            ),
            "chunks": fields.Integer(
                required=False, default=2, description="Number of chunks"
            ),
            "token_limit": fields.Integer(required=False, description="Token limit"),
            "retriever": fields.String(required=False, description="Retriever type"),
            "api_key": fields.String(required=False, description="API key"),
            "active_docs": fields.String(
                required=False, description="Active documents"
            ),
            "isNoneDoc": fields.Boolean(
                required=False, description="Flag indicating if no document is used"
            ),
            "save_conversation": fields.Boolean(
                required=False,
                default=True,
                description="Whether to save the conversation",
            ),
        },
    )

    @api.expect(answer_model)
    @api.doc(description="Provide a response based on the question and retriever")
    def post(self):
        data = request.get_json()
        if error := self.validate_request(data):
            return error
        decoded_token = getattr(request, "decoded_token", None)
        processor = StreamProcessor(data, decoded_token)
        try:
            processor.initialize()
            if not processor.decoded_token:
                return make_response({"error": "Unauthorized"}, 401)
            agent = processor.create_agent()
            retriever = processor.create_retriever()

            stream = self.complete_stream(
                question=data["question"],
                agent=agent,
                retriever=retriever,
                conversation_id=processor.conversation_id,
                user_api_key=processor.agent_config.get("user_api_key"),
                decoded_token=processor.decoded_token,
                isNoneDoc=data.get("isNoneDoc"),
                index=None,
                should_save_conversation=data.get("save_conversation", True),
            )
            stream_result = self.process_response_stream(stream)

            if len(stream_result) == 7:
                (
                    conversation_id,
                    response,
                    sources,
                    tool_calls,
                    thought,
                    error,
                    structured_info,
                ) = stream_result
            else:
                conversation_id, response, sources, tool_calls, thought, error = (
                    stream_result
                )
                structured_info = None

            if error:
                return make_response({"error": error}, 400)
            result = {
                "conversation_id": conversation_id,
                "answer": response,
                "sources": sources,
                "tool_calls": tool_calls,
                "thought": thought,
            }

            if structured_info:
                result.update(structured_info)
        except Exception as e:
            logger.error(
                f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
                extra={"error": str(e), "traceback": traceback.format_exc()},
            )
            return make_response({"error": str(e)}, 500)
        return make_response(result, 200)
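A minimal sketch of calling the refactored endpoint (address and payload values are assumptions; the field names come from `answer_model` above and the `result` dict in `post`):

```python
# Illustrative client for POST /api/answer.
import requests

payload = {
    "question": "How do I stop DocsGPT?",
    "save_conversation": False,  # don't persist this exchange
}
resp = requests.post("http://localhost:7091/api/answer", json=payload)  # assumed address
data = resp.json()
print(data["answer"])           # full answer text
print(data["sources"])          # sources used during retrieval
print(data["conversation_id"])  # conversation id, if one was saved
```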
application/api/answer/routes/base.py (new file, 263 lines)
@@ -0,0 +1,263 @@
import datetime
import json
import logging
from typing import Any, Dict, Generator, List, Optional

from flask import Response
from flask_restx import Namespace

from application.api.answer.services.conversation_service import ConversationService

from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.llm.llm_creator import LLMCreator
from application.utils import check_required_fields, get_gpt_model

logger = logging.getLogger(__name__)


answer_ns = Namespace("answer", description="Answer related operations", path="/")


class BaseAnswerResource:
    """Shared base class for answer endpoints"""

    def __init__(self):
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        self.user_logs_collection = db["user_logs"]
        self.gpt_model = get_gpt_model()
        self.conversation_service = ConversationService()

    def validate_request(
        self, data: Dict[str, Any], require_conversation_id: bool = False
    ) -> Optional[Response]:
        """Common request validation"""
        required_fields = ["question"]
        if require_conversation_id:
            required_fields.append("conversation_id")
        if missing_fields := check_required_fields(data, required_fields):
            return missing_fields
        return None

    def complete_stream(
        self,
        question: str,
        agent: Any,
        retriever: Any,
        conversation_id: Optional[str],
        user_api_key: Optional[str],
        decoded_token: Dict[str, Any],
        isNoneDoc: bool = False,
        index: Optional[int] = None,
        should_save_conversation: bool = True,
        attachment_ids: Optional[List[str]] = None,
        agent_id: Optional[str] = None,
        is_shared_usage: bool = False,
        shared_token: Optional[str] = None,
    ) -> Generator[str, None, None]:
        """
        Generator function that streams the complete conversation response.

        Args:
            question: The user's question
            agent: The agent instance
            retriever: The retriever instance
            conversation_id: Existing conversation ID
            user_api_key: User's API key if any
            decoded_token: Decoded JWT token
            isNoneDoc: Flag for document-less responses
            index: Index of message to update
            should_save_conversation: Whether to persist the conversation
            attachment_ids: List of attachment IDs
            agent_id: ID of agent used
            is_shared_usage: Flag for shared agent usage
            shared_token: Token for shared agent

        Yields:
            Server-sent event strings
        """
        try:
            response_full, thought, source_log_docs, tool_calls = "", "", [], []
            is_structured = False
            schema_info = None
            structured_chunks = []

            for line in agent.gen(query=question, retriever=retriever):
                if "answer" in line:
                    response_full += str(line["answer"])
                    if line.get("structured"):
                        is_structured = True
                        schema_info = line.get("schema")
                        structured_chunks.append(line["answer"])
                    else:
                        data = json.dumps({"type": "answer", "answer": line["answer"]})
                        yield f"data: {data}\n\n"
                elif "sources" in line:
                    truncated_sources = []
                    source_log_docs = line["sources"]
                    for source in line["sources"]:
                        truncated_source = source.copy()
                        if "text" in truncated_source:
                            truncated_source["text"] = (
                                truncated_source["text"][:100].strip() + "..."
                            )
                        truncated_sources.append(truncated_source)
                    if truncated_sources:
                        data = json.dumps(
                            {"type": "source", "source": truncated_sources}
                        )
                        yield f"data: {data}\n\n"
                elif "tool_calls" in line:
                    tool_calls = line["tool_calls"]
                elif "thought" in line:
                    thought += line["thought"]
                    data = json.dumps({"type": "thought", "thought": line["thought"]})
                    yield f"data: {data}\n\n"
                elif "type" in line:
                    data = json.dumps(line)
                    yield f"data: {data}\n\n"

            if is_structured and structured_chunks:
                structured_data = {
                    "type": "structured_answer",
                    "answer": response_full,
                    "structured": True,
                    "schema": schema_info,
                }
                data = json.dumps(structured_data)
                yield f"data: {data}\n\n"

            if isNoneDoc:
                for doc in source_log_docs:
                    doc["source"] = "None"
            llm = LLMCreator.create_llm(
                settings.LLM_PROVIDER,
                api_key=settings.API_KEY,
                user_api_key=user_api_key,
                decoded_token=decoded_token,
            )

            if should_save_conversation:
                conversation_id = self.conversation_service.save_conversation(
                    conversation_id,
                    question,
                    response_full,
                    thought,
                    source_log_docs,
                    tool_calls,
                    llm,
                    self.gpt_model,
                    decoded_token,
                    index=index,
                    api_key=user_api_key,
                    agent_id=agent_id,
                    is_shared_usage=is_shared_usage,
                    shared_token=shared_token,
                    attachment_ids=attachment_ids,
                )
            else:
                conversation_id = None
            id_data = {"type": "id", "id": str(conversation_id)}
            data = json.dumps(id_data)
            yield f"data: {data}\n\n"

            retriever_params = retriever.get_params()
            log_data = {
                "action": "stream_answer",
                "level": "info",
                "user": decoded_token.get("sub"),
                "api_key": user_api_key,
                "question": question,
                "response": response_full,
                "sources": source_log_docs,
                "retriever_params": retriever_params,
                "attachments": attachment_ids,
                "timestamp": datetime.datetime.now(datetime.timezone.utc),
            }
            if is_structured:
                log_data["structured_output"] = True
                if schema_info:
                    log_data["schema"] = schema_info

            # clean up text fields to be no longer than 10000 characters
            for key, value in log_data.items():
                if isinstance(value, str) and len(value) > 10000:
                    log_data[key] = value[:10000]

            self.user_logs_collection.insert_one(log_data)

            # End of stream
            data = json.dumps({"type": "end"})
            yield f"data: {data}\n\n"
        except Exception as e:
            logger.error(f"Error in stream: {str(e)}", exc_info=True)
            data = json.dumps(
                {
                    "type": "error",
                    "error": "Please try again later. We apologize for any inconvenience.",
                }
            )
            yield f"data: {data}\n\n"
            return

    def process_response_stream(self, stream):
        """Process the stream response for non-streaming endpoint"""
        conversation_id = ""
        response_full = ""
        source_log_docs = []
        tool_calls = []
        thought = ""
        stream_ended = False
        is_structured = False
        schema_info = None

        for line in stream:
            try:
                event_data = line.replace("data: ", "").strip()
                event = json.loads(event_data)

                if event["type"] == "id":
                    conversation_id = event["id"]
                elif event["type"] == "answer":
                    response_full += event["answer"]
                elif event["type"] == "structured_answer":
                    response_full = event["answer"]
                    is_structured = True
                    schema_info = event.get("schema")
                elif event["type"] == "source":
                    source_log_docs = event["source"]
                elif event["type"] == "tool_calls":
                    tool_calls = event["tool_calls"]
|
||||
elif event["type"] == "thought":
|
||||
thought = event["thought"]
|
||||
elif event["type"] == "error":
|
||||
logger.error(f"Error from stream: {event['error']}")
|
||||
return None, None, None, None, event["error"]
|
||||
elif event["type"] == "end":
|
||||
stream_ended = True
|
||||
except (json.JSONDecodeError, KeyError) as e:
|
||||
logger.warning(f"Error parsing stream event: {e}, line: {line}")
|
||||
continue
|
||||
if not stream_ended:
|
||||
logger.error("Stream ended unexpectedly without an 'end' event.")
|
||||
return None, None, None, None, "Stream ended unexpectedly"
|
||||
|
||||
result = (
|
||||
conversation_id,
|
||||
response_full,
|
||||
source_log_docs,
|
||||
tool_calls,
|
||||
thought,
|
||||
None,
|
||||
)
|
||||
|
||||
if is_structured:
|
||||
result = result + ({"structured": True, "schema": schema_info},)
|
||||
|
||||
return result
|
||||
|
||||
def error_stream_generate(self, err_response):
|
||||
data = json.dumps({"type": "error", "error": err_response})
|
||||
yield f"data: {data}\n\n"
application/api/answer/routes/stream.py (new file, 117 lines)
@@ -0,0 +1,117 @@
import logging
import traceback

from flask import request, Response
from flask_restx import fields, Resource

from application.api import api
from application.api.answer.routes.base import answer_ns, BaseAnswerResource
from application.api.answer.services.stream_processor import StreamProcessor

logger = logging.getLogger(__name__)


@answer_ns.route("/stream")
class StreamResource(Resource, BaseAnswerResource):
    def __init__(self, *args, **kwargs):
        Resource.__init__(self, *args, **kwargs)
        BaseAnswerResource.__init__(self)

    stream_model = answer_ns.model(
        "StreamModel",
        {
            "question": fields.String(
                required=True, description="Question to be asked"
            ),
            "history": fields.List(
                fields.String,
                required=False,
                description="Conversation history (only for new conversations)",
            ),
            "conversation_id": fields.String(
                required=False,
                description="Existing conversation ID (loads history)",
            ),
            "prompt_id": fields.String(
                required=False, default="default", description="Prompt ID"
            ),
            "chunks": fields.Integer(
                required=False, default=2, description="Number of chunks"
            ),
            "token_limit": fields.Integer(required=False, description="Token limit"),
            "retriever": fields.String(required=False, description="Retriever type"),
            "api_key": fields.String(required=False, description="API key"),
            "active_docs": fields.String(
                required=False, description="Active documents"
            ),
            "isNoneDoc": fields.Boolean(
                required=False, description="Flag indicating if no document is used"
            ),
            "index": fields.Integer(
                required=False, description="Index of the query to update"
            ),
            "save_conversation": fields.Boolean(
                required=False,
                default=True,
                description="Whether to save the conversation",
            ),
            "attachments": fields.List(
                fields.String, required=False, description="List of attachment IDs"
            ),
        },
    )

    @api.expect(stream_model)
    @api.doc(description="Stream a response based on the question and retriever")
    def post(self):
        data = request.get_json()
        if error := self.validate_request(data, "index" in data):
            return error
        decoded_token = getattr(request, "decoded_token", None)
        processor = StreamProcessor(data, decoded_token)
        try:
            processor.initialize()
            agent = processor.create_agent()
            retriever = processor.create_retriever()

            return Response(
                self.complete_stream(
                    question=data["question"],
                    agent=agent,
                    retriever=retriever,
                    conversation_id=processor.conversation_id,
                    user_api_key=processor.agent_config.get("user_api_key"),
                    decoded_token=processor.decoded_token,
                    isNoneDoc=data.get("isNoneDoc"),
                    index=data.get("index"),
                    should_save_conversation=data.get("save_conversation", True),
                    attachment_ids=data.get("attachments", []),
                    agent_id=data.get("agent_id"),
                    is_shared_usage=processor.is_shared_usage,
                    shared_token=processor.shared_token,
                ),
                mimetype="text/event-stream",
            )
        except ValueError as e:
            message = "Malformed request body"
            logger.error(
                f"/stream - error: {message} - specific error: {str(e)} - traceback: {traceback.format_exc()}",
                extra={"error": str(e), "traceback": traceback.format_exc()},
            )
            return Response(
                self.error_stream_generate(message),
                status=400,
                mimetype="text/event-stream",
            )
        except Exception as e:
            logger.error(
                f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
                extra={"error": str(e), "traceback": traceback.format_exc()},
            )
            return Response(
                self.error_stream_generate("Unknown error occurred"),
                status=400,
                mimetype="text/event-stream",
            )
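For reference, a /stream request body exercising the optional fields declared in the model above might look like the sketch below; every ID value is an illustrative placeholder, not a real document.

# Example /stream request body (values are hypothetical placeholders).
payload = {
    "question": "Summarize the installation guide",
    "conversation_id": "64f0c2e9a1b2c3d4e5f60789",  # placeholder ObjectId
    "prompt_id": "default",
    "chunks": 2,
    "retriever": "classic",
    "isNoneDoc": False,
    "save_conversation": True,
    "attachments": [],
}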
application/api/answer/services/__init__.py (new, empty file)

application/api/answer/services/conversation_service.py (new file, 180 lines)
@@ -0,0 +1,180 @@
import logging
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from application.core.mongo_db import MongoDB
from application.core.settings import settings
from bson import ObjectId

logger = logging.getLogger(__name__)


class ConversationService:
    def __init__(self):
        mongo = MongoDB.get_client()
        db = mongo[settings.MONGO_DB_NAME]
        self.conversations_collection = db["conversations"]
        self.agents_collection = db["agents"]

    def get_conversation(
        self, conversation_id: str, user_id: str
    ) -> Optional[Dict[str, Any]]:
        """Retrieve a conversation with proper access control"""
        if not conversation_id or not user_id:
            return None
        try:
            conversation = self.conversations_collection.find_one(
                {
                    "_id": ObjectId(conversation_id),
                    "$or": [{"user": user_id}, {"shared_with": user_id}],
                }
            )

            if not conversation:
                logger.warning(
                    f"Conversation not found or unauthorized - ID: {conversation_id}, User: {user_id}"
                )
                return None
            conversation["_id"] = str(conversation["_id"])
            return conversation
        except Exception as e:
            logger.error(f"Error fetching conversation: {str(e)}", exc_info=True)
            return None

    def save_conversation(
        self,
        conversation_id: Optional[str],
        question: str,
        response: str,
        thought: str,
        sources: List[Dict[str, Any]],
        tool_calls: List[Dict[str, Any]],
        llm: Any,
        gpt_model: str,
        decoded_token: Dict[str, Any],
        index: Optional[int] = None,
        api_key: Optional[str] = None,
        agent_id: Optional[str] = None,
        is_shared_usage: bool = False,
        shared_token: Optional[str] = None,
        attachment_ids: Optional[List[str]] = None,
    ) -> str:
        """Save or update a conversation in the database"""
        user_id = decoded_token.get("sub")
        if not user_id:
            raise ValueError("User ID not found in token")
        current_time = datetime.now(timezone.utc)

        # Trim each source's text so that at most 1000 characters are persisted
        for source in sources:
            if "text" in source and isinstance(source["text"], str):
                source["text"] = source["text"][:1000]

        if conversation_id is not None and index is not None:
            # Update an existing query within the conversation
            result = self.conversations_collection.update_one(
                {
                    "_id": ObjectId(conversation_id),
                    "user": user_id,
                    f"queries.{index}": {"$exists": True},
                },
                {
                    "$set": {
                        f"queries.{index}.prompt": question,
                        f"queries.{index}.response": response,
                        f"queries.{index}.thought": thought,
                        f"queries.{index}.sources": sources,
                        f"queries.{index}.tool_calls": tool_calls,
                        f"queries.{index}.timestamp": current_time,
                        f"queries.{index}.attachments": attachment_ids,
                    }
                },
            )

            if result.matched_count == 0:
                raise ValueError("Conversation not found or unauthorized")
            self.conversations_collection.update_one(
                {
                    "_id": ObjectId(conversation_id),
                    "user": user_id,
                    f"queries.{index}": {"$exists": True},
                },
                {"$push": {"queries": {"$each": [], "$slice": index + 1}}},
            )
            return conversation_id
        elif conversation_id:
            # Append a new message to an existing conversation
            result = self.conversations_collection.update_one(
                {"_id": ObjectId(conversation_id), "user": user_id},
                {
                    "$push": {
                        "queries": {
                            "prompt": question,
                            "response": response,
                            "thought": thought,
                            "sources": sources,
                            "tool_calls": tool_calls,
                            "timestamp": current_time,
                            "attachments": attachment_ids,
                        }
                    }
                },
            )

            if result.matched_count == 0:
                raise ValueError("Conversation not found or unauthorized")
            return conversation_id
        else:
            # Create a new conversation
            messages_summary = [
                {
                    "role": "assistant",
                    "content": "Summarise following conversation in no more than 3 "
                    "words, respond ONLY with the summary, use the same "
                    "language as the user query",
                },
                {
                    "role": "user",
                    "content": "Summarise following conversation in no more than 3 words, "
                    "respond ONLY with the summary, use the same language as the "
                    "user query \n\nUser: " + question + "\n\n" + "AI: " + response,
                },
            ]

            completion = llm.gen(
                model=gpt_model, messages=messages_summary, max_tokens=30
            )

            conversation_data = {
                "user": user_id,
                "date": current_time,
                "name": completion,
                "queries": [
                    {
                        "prompt": question,
                        "response": response,
                        "thought": thought,
                        "sources": sources,
                        "tool_calls": tool_calls,
                        "timestamp": current_time,
                        "attachments": attachment_ids,
                    }
                ],
            }

            if api_key:
                if agent_id:
                    conversation_data["agent_id"] = agent_id
                if is_shared_usage:
                    conversation_data["is_shared_usage"] = is_shared_usage
                    conversation_data["shared_token"] = shared_token
                agent = self.agents_collection.find_one({"key": api_key})
                if agent:
                    conversation_data["api_key"] = agent["key"]
            result = self.conversations_collection.insert_one(conversation_data)
            return str(result.inserted_id)
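The second update in the index branch above relies on a MongoDB idiom worth calling out: $push with an empty $each adds no elements, and the accompanying $slice truncates the array in the same atomic update, so an edited conversation drops any queries that followed the updated one. A minimal standalone sketch, where the connection string, database name, and ObjectId are all placeholders:

# Truncate the "queries" array to its first (index + 1) elements in place.
from bson import ObjectId
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # hypothetical local MongoDB
conversations = client["docsgpt"]["conversations"]

index = 1  # keep queries[0] and queries[1]; drop everything after
conversations.update_one(
    {"_id": ObjectId("64f0c2e9a1b2c3d4e5f60789")},  # placeholder conversation ID
    # Pushing an empty $each adds nothing; $slice then keeps only the
    # leading (index + 1) entries of the array, all in one atomic update.
    {"$push": {"queries": {"$each": [], "$slice": index + 1}}},
)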
application/api/answer/services/stream_processor.py (new file, 277 lines)
@@ -0,0 +1,277 @@
import datetime
import json
import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional

from bson.dbref import DBRef
from bson.objectid import ObjectId

from application.agents.agent_creator import AgentCreator
from application.api.answer.services.conversation_service import ConversationService
from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.retriever.retriever_creator import RetrieverCreator
from application.utils import get_gpt_model, limit_chat_history

logger = logging.getLogger(__name__)


def get_prompt(prompt_id: str, prompts_collection=None) -> str:
    """
    Get a prompt by preset name or MongoDB ID
    """
    current_dir = Path(__file__).resolve().parents[3]
    prompts_dir = current_dir / "prompts"

    preset_mapping = {
        "default": "chat_combine_default.txt",
        "creative": "chat_combine_creative.txt",
        "strict": "chat_combine_strict.txt",
        "reduce": "chat_reduce_prompt.txt",
    }

    if prompt_id in preset_mapping:
        file_path = os.path.join(prompts_dir, preset_mapping[prompt_id])
        try:
            with open(file_path, "r") as f:
                return f.read()
        except FileNotFoundError:
            raise FileNotFoundError(f"Prompt file not found: {file_path}")
    try:
        if prompts_collection is None:
            mongo = MongoDB.get_client()
            db = mongo[settings.MONGO_DB_NAME]
            prompts_collection = db["prompts"]
        prompt_doc = prompts_collection.find_one({"_id": ObjectId(prompt_id)})
        if not prompt_doc:
            raise ValueError(f"Prompt with ID {prompt_id} not found")
        return prompt_doc["content"]
    except Exception as e:
        raise ValueError(f"Invalid prompt ID: {prompt_id}") from e


class StreamProcessor:
    def __init__(
        self, request_data: Dict[str, Any], decoded_token: Optional[Dict[str, Any]]
    ):
        mongo = MongoDB.get_client()
        self.db = mongo[settings.MONGO_DB_NAME]
        self.agents_collection = self.db["agents"]
        self.attachments_collection = self.db["attachments"]
        self.prompts_collection = self.db["prompts"]

        self.data = request_data
        self.decoded_token = decoded_token
        self.initial_user_id = (
            self.decoded_token.get("sub") if self.decoded_token is not None else None
        )
        self.conversation_id = self.data.get("conversation_id")
        self.source = (
            {"active_docs": self.data["active_docs"]}
            if "active_docs" in self.data
            else {}
        )
        self.attachments = []
        self.history = []
        self.agent_config = {}
        self.retriever_config = {}
        self.is_shared_usage = False
        self.shared_token = None
        self.gpt_model = get_gpt_model()
        self.conversation_service = ConversationService()

    def initialize(self):
        """Initialize all required components for processing"""
        self._configure_retriever()
        self._configure_agent()
        self._load_conversation_history()
        self._process_attachments()

    def _load_conversation_history(self):
        """Load conversation history either from the DB or the request"""
        if self.conversation_id and self.initial_user_id:
            conversation = self.conversation_service.get_conversation(
                self.conversation_id, self.initial_user_id
            )
            if not conversation:
                raise ValueError("Conversation not found or unauthorized")
            self.history = [
                {"prompt": query["prompt"], "response": query["response"]}
                for query in conversation.get("queries", [])
            ]
        else:
            self.history = limit_chat_history(
                json.loads(self.data.get("history", "[]")), gpt_model=self.gpt_model
            )

    def _process_attachments(self):
        """Process any attachments in the request"""
        attachment_ids = self.data.get("attachments", [])
        self.attachments = self._get_attachments_content(
            attachment_ids, self.initial_user_id
        )

    def _get_attachments_content(self, attachment_ids, user_id):
        """
        Retrieve content from attachment documents based on their IDs.
        """
        if not attachment_ids:
            return []
        attachments = []
        for attachment_id in attachment_ids:
            try:
                attachment_doc = self.attachments_collection.find_one(
                    {"_id": ObjectId(attachment_id), "user": user_id}
                )

                if attachment_doc:
                    attachments.append(attachment_doc)
            except Exception as e:
                logger.error(
                    f"Error retrieving attachment {attachment_id}: {e}", exc_info=True
                )
        return attachments

    def _get_agent_key(self, agent_id: Optional[str], user_id: Optional[str]) -> tuple:
        """Get API key for agent with access control"""
        if not agent_id:
            return None, False, None
        try:
            agent = self.agents_collection.find_one({"_id": ObjectId(agent_id)})
            if agent is None:
                raise Exception("Agent not found")
            is_owner = agent.get("user") == user_id
            is_shared_with_user = agent.get(
                "shared_publicly", False
            ) or user_id in agent.get("shared_with", [])

            if not (is_owner or is_shared_with_user):
                raise Exception("Unauthorized access to the agent")
            if is_owner:
                self.agents_collection.update_one(
                    {"_id": ObjectId(agent_id)},
                    {
                        "$set": {
                            "lastUsedAt": datetime.datetime.now(datetime.timezone.utc)
                        }
                    },
                )
            return str(agent["key"]), not is_owner, agent.get("shared_token")
        except Exception as e:
            logger.error(f"Error in get_agent_key: {str(e)}", exc_info=True)
            raise

    def _get_data_from_api_key(self, api_key: str) -> Dict[str, Any]:
        data = self.agents_collection.find_one({"key": api_key})
        if not data:
            raise Exception("Invalid API Key, please generate a new key", 401)
        source = data.get("source")
        if isinstance(source, DBRef):
            source_doc = self.db.dereference(source)
            data["source"] = str(source_doc["_id"])
            data["retriever"] = source_doc.get("retriever", data.get("retriever"))
            data["chunks"] = source_doc.get("chunks", data.get("chunks"))
        else:
            data["source"] = None
        return data

    def _configure_agent(self):
        """Configure the agent based on request data"""
        agent_id = self.data.get("agent_id")
        self.agent_key, self.is_shared_usage, self.shared_token = self._get_agent_key(
            agent_id, self.initial_user_id
        )

        api_key = self.data.get("api_key")
        if api_key:
            data_key = self._get_data_from_api_key(api_key)
            self.agent_config.update(
                {
                    "prompt_id": data_key.get("prompt_id", "default"),
                    "agent_type": data_key.get("agent_type", settings.AGENT_NAME),
                    "user_api_key": api_key,
                    "json_schema": data_key.get("json_schema"),
                }
            )
            self.initial_user_id = data_key.get("user")
            self.decoded_token = {"sub": data_key.get("user")}
            if data_key.get("source"):
                self.source = {"active_docs": data_key["source"]}
            if data_key.get("retriever"):
                self.retriever_config["retriever_name"] = data_key["retriever"]
            if data_key.get("chunks") is not None:
                self.retriever_config["chunks"] = data_key["chunks"]
        elif self.agent_key:
            data_key = self._get_data_from_api_key(self.agent_key)
            self.agent_config.update(
                {
                    "prompt_id": data_key.get("prompt_id", "default"),
                    "agent_type": data_key.get("agent_type", settings.AGENT_NAME),
                    "user_api_key": self.agent_key,
                    "json_schema": data_key.get("json_schema"),
                }
            )
            self.decoded_token = (
                self.decoded_token
                if self.is_shared_usage
                else {"sub": data_key.get("user")}
            )
            if data_key.get("source"):
                self.source = {"active_docs": data_key["source"]}
            if data_key.get("retriever"):
                self.retriever_config["retriever_name"] = data_key["retriever"]
            if data_key.get("chunks") is not None:
                self.retriever_config["chunks"] = data_key["chunks"]
        else:
            self.agent_config.update(
                {
                    "prompt_id": self.data.get("prompt_id", "default"),
                    "agent_type": settings.AGENT_NAME,
                    "user_api_key": None,
                    "json_schema": None,
                }
            )

    def _configure_retriever(self):
        """Configure the retriever based on request data"""
        self.retriever_config = {
            "retriever_name": self.data.get("retriever", "classic"),
            "chunks": int(self.data.get("chunks", 2)),
            "token_limit": self.data.get("token_limit", settings.DEFAULT_MAX_HISTORY),
        }

        if "isNoneDoc" in self.data and self.data["isNoneDoc"]:
            self.retriever_config["chunks"] = 0

    def create_agent(self):
        """Create and return the configured agent"""
        return AgentCreator.create_agent(
            self.agent_config["agent_type"],
            endpoint="stream",
            llm_name=settings.LLM_PROVIDER,
            gpt_model=self.gpt_model,
            api_key=settings.API_KEY,
            user_api_key=self.agent_config["user_api_key"],
            prompt=get_prompt(self.agent_config["prompt_id"], self.prompts_collection),
            chat_history=self.history,
            decoded_token=self.decoded_token,
            attachments=self.attachments,
            json_schema=self.agent_config.get("json_schema"),
        )

    def create_retriever(self):
        """Create and return the configured retriever"""
        return RetrieverCreator.create_retriever(
            self.retriever_config["retriever_name"],
            source=self.source,
            chat_history=self.history,
            prompt=get_prompt(self.agent_config["prompt_id"], self.prompts_collection),
            chunks=self.retriever_config["chunks"],
            token_limit=self.retriever_config["token_limit"],
            gpt_model=self.gpt_model,
            user_api_key=self.agent_config["user_api_key"],
            decoded_token=self.decoded_token,
        )
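As a usage note for get_prompt above: preset names short-circuit to prompt files on disk, while any other value is treated as a MongoDB _id; the ObjectId below is a placeholder for illustration.

# Preset names resolve to files under application/prompts/.
default_prompt = get_prompt("default")    # reads chat_combine_default.txt
creative_prompt = get_prompt("creative")  # reads chat_combine_creative.txt

# Anything else is looked up as an _id in the "prompts" collection and
# raises ValueError if the document is missing or the ID is malformed.
custom_prompt = get_prompt("64f0c2e9a1b2c3d4e5f60789")  # placeholder ObjectId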
application/api/connector/routes.py (new file, 627 lines)
@@ -0,0 +1,627 @@
import datetime
import json
import logging

from bson.objectid import ObjectId
from flask import (
    Blueprint,
    current_app,
    jsonify,
    make_response,
    request
)
from flask_restx import fields, Namespace, Resource

from application.api.user.tasks import (
    ingest_connector_task,
)
from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.api import api

from application.utils import (
    check_required_fields
)

from application.parser.connectors.connector_creator import ConnectorCreator


mongo = MongoDB.get_client()
db = mongo[settings.MONGO_DB_NAME]
sources_collection = db["sources"]
sessions_collection = db["connector_sessions"]

connector = Blueprint("connector", __name__)
connectors_ns = Namespace("connectors", description="Connector operations", path="/")
api.add_namespace(connectors_ns)


@connectors_ns.route("/api/connectors/upload")
class UploadConnector(Resource):
    @api.expect(
        api.model(
            "ConnectorUploadModel",
            {
                "user": fields.String(required=True, description="User ID"),
                "source": fields.String(
                    required=True, description="Source type (google_drive, github, etc.)"
                ),
                "name": fields.String(required=True, description="Job name"),
                "data": fields.String(required=True, description="Configuration data"),
                "repo_url": fields.String(description="GitHub repository URL"),
            },
        )
    )
    @api.doc(
        description="Uploads connector source for vectorization",
    )
    def post(self):
        decoded_token = request.decoded_token
        if not decoded_token:
            return make_response(jsonify({"success": False}), 401)
        data = request.form
        required_fields = ["user", "source", "name", "data"]
        missing_fields = check_required_fields(data, required_fields)
        if missing_fields:
            return missing_fields
        try:
            config = json.loads(data["data"])
            source_data = None
            sync_frequency = config.get("sync_frequency", "never")

            if data["source"] == "github":
                source_data = config.get("repo_url")
            elif data["source"] in ["crawler", "url"]:
                source_data = config.get("url")
            elif data["source"] == "reddit":
                source_data = config
            elif data["source"] in ConnectorCreator.get_supported_connectors():
                session_token = config.get("session_token")
                if not session_token:
                    return make_response(jsonify({
                        "success": False,
                        "error": f"Missing session_token in {data['source']} configuration"
                    }), 400)

                file_ids = config.get("file_ids", [])
                if isinstance(file_ids, str):
                    file_ids = [id.strip() for id in file_ids.split(',') if id.strip()]
                elif not isinstance(file_ids, list):
                    file_ids = []

                folder_ids = config.get("folder_ids", [])
                if isinstance(folder_ids, str):
                    folder_ids = [id.strip() for id in folder_ids.split(',') if id.strip()]
                elif not isinstance(folder_ids, list):
                    folder_ids = []

                config["file_ids"] = file_ids
                config["folder_ids"] = folder_ids

                task = ingest_connector_task.delay(
                    job_name=data["name"],
                    user=decoded_token.get("sub"),
                    source_type=data["source"],
                    session_token=session_token,
                    file_ids=file_ids,
                    folder_ids=folder_ids,
                    recursive=config.get("recursive", False),
                    retriever=config.get("retriever", "classic"),
                    sync_frequency=sync_frequency
                )
                return make_response(jsonify({"success": True, "task_id": task.id}), 200)
            task = ingest_connector_task.delay(
                source_data=source_data,
                job_name=data["name"],
                user=decoded_token.get("sub"),
                loader=data["source"],
                sync_frequency=sync_frequency
            )
        except Exception as err:
            current_app.logger.error(
                f"Error uploading connector source: {err}", exc_info=True
            )
            return make_response(jsonify({"success": False}), 400)
        return make_response(jsonify({"success": True, "task_id": task.id}), 200)

@connectors_ns.route("/api/connectors/task_status")
class ConnectorTaskStatus(Resource):
    task_status_model = api.model(
        "ConnectorTaskStatusModel",
        {"task_id": fields.String(required=True, description="Task ID")},
    )

    @api.expect(task_status_model)
    @api.doc(description="Get connector task status")
    def get(self):
        task_id = request.args.get("task_id")
        if not task_id:
            return make_response(
                jsonify({"success": False, "message": "Task ID is required"}), 400
            )
        try:
            from application.celery_init import celery

            task = celery.AsyncResult(task_id)
            task_meta = task.info
            print(f"Task status: {task.status}")
            if not isinstance(
                task_meta, (dict, list, str, int, float, bool, type(None))
            ):
                task_meta = str(task_meta)
        except Exception as err:
            current_app.logger.error(f"Error getting task status: {err}", exc_info=True)
            return make_response(jsonify({"success": False}), 400)
        return make_response(jsonify({"status": task.status, "result": task_meta}), 200)


@connectors_ns.route("/api/connectors/sources")
class ConnectorSources(Resource):
    @api.doc(description="Get connector sources")
    def get(self):
        decoded_token = request.decoded_token
        if not decoded_token:
            return make_response(jsonify({"success": False}), 401)
        user = decoded_token.get("sub")
        try:
            sources = sources_collection.find({"user": user, "type": "connector"}).sort("date", -1)
            connector_sources = []
            for source in sources:
                connector_sources.append({
                    "id": str(source["_id"]),
                    "name": source.get("name"),
                    "date": source.get("date"),
                    "type": source.get("type"),
                    "source": source.get("source"),
                    "tokens": source.get("tokens", ""),
                    "retriever": source.get("retriever", "classic"),
                    "syncFrequency": source.get("sync_frequency", ""),
                })
        except Exception as err:
            current_app.logger.error(f"Error retrieving connector sources: {err}", exc_info=True)
            return make_response(jsonify({"success": False}), 400)
        return make_response(jsonify(connector_sources), 200)


@connectors_ns.route("/api/connectors/delete")
class DeleteConnectorSource(Resource):
    @api.doc(
        description="Delete a connector source",
        params={"source_id": "The source ID to delete"},
    )
    def delete(self):
        decoded_token = request.decoded_token
        if not decoded_token:
            return make_response(jsonify({"success": False}), 401)
        source_id = request.args.get("source_id")
        if not source_id:
            return make_response(
                jsonify({"success": False, "message": "source_id is required"}), 400
            )
        try:
            result = sources_collection.delete_one(
                {"_id": ObjectId(source_id), "user": decoded_token.get("sub")}
            )
            if result.deleted_count == 0:
                return make_response(
                    jsonify({"success": False, "message": "Source not found"}), 404
                )
        except Exception as err:
            current_app.logger.error(
                f"Error deleting connector source: {err}", exc_info=True
            )
            return make_response(jsonify({"success": False}), 400)
        return make_response(jsonify({"success": True}), 200)


@connectors_ns.route("/api/connectors/auth")
class ConnectorAuth(Resource):
    @api.doc(description="Get connector OAuth authorization URL", params={"provider": "Connector provider (e.g., google_drive)"})
    def get(self):
        try:
            provider = request.args.get('provider') or request.args.get('source')
            if not provider:
                return make_response(jsonify({"success": False, "error": "Missing provider"}), 400)

            if not ConnectorCreator.is_supported(provider):
                return make_response(jsonify({"success": False, "error": f"Unsupported provider: {provider}"}), 400)

            import uuid
            state = str(uuid.uuid4())
            auth = ConnectorCreator.create_auth(provider)
            authorization_url = auth.get_authorization_url(state=state)
            return make_response(jsonify({
                "success": True,
                "authorization_url": authorization_url,
                "state": state
            }), 200)
        except Exception as e:
            current_app.logger.error(f"Error generating connector auth URL: {e}")
            return make_response(jsonify({"success": False, "error": str(e)}), 500)


@connectors_ns.route("/api/connectors/callback")
class ConnectorsCallback(Resource):
    @api.doc(description="Handle OAuth callback for external connectors")
    def get(self):
        """Handle OAuth callback for external connectors"""
        try:
            from application.parser.connectors.connector_creator import ConnectorCreator
            from flask import request, redirect
            import uuid

            provider = request.args.get('provider', 'google_drive')
            authorization_code = request.args.get('code')
            _ = request.args.get('state')
            error = request.args.get('error')

            if error:
                return redirect(f"/api/connectors/callback-status?status=error&message=OAuth+error:+{error}.+Please+try+again+and+make+sure+to+grant+all+requested+permissions,+including+offline+access.&provider={provider}")

            if not authorization_code:
                return redirect(f"/api/connectors/callback-status?status=error&message=Authorization+code+not+provided.+Please+complete+the+authorization+process+and+make+sure+to+grant+offline+access.&provider={provider}")

            try:
                auth = ConnectorCreator.create_auth(provider)
                token_info = auth.exchange_code_for_tokens(authorization_code)

                session_token = str(uuid.uuid4())

                try:
                    credentials = auth.create_credentials_from_token_info(token_info)
                    service = auth.build_drive_service(credentials)
                    user_info = service.about().get(fields="user").execute()
                    user_email = user_info.get('user', {}).get('emailAddress', 'Connected User')
                except Exception as e:
                    current_app.logger.warning(f"Could not get user info: {e}")
                    user_email = 'Connected User'

                sanitized_token_info = {
                    "access_token": token_info.get("access_token"),
                    "refresh_token": token_info.get("refresh_token"),
                    "token_uri": token_info.get("token_uri"),
                    "expiry": token_info.get("expiry"),
                    "scopes": token_info.get("scopes")
                }

                user_id = request.decoded_token.get("sub") if getattr(request, "decoded_token", None) else None
                sessions_collection.insert_one({
                    "session_token": session_token,
                    "user": user_id,
                    "token_info": sanitized_token_info,
                    "created_at": datetime.datetime.now(datetime.timezone.utc),
                    "user_email": user_email,
                    "provider": provider
                })

                # Redirect to success page with session token and user email
                return redirect(f"/api/connectors/callback-status?status=success&message=Authentication+successful&provider={provider}&session_token={session_token}&user_email={user_email}")

            except Exception as e:
                current_app.logger.error(f"Error exchanging code for tokens: {str(e)}", exc_info=True)
                return redirect(f"/api/connectors/callback-status?status=error&message=Failed+to+exchange+authorization+code+for+tokens:+{str(e)}&provider={provider}")

        except Exception as e:
            current_app.logger.error(f"Error handling connector callback: {e}")
            return redirect(f"/api/connectors/callback-status?status=error&message=Failed+to+complete+connector+authentication:+{str(e)}.+Please+try+again+and+make+sure+to+grant+all+requested+permissions,+including+offline+access.")


@connectors_ns.route("/api/connectors/refresh")
class ConnectorRefresh(Resource):
    @api.expect(api.model("ConnectorRefreshModel", {"provider": fields.String(required=True), "refresh_token": fields.String(required=True)}))
    @api.doc(description="Refresh connector access token")
    def post(self):
        try:
            data = request.get_json()
            provider = data.get('provider')
            refresh_token = data.get('refresh_token')

            if not provider or not refresh_token:
                return make_response(jsonify({"success": False, "error": "provider and refresh_token are required"}), 400)

            auth = ConnectorCreator.create_auth(provider)
            token_info = auth.refresh_access_token(refresh_token)
            return make_response(jsonify({"success": True, "token_info": token_info}), 200)
        except Exception as e:
            current_app.logger.error(f"Error refreshing token for connector: {e}")
            return make_response(jsonify({"success": False, "error": str(e)}), 500)


@connectors_ns.route("/api/connectors/files")
class ConnectorFiles(Resource):
    @api.expect(api.model("ConnectorFilesModel", {"provider": fields.String(required=True), "session_token": fields.String(required=True), "folder_id": fields.String(required=False), "limit": fields.Integer(required=False), "page_token": fields.String(required=False)}))
    @api.doc(description="List files from a connector provider (supports pagination)")
    def post(self):
        try:
            data = request.get_json()
            provider = data.get('provider')
            session_token = data.get('session_token')
            folder_id = data.get('folder_id')
            limit = data.get('limit', 10)
            page_token = data.get('page_token')
            if not provider or not session_token:
                return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)

            decoded_token = request.decoded_token
            if not decoded_token:
                return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
            user = decoded_token.get('sub')
            session = sessions_collection.find_one({"session_token": session_token, "user": user})
            if not session:
                return make_response(jsonify({"success": False, "error": "Invalid or unauthorized session"}), 401)

            loader = ConnectorCreator.create_connector(provider, session_token)
            documents = loader.load_data({
                'limit': limit,
                'list_only': True,
                'session_token': session_token,
                'folder_id': folder_id,
                'page_token': page_token
            })

            files = []
            for doc in documents[:limit]:
                metadata = doc.extra_info
                modified_time = metadata.get('modified_time')
                if modified_time:
                    date_part = modified_time.split('T')[0]
                    time_part = modified_time.split('T')[1].split('.')[0].split('Z')[0]
                    formatted_time = f"{date_part} {time_part}"
                else:
                    formatted_time = None

                files.append({
                    'id': doc.doc_id,
                    'name': metadata.get('file_name', 'Unknown File'),
                    'type': metadata.get('mime_type', 'unknown'),
                    'size': metadata.get('size', None),
                    'modifiedTime': formatted_time
                })

            next_token = getattr(loader, 'next_page_token', None)
            has_more = bool(next_token)

            return make_response(jsonify({"success": True, "files": files, "total": len(files), "next_page_token": next_token, "has_more": has_more}), 200)
        except Exception as e:
            current_app.logger.error(f"Error loading connector files: {e}")
            return make_response(jsonify({"success": False, "error": f"Failed to load files: {str(e)}"}), 500)


@connectors_ns.route("/api/connectors/validate-session")
class ConnectorValidateSession(Resource):
    @api.expect(api.model("ConnectorValidateSessionModel", {"provider": fields.String(required=True), "session_token": fields.String(required=True)}))
    @api.doc(description="Validate connector session token and return user info")
    def post(self):
        try:
            data = request.get_json()
            provider = data.get('provider')
            session_token = data.get('session_token')
            if not provider or not session_token:
                return make_response(jsonify({"success": False, "error": "provider and session_token are required"}), 400)

            decoded_token = request.decoded_token
            if not decoded_token:
                return make_response(jsonify({"success": False, "error": "Unauthorized"}), 401)
            user = decoded_token.get('sub')

            session = sessions_collection.find_one({"session_token": session_token, "user": user})
            if not session or "token_info" not in session:
                return make_response(jsonify({"success": False, "error": "Invalid or expired session"}), 401)

            token_info = session["token_info"]
            auth = ConnectorCreator.create_auth(provider)
            is_expired = auth.is_token_expired(token_info)

            return make_response(jsonify({
                "success": True,
                "expired": is_expired,
                "user_email": session.get('user_email', 'Connected User')
            }), 200)
        except Exception as e:
            current_app.logger.error(f"Error validating connector session: {e}")
            return make_response(jsonify({"success": False, "error": str(e)}), 500)


@connectors_ns.route("/api/connectors/disconnect")
class ConnectorDisconnect(Resource):
    @api.expect(api.model("ConnectorDisconnectModel", {"provider": fields.String(required=True), "session_token": fields.String(required=False)}))
    @api.doc(description="Disconnect a connector session")
    def post(self):
        try:
            data = request.get_json()
            provider = data.get('provider')
            session_token = data.get('session_token')
            if not provider:
                return make_response(jsonify({"success": False, "error": "provider is required"}), 400)

            if session_token:
                sessions_collection.delete_one({"session_token": session_token})

            return make_response(jsonify({"success": True}), 200)
        except Exception as e:
            current_app.logger.error(f"Error disconnecting connector session: {e}")
            return make_response(jsonify({"success": False, "error": str(e)}), 500)


@connectors_ns.route("/api/connectors/sync")
class ConnectorSync(Resource):
    @api.expect(
        api.model(
            "ConnectorSyncModel",
            {
                "source_id": fields.String(required=True, description="Source ID to sync"),
                "session_token": fields.String(required=True, description="Authentication token")
            },
        )
    )
    @api.doc(description="Sync connector source to check for modifications")
    def post(self):
        decoded_token = request.decoded_token
        if not decoded_token:
            return make_response(jsonify({"success": False}), 401)

        try:
            data = request.get_json()
            source_id = data.get('source_id')
            session_token = data.get('session_token')

            if not all([source_id, session_token]):
                return make_response(
                    jsonify({
                        "success": False,
                        "error": "source_id and session_token are required"
                    }),
                    400
                )
            source = sources_collection.find_one({"_id": ObjectId(source_id)})
            if not source:
                return make_response(
                    jsonify({
                        "success": False,
                        "error": "Source not found"
                    }),
                    404
                )

            if source.get('user') != decoded_token.get('sub'):
                return make_response(
                    jsonify({
                        "success": False,
                        "error": "Unauthorized access to source"
                    }),
                    403
                )

            remote_data = {}
            try:
                if source.get('remote_data'):
                    remote_data = json.loads(source.get('remote_data'))
            except json.JSONDecodeError:
                current_app.logger.error(f"Invalid remote_data format for source {source_id}")
                remote_data = {}

            source_type = remote_data.get('provider')
            if not source_type:
                return make_response(
                    jsonify({
                        "success": False,
                        "error": "Source provider not found in remote_data"
                    }),
                    400
                )

            # Extract configuration from remote_data
            file_ids = remote_data.get('file_ids', [])
            folder_ids = remote_data.get('folder_ids', [])
            recursive = remote_data.get('recursive', True)

            # Start the sync task
            task = ingest_connector_task.delay(
                job_name=source.get('name'),
                user=decoded_token.get('sub'),
                source_type=source_type,
                session_token=session_token,
                file_ids=file_ids,
                folder_ids=folder_ids,
                recursive=recursive,
                retriever=source.get('retriever', 'classic'),
                operation_mode="sync",
                doc_id=source_id,
                sync_frequency=source.get('sync_frequency', 'never')
            )

            return make_response(
                jsonify({
                    "success": True,
                    "task_id": task.id
                }),
                200
            )

        except Exception as err:
            current_app.logger.error(
                f"Error syncing connector source: {err}",
                exc_info=True
            )
            return make_response(
                jsonify({
                    "success": False,
                    "error": str(err)
                }),
                400
            )


@connectors_ns.route("/api/connectors/callback-status")
class ConnectorCallbackStatus(Resource):
    @api.doc(description="Return HTML page with connector authentication status")
    def get(self):
        """Return HTML page with connector authentication status"""
        try:
            status = request.args.get('status', 'error')
            message = request.args.get('message', '')
            provider = request.args.get('provider', 'connector')
            session_token = request.args.get('session_token', '')
            user_email = request.args.get('user_email', '')

            html_content = f"""
            <!DOCTYPE html>
            <html>
            <head>
                <title>{provider.replace('_', ' ').title()} Authentication</title>
                <style>
                    body {{ font-family: Arial, sans-serif; text-align: center; padding: 40px; }}
                    .container {{ max-width: 600px; margin: 0 auto; }}
                    .success {{ color: #4CAF50; }}
                    .error {{ color: #F44336; }}
                </style>
                <script>
                    window.onload = function() {{
                        const status = "{status}";
                        const sessionToken = "{session_token}";
                        const userEmail = "{user_email}";

                        if (status === "success" && window.opener) {{
                            window.opener.postMessage({{
                                type: '{provider}_auth_success',
                                session_token: sessionToken,
                                user_email: userEmail
                            }}, '*');

                            setTimeout(() => window.close(), 3000);
                        }}
                    }};
                </script>
            </head>
            <body>
                <div class="container">
                    <h2>{provider.replace('_', ' ').title()} Authentication</h2>
                    <div class="{status}">
                        <p>{message}</p>
                        {f'<p>Connected as: {user_email}</p>' if status == 'success' else ''}
                    </div>
                    <p><small>You can close this window. {f"Your {provider.replace('_', ' ').title()} is now connected and ready to use." if status == 'success' else ''}</small></p>
                </div>
            </body>
            </html>
            """

            return make_response(html_content, 200, {'Content-Type': 'text/html'})
        except Exception as e:
            current_app.logger.error(f"Error rendering callback status page: {e}")
            return make_response("Authentication error occurred", 500, {'Content-Type': 'text/html'})
@@ -1,5 +1,6 @@
|
||||
import os
|
||||
import datetime
|
||||
import json
|
||||
from flask import Blueprint, request, send_from_directory
|
||||
from werkzeug.utils import secure_filename
|
||||
from bson.objectid import ObjectId
|
||||
@@ -48,7 +49,17 @@ def upload_index_files():
|
||||
remote_data = request.form["remote_data"] if "remote_data" in request.form else None
|
||||
sync_frequency = request.form["sync_frequency"] if "sync_frequency" in request.form else None
|
||||
|
||||
original_file_path = request.form.get("original_file_path")
|
||||
file_path = request.form.get("file_path")
|
||||
directory_structure = request.form.get("directory_structure")
|
||||
|
||||
if directory_structure:
|
||||
try:
|
||||
directory_structure = json.loads(directory_structure)
|
||||
except Exception:
|
||||
logger.error("Error parsing directory_structure")
|
||||
directory_structure = {}
|
||||
else:
|
||||
directory_structure = {}
|
||||
|
||||
storage = StorageCreator.get_storage()
|
||||
index_base_path = f"indexes/{id}"
|
||||
@@ -66,10 +77,13 @@ def upload_index_files():
|
||||
file_pkl = request.files["file_pkl"]
|
||||
if file_pkl.filename == "":
|
||||
return {"status": "no file name"}
|
||||
|
||||
|
||||
# Save index files to storage
|
||||
storage.save_file(file_faiss, f"{index_base_path}/index.faiss")
|
||||
storage.save_file(file_pkl, f"{index_base_path}/index.pkl")
|
||||
faiss_storage_path = f"{index_base_path}/index.faiss"
|
||||
pkl_storage_path = f"{index_base_path}/index.pkl"
|
||||
storage.save_file(file_faiss, faiss_storage_path)
|
||||
storage.save_file(file_pkl, pkl_storage_path)
|
||||
|
||||
|
||||
existing_entry = sources_collection.find_one({"_id": ObjectId(id)})
|
||||
if existing_entry:
|
||||
@@ -87,7 +101,8 @@ def upload_index_files():
|
||||
"retriever": retriever,
|
||||
"remote_data": remote_data,
|
||||
"sync_frequency": sync_frequency,
|
||||
"file_path": original_file_path,
|
||||
"file_path": file_path,
|
||||
"directory_structure": directory_structure,
|
||||
}
|
||||
},
|
||||
)
|
||||
@@ -105,7 +120,8 @@ def upload_index_files():
|
||||
"retriever": retriever,
|
||||
"remote_data": remote_data,
|
||||
"sync_frequency": sync_frequency,
|
||||
"file_path": original_file_path,
|
||||
"file_path": file_path,
|
||||
"directory_structure": directory_structure,
|
||||
}
|
||||
)
|
||||
return {"status": "ok"}
|
||||
|
||||
@@ -3,11 +3,11 @@ import json
|
||||
import math
|
||||
import os
|
||||
import secrets
|
||||
import shutil
|
||||
import uuid
|
||||
from functools import wraps
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import tempfile
|
||||
import zipfile
|
||||
from bson.binary import Binary, UuidRepresentation
|
||||
from bson.dbref import DBRef
|
||||
from bson.objectid import ObjectId
|
||||
@@ -28,13 +28,14 @@ from application.agents.tools.tool_manager import ToolManager
|
||||
|
||||
from application.api.user.tasks import (
|
||||
ingest,
|
||||
ingest_connector_task,
|
||||
ingest_remote,
|
||||
process_agent_webhook,
|
||||
store_attachment,
|
||||
)
|
||||
from application.core.mongo_db import MongoDB
|
||||
from application.core.settings import settings
|
||||
from application.extensions import api
|
||||
from application.api import api
|
||||
from application.storage.storage_creator import StorageCreator
|
||||
from application.tts.google_tts import GoogleTTS
|
||||
from application.utils import (
|
||||
@@ -44,7 +45,9 @@ from application.utils import (
|
||||
validate_function_name,
|
||||
validate_required_fields,
|
||||
)
|
||||
from application.utils import num_tokens_from_string
|
||||
from application.vectorstore.vector_creator import VectorCreator
|
||||
from application.parser.connectors.connector_creator import ConnectorCreator
|
||||
|
||||
storage = StorageCreator.get_storage()
|
||||
|
||||
@@ -62,12 +65,15 @@ user_logs_collection = db["user_logs"]
|
||||
user_tools_collection = db["user_tools"]
|
||||
attachments_collection = db["attachments"]
|
||||
|
||||
agents_collection.create_index(
|
||||
[("shared", 1)],
|
||||
name="shared_index",
|
||||
background=True,
|
||||
)
|
||||
users_collection.create_index("user_id", unique=True)
|
||||
try:
|
||||
agents_collection.create_index(
|
||||
[("shared", 1)],
|
||||
name="shared_index",
|
||||
background=True,
|
||||
)
|
||||
users_collection.create_index("user_id", unique=True)
|
||||
except Exception as e:
|
||||
print("Error creating indexes:", e)
|
||||
|
||||
user = Blueprint("user", __name__)
|
||||
user_ns = Namespace("user", description="User related operations", path="/")
|
||||
@@ -471,7 +477,7 @@ class DeleteByIds(Resource):
|
||||
@user_ns.route("/api/delete_old")
|
||||
class DeleteOldIndexes(Resource):
|
||||
@api.doc(
|
||||
description="Deletes old indexes",
|
||||
description="Deletes old indexes and associated files",
|
||||
params={"source_id": "The source ID to delete"},
|
||||
)
|
||||
def get(self):
|
||||
@@ -488,21 +494,40 @@ class DeleteOldIndexes(Resource):
|
||||
)
|
||||
if not doc:
|
||||
return make_response(jsonify({"status": "not found"}), 404)
|
||||
|
||||
storage = StorageCreator.get_storage()
|
||||
|
||||
try:
|
||||
# Delete vector index
|
||||
if settings.VECTOR_STORE == "faiss":
|
||||
shutil.rmtree(os.path.join(current_dir, "indexes", str(doc["_id"])))
|
||||
index_path = f"indexes/{str(doc['_id'])}"
|
||||
if storage.file_exists(f"{index_path}/index.faiss"):
|
||||
storage.delete_file(f"{index_path}/index.faiss")
|
||||
if storage.file_exists(f"{index_path}/index.pkl"):
|
||||
storage.delete_file(f"{index_path}/index.pkl")
|
||||
else:
|
||||
vectorstore = VectorCreator.create_vectorstore(
|
||||
settings.VECTOR_STORE, source_id=str(doc["_id"])
|
||||
)
|
||||
vectorstore.delete_index()
|
||||
|
||||
if "file_path" in doc and doc["file_path"]:
|
||||
file_path = doc["file_path"]
|
||||
if storage.is_directory(file_path):
|
||||
files = storage.list_files(file_path)
|
||||
for f in files:
|
||||
storage.delete_file(f)
|
||||
else:
|
||||
storage.delete_file(file_path)
|
||||
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
except Exception as err:
|
||||
current_app.logger.error(
|
||||
f"Error deleting old indexes: {err}", exc_info=True
|
||||
f"Error deleting files and indexes: {err}", exc_info=True
|
||||
)
|
||||
return make_response(jsonify({"success": False}), 400)
|
||||
|
||||
sources_collection.delete_one({"_id": ObjectId(source_id)})
|
||||
return make_response(jsonify({"success": True}), 200)
|
||||
|
||||
@@ -546,146 +571,276 @@ class UploadFile(Resource):
         # Create safe versions for filesystem operations
         safe_user = safe_filename(user)
         dir_name = safe_filename(job_name)
+        base_path = f"{settings.UPLOAD_FOLDER}/{safe_user}/{dir_name}"

         try:
             storage = StorageCreator.get_storage()
-            base_path = f"{settings.UPLOAD_FOLDER}/{safe_user}/{dir_name}"

-            if len(files) > 1:
-                temp_files = []
-                for file in files:
-                    filename = safe_filename(file.filename)
-                    temp_path = f"{base_path}/temp/{filename}"
-                    storage.save_file(file, temp_path)
-                    temp_files.append(temp_path)
-                    print(f"Saved file: {filename}")
-                zip_filename = f"{dir_name}.zip"
-                zip_path = f"{base_path}/{zip_filename}"
-                zip_temp_path = None
-
-                def create_zip_archive(temp_paths, dir_name, storage):
-                    import tempfile
-
-                    with tempfile.NamedTemporaryFile(
-                        delete=False, suffix=".zip"
-                    ) as temp_zip_file:
-                        zip_output_path = temp_zip_file.name
-                    with tempfile.TemporaryDirectory() as stage_dir:
-                        for path in temp_paths:
-                            try:
-                                file_data = storage.get_file(path)
-                                with open(
-                                    os.path.join(stage_dir, os.path.basename(path)),
-                                    "wb",
-                                ) as f:
-                                    f.write(file_data.read())
-                            except Exception as e:
-                                current_app.logger.error(
-                                    f"Error processing file {path} for zipping: {e}",
-                                    exc_info=True,
-                                )
-                                if os.path.exists(zip_output_path):
-                                    os.remove(zip_output_path)
-                                raise
-                        try:
-                            shutil.make_archive(
-                                base_name=zip_output_path.replace(".zip", ""),
-                                format="zip",
-                                root_dir=stage_dir,
-                            )
-                        except Exception as e:
-                            current_app.logger.error(
-                                f"Error creating zip archive: {e}", exc_info=True
-                            )
-                            if os.path.exists(zip_output_path):
-                                os.remove(zip_output_path)
-                            raise
-                    return zip_output_path
-
-                try:
-                    zip_temp_path = create_zip_archive(temp_files, dir_name, storage)
-                    with open(zip_temp_path, "rb") as zip_file:
-                        storage.save_file(zip_file, zip_path)
-                    task = ingest.delay(
-                        settings.UPLOAD_FOLDER,
-                        [
-                            ".rst",
-                            ".md",
-                            ".pdf",
-                            ".txt",
-                            ".docx",
-                            ".csv",
-                            ".epub",
-                            ".html",
-                            ".mdx",
-                            ".json",
-                            ".xlsx",
-                            ".pptx",
-                            ".png",
-                            ".jpg",
-                            ".jpeg",
-                        ],
-                        job_name,
-                        zip_filename,
-                        user,
-                        dir_name,
-                        safe_user,
-                    )
-                finally:
-                    # Clean up temporary files
-                    for temp_path in temp_files:
-                        try:
-                            storage.delete_file(temp_path)
-                        except Exception as e:
-                            current_app.logger.error(
-                                f"Error deleting temporary file {temp_path}: {e}",
-                                exc_info=True,
-                            )
-                    # Clean up the zip file if it was created
-                    if zip_temp_path and os.path.exists(zip_temp_path):
-                        os.remove(zip_temp_path)
-            else:  # Keep this else block for single file upload
-                # For single file
-                file = files[0]
-                filename = safe_filename(file.filename)
-                file_path = f"{base_path}/{filename}"
-
-                storage.save_file(file, file_path)
-
-                task = ingest.delay(
-                    settings.UPLOAD_FOLDER,
-                    [
-                        ".rst",
-                        ".md",
-                        ".pdf",
-                        ".txt",
-                        ".docx",
-                        ".csv",
-                        ".epub",
-                        ".html",
-                        ".mdx",
-                        ".json",
-                        ".xlsx",
-                        ".pptx",
-                        ".png",
-                        ".jpg",
-                        ".jpeg",
-                    ],
-                    job_name,
-                    filename,  # Corrected variable for single-file case
-                    user,
-                    dir_name,
-                    safe_user,
-                )
+            for file in files:
+                original_filename = file.filename
+                safe_file = safe_filename(original_filename)
+
+                with tempfile.TemporaryDirectory() as temp_dir:
+                    temp_file_path = os.path.join(temp_dir, safe_file)
+                    file.save(temp_file_path)
+
+                    if zipfile.is_zipfile(temp_file_path):
+                        try:
+                            with zipfile.ZipFile(temp_file_path, 'r') as zip_ref:
+                                zip_ref.extractall(path=temp_dir)
+
+                            # Walk through extracted files and upload them
+                            for root, _, files in os.walk(temp_dir):
+                                for extracted_file in files:
+                                    if os.path.join(root, extracted_file) == temp_file_path:
+                                        continue
+
+                                    rel_path = os.path.relpath(os.path.join(root, extracted_file), temp_dir)
+                                    storage_path = f"{base_path}/{rel_path}"
+
+                                    with open(os.path.join(root, extracted_file), 'rb') as f:
+                                        storage.save_file(f, storage_path)
+                        except Exception as e:
+                            current_app.logger.error(f"Error extracting zip: {e}", exc_info=True)
+                            # If zip extraction fails, save the original zip file
+                            file_path = f"{base_path}/{safe_file}"
+                            with open(temp_file_path, 'rb') as f:
+                                storage.save_file(f, file_path)
+                    else:
+                        # For non-zip files, save directly
+                        file_path = f"{base_path}/{safe_file}"
+                        with open(temp_file_path, 'rb') as f:
+                            storage.save_file(f, file_path)
+
+            task = ingest.delay(
+                settings.UPLOAD_FOLDER,
+                [
+                    ".rst", ".md", ".pdf", ".txt", ".docx", ".csv", ".epub",
+                    ".html", ".mdx", ".json", ".xlsx", ".pptx", ".png",
+                    ".jpg", ".jpeg",
+                ],
+                job_name,
+                user,
+                file_path=base_path,
+                filename=dir_name
+            )
         except Exception as err:
             current_app.logger.error(f"Error uploading file: {err}", exc_info=True)
             return make_response(jsonify({"success": False}), 400)
         return make_response(jsonify({"success": True, "task_id": task.id}), 200)

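With the new flow, zip archives are expanded server-side and loose files are stored as-is before a single ingest task is queued. A minimal client sketch, assuming the same local instance; the form field feeding job_name comes from a part of the handler not shown here, so its name is an assumption:

import requests

files = [
    ("file", open("guide.pdf", "rb")),
    ("file", open("notes.md", "rb")),
]
resp = requests.post(
    "http://localhost:7091/api/upload",
    files=files,
    data={"name": "my-docs"},  # assumed field feeding job_name
    headers={"Authorization": "Bearer <jwt token>"},
)
print(resp.json())  # {"success": true, "task_id": "..."}
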
@user_ns.route("/api/manage_source_files")
|
||||
class ManageSourceFiles(Resource):
|
||||
@api.expect(
|
||||
api.model(
|
||||
"ManageSourceFilesModel",
|
||||
{
|
||||
"source_id": fields.String(required=True, description="Source ID to modify"),
|
||||
"operation": fields.String(required=True, description="Operation: 'add', 'remove', or 'remove_directory'"),
|
||||
"file_paths": fields.List(fields.String, required=False, description="File paths to remove (for remove operation)"),
|
||||
"directory_path": fields.String(required=False, description="Directory path to remove (for remove_directory operation)"),
|
||||
"file": fields.Raw(required=False, description="Files to add (for add operation)"),
|
||||
"parent_dir": fields.String(required=False, description="Parent directory path relative to source root"),
|
||||
},
|
||||
)
|
||||
)
|
||||
@api.doc(
|
||||
description="Add files, remove files, or remove directories from an existing source",
|
||||
)
|
||||
def post(self):
|
||||
decoded_token = request.decoded_token
|
||||
if not decoded_token:
|
||||
return make_response(jsonify({"success": False, "message": "Unauthorized"}), 401)
|
||||
|
||||
user = decoded_token.get("sub")
|
||||
source_id = request.form.get("source_id")
|
||||
operation = request.form.get("operation")
|
||||
|
||||
if not source_id or not operation:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "source_id and operation are required"}), 400
|
||||
)
|
||||
|
||||
if operation not in ["add", "remove", "remove_directory"]:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "operation must be 'add', 'remove', or 'remove_directory'"}), 400
|
||||
)
|
||||
|
||||
try:
|
||||
ObjectId(source_id)
|
||||
except Exception:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid source ID format"}), 400
|
||||
)
|
||||
|
||||
try:
|
||||
source = sources_collection.find_one({"_id": ObjectId(source_id), "user": user})
|
||||
if not source:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Source not found or access denied"}), 404
|
||||
)
|
||||
except Exception as err:
|
||||
current_app.logger.error(f"Error finding source: {err}", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "message": "Database error"}), 500)
|
||||
|
||||
try:
|
||||
storage = StorageCreator.get_storage()
|
||||
source_file_path = source.get("file_path", "")
|
||||
parent_dir = request.form.get("parent_dir", "")
|
||||
|
||||
if parent_dir and (parent_dir.startswith("/") or ".." in parent_dir):
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid parent directory path"}), 400
|
||||
)
|
||||
|
||||
if operation == "add":
|
||||
files = request.files.getlist("file")
|
||||
if not files or all(file.filename == "" for file in files):
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "No files provided for add operation"}), 400
|
||||
)
|
||||
|
||||
added_files = []
|
||||
|
||||
target_dir = source_file_path
|
||||
if parent_dir:
|
||||
target_dir = f"{source_file_path}/{parent_dir}"
|
||||
|
||||
for file in files:
|
||||
if file.filename:
|
||||
safe_filename_str = safe_filename(file.filename)
|
||||
file_path = f"{target_dir}/{safe_filename_str}"
|
||||
|
||||
# Save file to storage
|
||||
storage.save_file(file, file_path)
|
||||
added_files.append(safe_filename_str)
|
||||
|
||||
# Trigger re-ingestion pipeline
|
||||
from application.api.user.tasks import reingest_source_task
|
||||
|
||||
task = reingest_source_task.delay(source_id=source_id, user=user)
|
||||
|
||||
return make_response(jsonify({
|
||||
"success": True,
|
||||
"message": f"Added {len(added_files)} files",
|
||||
"added_files": added_files,
|
||||
"parent_dir": parent_dir,
|
||||
"reingest_task_id": task.id
|
||||
}), 200)
|
||||
|
||||
elif operation == "remove":
|
||||
file_paths_str = request.form.get("file_paths")
|
||||
if not file_paths_str:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "file_paths required for remove operation"}), 400
|
||||
)
|
||||
|
||||
try:
|
||||
file_paths = json.loads(file_paths_str) if isinstance(file_paths_str, str) else file_paths_str
|
||||
except Exception:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid file_paths format"}), 400
|
||||
)
|
||||
|
||||
# Remove files from storage and directory structure
|
||||
removed_files = []
|
||||
for file_path in file_paths:
|
||||
full_path = f"{source_file_path}/{file_path}"
|
||||
|
||||
# Remove from storage
|
||||
if storage.file_exists(full_path):
|
||||
storage.delete_file(full_path)
|
||||
removed_files.append(file_path)
|
||||
|
||||
# Trigger re-ingestion pipeline
|
||||
from application.api.user.tasks import reingest_source_task
|
||||
|
||||
task = reingest_source_task.delay(source_id=source_id, user=user)
|
||||
|
||||
return make_response(jsonify({
|
||||
"success": True,
|
||||
"message": f"Removed {len(removed_files)} files",
|
||||
"removed_files": removed_files,
|
||||
"reingest_task_id": task.id
|
||||
}), 200)
|
||||
|
||||
elif operation == "remove_directory":
|
||||
directory_path = request.form.get("directory_path")
|
||||
if not directory_path:
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "directory_path required for remove_directory operation"}), 400
|
||||
)
|
||||
|
||||
# Validate directory path (prevent path traversal)
|
||||
if directory_path.startswith("/") or ".." in directory_path:
|
||||
current_app.logger.warning(
|
||||
f"Invalid directory path attempted for removal. "
|
||||
f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}"
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Invalid directory path"}), 400
|
||||
)
|
||||
|
||||
full_directory_path = f"{source_file_path}/{directory_path}" if directory_path else source_file_path
|
||||
|
||||
if not storage.is_directory(full_directory_path):
|
||||
current_app.logger.warning(
|
||||
f"Directory not found or is not a directory for removal. "
|
||||
f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}, "
|
||||
f"Full path: {full_directory_path}"
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Directory not found or is not a directory"}), 404
|
||||
)
|
||||
|
||||
success = storage.remove_directory(full_directory_path)
|
||||
|
||||
if not success:
|
||||
current_app.logger.error(
|
||||
f"Failed to remove directory from storage. "
|
||||
f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}, "
|
||||
f"Full path: {full_directory_path}"
|
||||
)
|
||||
return make_response(
|
||||
jsonify({"success": False, "message": "Failed to remove directory"}), 500
|
||||
)
|
||||
|
||||
current_app.logger.info(
|
||||
f"Successfully removed directory. "
|
||||
f"User: {user}, Source ID: {source_id}, Directory path: {directory_path}, "
|
||||
f"Full path: {full_directory_path}"
|
||||
)
|
||||
|
||||
# Trigger re-ingestion pipeline
|
||||
from application.api.user.tasks import reingest_source_task
|
||||
|
||||
task = reingest_source_task.delay(source_id=source_id, user=user)
|
||||
|
||||
return make_response(jsonify({
|
||||
"success": True,
|
||||
"message": f"Successfully removed directory: {directory_path}",
|
||||
"removed_directory": directory_path,
|
||||
"reingest_task_id": task.id
|
||||
}), 200)
|
||||
|
||||
except Exception as err:
|
||||
error_context = f"operation={operation}, user={user}, source_id={source_id}"
|
||||
if operation == "remove_directory":
|
||||
directory_path = request.form.get("directory_path", "")
|
||||
error_context += f", directory_path={directory_path}"
|
||||
elif operation == "remove":
|
||||
file_paths_str = request.form.get("file_paths", "")
|
||||
error_context += f", file_paths={file_paths_str}"
|
||||
elif operation == "add":
|
||||
parent_dir = request.form.get("parent_dir", "")
|
||||
error_context += f", parent_dir={parent_dir}"
|
||||
|
||||
current_app.logger.error(f"Error managing source files: {err} ({error_context})", exc_info=True)
|
||||
return make_response(jsonify({"success": False, "message": "Operation failed"}), 500)
|
||||
|
||||
|
||||
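A minimal sketch of the remove operation against this new endpoint, assuming a local instance; the source ID and paths are placeholders, and file_paths is sent as a JSON-encoded list exactly as the handler expects:

import json
import requests

resp = requests.post(
    "http://localhost:7091/api/manage_source_files",
    data={
        "source_id": "<source id>",
        "operation": "remove",
        "file_paths": json.dumps(["docs/old.md", "docs/stale.txt"]),
    },
    headers={"Authorization": "Bearer <jwt token>"},
)
print(resp.json())  # lists "removed_files" and a "reingest_task_id"
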
@user_ns.route("/api/remote")
|
||||
class UploadRemote(Resource):
|
||||
@api.expect(
|
||||
@@ -724,6 +879,42 @@ class UploadRemote(Resource):
|
||||
source_data = config.get("url")
|
||||
elif data["source"] == "reddit":
|
||||
source_data = config
|
||||
elif data["source"] in ConnectorCreator.get_supported_connectors():
|
||||
session_token = config.get("session_token")
|
||||
if not session_token:
|
||||
return make_response(jsonify({
|
||||
"success": False,
|
||||
"error": f"Missing session_token in {data['source']} configuration"
|
||||
}), 400)
|
||||
|
||||
# Process file_ids
|
||||
file_ids = config.get("file_ids", [])
|
||||
if isinstance(file_ids, str):
|
||||
file_ids = [id.strip() for id in file_ids.split(',') if id.strip()]
|
||||
elif not isinstance(file_ids, list):
|
||||
file_ids = []
|
||||
|
||||
# Process folder_ids
|
||||
folder_ids = config.get("folder_ids", [])
|
||||
if isinstance(folder_ids, str):
|
||||
folder_ids = [id.strip() for id in folder_ids.split(',') if id.strip()]
|
||||
elif not isinstance(folder_ids, list):
|
||||
folder_ids = []
|
||||
|
||||
config["file_ids"] = file_ids
|
||||
config["folder_ids"] = folder_ids
|
||||
|
||||
task = ingest_connector_task.delay(
|
||||
job_name=data["name"],
|
||||
user=decoded_token.get("sub"),
|
||||
source_type=data["source"],
|
||||
session_token=session_token,
|
||||
file_ids=file_ids,
|
||||
folder_ids=folder_ids,
|
||||
recursive=config.get("recursive", False),
|
||||
retriever=config.get("retriever", "classic")
|
||||
)
|
||||
return make_response(jsonify({"success": True, "task_id": task.id}), 200)
|
||||
task = ingest_remote.delay(
|
||||
source_data=source_data,
|
||||
job_name=data["name"],
|
||||
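A sketch of a connector configuration this branch accepts, with placeholder values; the surrounding handler (not shown) parses the request into data and config, so the exact field carrying the config is an assumption here:

config = {
    "session_token": "<connector session token>",  # required, else a 400 is returned
    "file_ids": "id1, id2",        # comma-separated strings are normalized to lists
    "folder_ids": ["folder-a"],    # lists pass through unchanged
    "recursive": False,
    "retriever": "classic",
}
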
@@ -831,6 +1022,8 @@ class PaginatedSources(Resource):
                 "tokens": doc.get("tokens", ""),
                 "retriever": doc.get("retriever", "classic"),
                 "syncFrequency": doc.get("sync_frequency", ""),
+                "isNested": bool(doc.get("directory_structure")),
+                "type": doc.get("type", "file")
             }
             paginated_docs.append(doc_data)
         response = {
@@ -878,6 +1071,8 @@ class CombinedJson(Resource):
                     "tokens": index.get("tokens", ""),
                     "retriever": index.get("retriever", "classic"),
                     "syncFrequency": index.get("sync_frequency", ""),
+                    "is_nested": bool(index.get("directory_structure")),
+                    "type": index.get("type", "file")  # Add type field with default "file"
                 }
             )
         except Exception as err:
@@ -1124,6 +1319,7 @@ class GetAgent(Resource):
             "tool_details": resolve_tool_details(agent.get("tools", [])),
             "agent_type": agent.get("agent_type", ""),
             "status": agent.get("status", ""),
+            "json_schema": agent.get("json_schema"),
             "created_at": agent.get("createdAt", ""),
             "updated_at": agent.get("updatedAt", ""),
             "last_used_at": agent.get("lastUsedAt", ""),
@@ -1178,6 +1374,7 @@ class GetAgents(Resource):
             "tool_details": resolve_tool_details(agent.get("tools", [])),
             "agent_type": agent.get("agent_type", ""),
             "status": agent.get("status", ""),
+            "json_schema": agent.get("json_schema"),
             "created_at": agent.get("createdAt", ""),
             "updated_at": agent.get("updatedAt", ""),
             "last_used_at": agent.get("lastUsedAt", ""),

@@ -1223,6 +1420,9 @@ class CreateAgent(Resource):
                 "status": fields.String(
                     required=True, description="Status of the agent (draft or published)"
                 ),
+                "json_schema": fields.Raw(
+                    required=False, description="JSON schema for enforcing structured output format"
+                ),
             },
         )

@@ -1241,8 +1441,36 @@ class CreateAgent(Resource):
                 data["tools"] = json.loads(data["tools"])
             except json.JSONDecodeError:
                 data["tools"] = []
+        if "json_schema" in data:
+            try:
+                data["json_schema"] = json.loads(data["json_schema"])
+            except json.JSONDecodeError:
+                data["json_schema"] = None
         print(f"Received data: {data}")

+        # Validate JSON schema if provided
+        if data.get("json_schema"):
+            try:
+                # Basic validation - ensure it's a valid JSON structure
+                json_schema = data.get("json_schema")
+                if not isinstance(json_schema, dict):
+                    return make_response(
+                        jsonify({"success": False, "message": "JSON schema must be a valid JSON object"}),
+                        400
+                    )
+
+                # Validate that it has either a 'schema' property or is itself a schema
+                if "schema" not in json_schema and "type" not in json_schema:
+                    return make_response(
+                        jsonify({"success": False, "message": "JSON schema must contain either a 'schema' property or be a valid JSON schema with 'type' property"}),
+                        400
+                    )
+            except Exception as e:
+                return make_response(
+                    jsonify({"success": False, "message": f"Invalid JSON schema: {str(e)}"}),
+                    400
+                )
+
         if data.get("status") not in ["draft", "published"]:
             return make_response(
                 jsonify(

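For reference, a payload that passes the validation above: the schema must be a JSON object carrying either a top-level "type" or a nested "schema" property. A minimal example:

import json

json_schema = {
    "type": "object",
    "properties": {
        "answer": {"type": "string"},
        "confidence": {"type": "number"},
    },
    "required": ["answer"],
}
# Sent as a string in the form payload; the handler json.loads() it:
form = {"name": "my-agent", "status": "draft", "json_schema": json.dumps(json_schema)}
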
@@ -1299,6 +1527,7 @@ class CreateAgent(Resource):
             "tools": data.get("tools", []),
             "agent_type": data.get("agent_type", ""),
             "status": data.get("status"),
+            "json_schema": data.get("json_schema"),
             "createdAt": datetime.datetime.now(datetime.timezone.utc),
             "updatedAt": datetime.datetime.now(datetime.timezone.utc),
             "lastUsedAt": None,
@@ -1339,6 +1568,9 @@ class UpdateAgent(Resource):
                 "status": fields.String(
                     required=True, description="Status of the agent (draft or published)"
                 ),
+                "json_schema": fields.Raw(
+                    required=False, description="JSON schema for enforcing structured output format"
+                ),
             },
         )

@@ -1357,6 +1589,11 @@ class UpdateAgent(Resource):
                 data["tools"] = json.loads(data["tools"])
             except json.JSONDecodeError:
                 data["tools"] = []
+        if "json_schema" in data:
+            try:
+                data["json_schema"] = json.loads(data["json_schema"])
+            except json.JSONDecodeError:
+                data["json_schema"] = None

         if not ObjectId.is_valid(agent_id):
             return make_response(
@@ -1402,6 +1639,7 @@ class UpdateAgent(Resource):
             "tools",
             "agent_type",
             "status",
+            "json_schema",
         ]

         for field in allowed_fields:
@@ -1786,10 +2024,20 @@ class SharedAgent(Resource):
                 else ""
             ),
             "description": shared_agent.get("description", ""),
+            "source": (
+                str(source_doc["_id"])
+                if isinstance(shared_agent.get("source"), DBRef)
+                and (source_doc := db.dereference(shared_agent.get("source")))
+                else ""
+            ),
+            "chunks": shared_agent.get("chunks", "0"),
+            "retriever": shared_agent.get("retriever", "classic"),
+            "prompt_id": shared_agent.get("prompt_id", "default"),
             "tools": shared_agent.get("tools", []),
             "tool_details": resolve_tool_details(shared_agent.get("tools", [])),
             "agent_type": shared_agent.get("agent_type", ""),
             "status": shared_agent.get("status", ""),
+            "json_schema": shared_agent.get("json_schema"),
             "created_at": shared_agent.get("createdAt", ""),
             "updated_at": shared_agent.get("updatedAt", ""),
             "shared": shared_agent.get("shared_publicly", False),
@@ -1867,6 +2115,7 @@ class SharedAgents(Resource):
             "tool_details": resolve_tool_details(agent.get("tools", [])),
             "agent_type": agent.get("agent_type", ""),
             "status": agent.get("status", ""),
+            "json_schema": agent.get("json_schema"),
             "created_at": agent.get("createdAt", ""),
             "updated_at": agent.get("updatedAt", ""),
             "pinned": str(agent["_id"]) in pinned_ids,

@@ -3322,8 +3571,14 @@ class DeleteTool(Resource):
 @user_ns.route("/api/get_chunks")
 class GetChunks(Resource):
     @api.doc(
-        description="Retrieves all chunks associated with a document",
-        params={"id": "The document ID"},
+        description="Retrieves chunks from a document, optionally filtered by file path and search term",
+        params={
+            "id": "The document ID",
+            "page": "Page number for pagination",
+            "per_page": "Number of chunks per page",
+            "path": "Optional: Filter chunks by relative file path",
+            "search": "Optional: Search term to filter chunks by title or content"
+        },
     )
     def get(self):
         decoded_token = request.decoded_token
@@ -3333,6 +3588,8 @@ class GetChunks(Resource):
         doc_id = request.args.get("id")
         page = int(request.args.get("page", 1))
         per_page = int(request.args.get("per_page", 10))
+        path = request.args.get("path")
+        search_term = request.args.get("search", "").strip().lower()

         if not ObjectId.is_valid(doc_id):
             return make_response(jsonify({"error": "Invalid doc_id"}), 400)
@@ -3344,6 +3601,30 @@ class GetChunks(Resource):
         try:
             store = get_vector_store(doc_id)
             chunks = store.get_chunks()
+
+            filtered_chunks = []
+            for chunk in chunks:
+                metadata = chunk.get("metadata", {})
+
+                # Filter by path if provided
+                if path:
+                    chunk_source = metadata.get("source", "")
+                    # Check if the chunk's source matches the requested path
+                    if not chunk_source or not chunk_source.endswith(path):
+                        continue
+
+                # Filter by search term if provided
+                if search_term:
+                    text_match = search_term in chunk.get("text", "").lower()
+                    title_match = search_term in metadata.get("title", "").lower()
+
+                    if not (text_match or title_match):
+                        continue
+
+                filtered_chunks.append(chunk)
+
+            chunks = filtered_chunks
+
             total_chunks = len(chunks)
             start = (page - 1) * per_page
             end = start + per_page

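A sketch of a query combining the new filters, assuming a local instance; path is matched against the tail of each chunk's metadata "source", and search is a case-insensitive substring test on chunk text and title:

import requests

resp = requests.get(
    "http://localhost:7091/api/get_chunks",
    params={
        "id": "<doc id>",
        "page": 1,
        "per_page": 10,
        "path": "docs/intro.md",
        "search": "install",
    },
    headers={"Authorization": "Bearer <jwt token>"},
)
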
@@ -3356,6 +3637,8 @@ class GetChunks(Resource):
                         "per_page": per_page,
                         "total": total_chunks,
                         "chunks": paginated_chunks,
+                        "path": path if path else None,
+                        "search": search_term if search_term else None
                     }
                 ),
                 200,
@@ -3364,7 +3647,6 @@ class GetChunks(Resource):
             current_app.logger.error(f"Error getting chunks: {e}", exc_info=True)
             return make_response(jsonify({"success": False}), 500)

-
 @user_ns.route("/api/add_chunk")
 class AddChunk(Resource):
     @api.expect(
@@ -3396,6 +3678,8 @@ class AddChunk(Resource):
         doc_id = data.get("id")
         text = data.get("text")
         metadata = data.get("metadata", {})
+        token_count = num_tokens_from_string(text)
+        metadata["token_count"] = token_count

         if not ObjectId.is_valid(doc_id):
             return make_response(jsonify({"error": "Invalid doc_id"}), 400)
@@ -3492,6 +3776,12 @@ class UpdateChunk(Resource):
         text = data.get("text")
         metadata = data.get("metadata")

+        if text is not None:
+            token_count = num_tokens_from_string(text)
+            if metadata is None:
+                metadata = {}
+            metadata["token_count"] = token_count
+
         if not ObjectId.is_valid(doc_id):
             return make_response(jsonify({"error": "Invalid doc_id"}), 400)
         doc = sources_collection.find_one({"_id": ObjectId(doc_id), "user": user})

@@ -3501,31 +3791,45 @@ class UpdateChunk(Resource):
             )
         try:
             store = get_vector_store(doc_id)
-
             chunks = store.get_chunks()
             existing_chunk = next((c for c in chunks if c["doc_id"] == chunk_id), None)
             if not existing_chunk:
                 return make_response(jsonify({"error": "Chunk not found"}), 404)
-            deleted = store.delete_chunk(chunk_id)
-            if not deleted:
-                return make_response(
-                    jsonify({"error": "Failed to delete existing chunk"}), 500
-                )

             new_text = text if text is not None else existing_chunk["text"]
-            new_metadata = (
-                metadata if metadata is not None else existing_chunk["metadata"]
-            )
-
-            new_chunk_id = store.add_chunk(new_text, new_metadata)
+            if metadata is not None:
+                new_metadata = existing_chunk["metadata"].copy()
+                new_metadata.update(metadata)
+            else:
+                new_metadata = existing_chunk["metadata"].copy()

-            return make_response(
-                jsonify(
-                    {
-                        "message": "Chunk updated successfully",
-                        "new_chunk_id": new_chunk_id,
-                    }
-                ),
-                200,
-            )
+            if text is not None:
+                new_metadata["token_count"] = num_tokens_from_string(new_text)
+
+            try:
+                new_chunk_id = store.add_chunk(new_text, new_metadata)
+
+                deleted = store.delete_chunk(chunk_id)
+                if not deleted:
+                    current_app.logger.warning(f"Failed to delete old chunk {chunk_id}, but new chunk {new_chunk_id} was created")
+
+                return make_response(
+                    jsonify(
+                        {
+                            "message": "Chunk updated successfully",
+                            "chunk_id": new_chunk_id,
+                            "original_chunk_id": chunk_id,
+                        }
+                    ),
+                    200,
+                )
+            except Exception as add_error:
+                current_app.logger.error(f"Failed to add updated chunk: {add_error}")
+                return make_response(
+                    jsonify({"error": "Failed to update chunk - addition failed"}), 500
+                )
         except Exception as e:
             current_app.logger.error(f"Error updating chunk: {e}", exc_info=True)
             return make_response(jsonify({"success": False}), 500)

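The reordering above is an add-before-delete swap: the replacement chunk is created first, so a failure can no longer lose the original (a failed delete leaves a recoverable duplicate instead of a missing chunk). The same idea in isolation, as a sketch with hypothetical store methods:

def safe_replace(store, old_id, text, metadata):
    new_id = store.add_chunk(text, metadata)  # if this raises, the old chunk survives
    if not store.delete_chunk(old_id):
        # Duplicate left behind; log and continue rather than fail the update.
        print(f"warning: old chunk {old_id} kept; new chunk is {new_id}")
    return new_id
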
@@ -3538,14 +3842,18 @@ class StoreAttachment(Resource):
             "AttachmentModel",
             {
                 "file": fields.Raw(required=True, description="File to upload"),
+                "api_key": fields.String(
+                    required=False, description="API key (optional)"
+                ),
             },
         )
     )
-    @api.doc(description="Stores a single attachment without vectorization or training")
+    @api.doc(
+        description="Stores a single attachment without vectorization or training. Supports user or API key authentication."
+    )
     def post(self):
-        decoded_token = request.decoded_token
-        if not decoded_token:
-            return make_response(jsonify({"success": False}), 401)
+        decoded_token = getattr(request, "decoded_token", None)
+        api_key = request.form.get("api_key") or request.args.get("api_key")
         file = request.files.get("file")

         if not file or file.filename == "":
@@ -3553,11 +3861,25 @@ class StoreAttachment(Resource):
                 jsonify({"status": "error", "message": "Missing file"}),
                 400,
             )
-        user = safe_filename(decoded_token.get("sub"))
+
+        user = None
+        if decoded_token:
+            user = safe_filename(decoded_token.get("sub"))
+        elif api_key:
+            agent = agents_collection.find_one({"key": api_key})
+            if not agent:
+                return make_response(
+                    jsonify({"success": False, "message": "Invalid API key"}), 401
+                )
+            user = safe_filename(agent.get("user"))
+        else:
+            return make_response(
+                jsonify({"success": False, "message": "Authentication required"}), 401
+            )

         try:
             attachment_id = ObjectId()
-            original_filename = secure_filename(os.path.basename(file.filename))
+            original_filename = safe_filename(os.path.basename(file.filename))
             relative_path = f"{settings.UPLOAD_FOLDER}/{user}/attachments/{str(attachment_id)}/{original_filename}"

             metadata = storage.save_file(file, relative_path)

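With this change an agent API key can authenticate the upload in place of a JWT. A minimal sketch, assuming the resource is routed at /api/store_attachment (the route decorator sits outside this hunk, so the path is an assumption):

import requests

with open("report.pdf", "rb") as f:
    resp = requests.post(
        "http://localhost:7091/api/store_attachment",  # assumed route
        files={"file": f},
        data={"api_key": "<agent api key>"},  # no Authorization header needed
    )
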
@@ -3611,3 +3933,66 @@ class ServeImage(Resource):
         return make_response(
             jsonify({"success": False, "message": "Error retrieving image"}), 500
         )
+
+
+@user_ns.route("/api/directory_structure")
+class DirectoryStructure(Resource):
+    @api.doc(
+        description="Get the directory structure for a document",
+        params={"id": "The document ID"},
+    )
+    def get(self):
+        decoded_token = request.decoded_token
+        if not decoded_token:
+            return make_response(jsonify({"success": False}), 401)
+
+        user = decoded_token.get("sub")
+        doc_id = request.args.get("id")
+
+        if not doc_id:
+            return make_response(
+                jsonify({"error": "Document ID is required"}), 400
+            )
+
+        if not ObjectId.is_valid(doc_id):
+            return make_response(jsonify({"error": "Invalid document ID"}), 400)
+
+        try:
+            doc = sources_collection.find_one({"_id": ObjectId(doc_id), "user": user})
+            if not doc:
+                return make_response(
+                    jsonify({"error": "Document not found or access denied"}), 404
+                )
+
+            directory_structure = doc.get("directory_structure", {})
+            base_path = doc.get("file_path", "")
+
+            provider = None
+            remote_data = doc.get("remote_data")
+            try:
+                if isinstance(remote_data, str) and remote_data:
+                    remote_data_obj = json.loads(remote_data)
+                    provider = remote_data_obj.get("provider")
+            except Exception as e:
+                current_app.logger.warning(
+                    f"Failed to parse remote_data for doc {doc_id}: {e}")
+
+            return make_response(
+                jsonify({
+                    "success": True,
+                    "directory_structure": directory_structure,
+                    "base_path": base_path,
+                    "provider": provider,
+                }), 200
+            )
+
+        except Exception as e:
+            current_app.logger.error(
+                f"Error retrieving directory structure: {e}", exc_info=True
+            )
+            return make_response(
+                jsonify({"success": False, "error": str(e)}), 500
+            )

@@ -11,8 +11,8 @@ from application.worker import (


 @celery.task(bind=True)
-def ingest(self, directory, formats, job_name, filename, user, dir_name, user_dir):
-    resp = ingest_worker(self, directory, formats, job_name, filename, user, dir_name, user_dir)
+def ingest(self, directory, formats, job_name, user, file_path, filename):
+    resp = ingest_worker(self, directory, formats, job_name, file_path, filename, user)
     return resp

@@ -22,6 +22,13 @@ def ingest_remote(self, source_data, job_name, user, loader):
     return resp


+@celery.task(bind=True)
+def reingest_source_task(self, source_id, user):
+    from application.worker import reingest_source_worker
+    resp = reingest_source_worker(self, source_id, user)
+    return resp
+
+
 @celery.task(bind=True)
 def schedule_syncs(self, frequency):
     resp = sync_worker(self, frequency)
@@ -40,6 +47,39 @@ def process_agent_webhook(self, agent_id, payload):
     return resp


+@celery.task(bind=True)
+def ingest_connector_task(
+    self,
+    job_name,
+    user,
+    source_type,
+    session_token=None,
+    file_ids=None,
+    folder_ids=None,
+    recursive=True,
+    retriever="classic",
+    operation_mode="upload",
+    doc_id=None,
+    sync_frequency="never"
+):
+    from application.worker import ingest_connector
+    resp = ingest_connector(
+        self,
+        job_name,
+        user,
+        source_type,
+        session_token=session_token,
+        file_ids=file_ids,
+        folder_ids=folder_ids,
+        recursive=recursive,
+        retriever=retriever,
+        operation_mode=operation_mode,
+        doc_id=doc_id,
+        sync_frequency=sync_frequency
+    )
+    return resp
+
+
 @celery.on_after_configure.connect
 def setup_periodic_tasks(sender, **kwargs):
     sender.add_periodic_task(

@@ -12,25 +12,26 @@ from application.core.logging_config import setup_logging

 setup_logging()

-from application.api.answer.routes import answer  # noqa: E402
+from application.api import api  # noqa: E402
+from application.api.answer import answer  # noqa: E402
 from application.api.internal.routes import internal  # noqa: E402
 from application.api.user.routes import user  # noqa: E402
+from application.api.connector.routes import connector  # noqa: E402
 from application.celery_init import celery  # noqa: E402
 from application.core.settings import settings  # noqa: E402
-from application.extensions import api  # noqa: E402


 if platform.system() == "Windows":
     import pathlib

     pathlib.PosixPath = pathlib.WindowsPath

 dotenv.load_dotenv()

 app = Flask(__name__)
 app.register_blueprint(user)
 app.register_blueprint(answer)
 app.register_blueprint(internal)
+app.register_blueprint(connector)
 app.config.update(
     UPLOAD_FOLDER="inputs",
     CELERY_BROKER_URL=settings.CELERY_BROKER_URL,
@@ -52,7 +53,6 @@ if settings.AUTH_TYPE in ("simple_jwt", "session_jwt") and not settings.JWT_SECR
         settings.JWT_SECRET_KEY = new_key
     except Exception as e:
         raise RuntimeError(f"Failed to setup JWT_SECRET_KEY: {e}")
-
 SIMPLE_JWT_TOKEN = None
 if settings.AUTH_TYPE == "simple_jwt":
     payload = {"sub": "local"}
@@ -92,7 +92,6 @@ def generate_token():
 def authenticate_request():
     if request.method == "OPTIONS":
         return "", 200
-
     decoded_token = handle_auth(request)
     if not decoded_token:
         request.decoded_token = None

@@ -10,7 +10,7 @@ current_dir = os.path.dirname(


 class Settings(BaseSettings):
-    AUTH_TYPE: Optional[str] = None
+    AUTH_TYPE: Optional[str] = None  # simple_jwt, session_jwt, or None
     LLM_PROVIDER: str = "docsgpt"
     LLM_NAME: Optional[str] = (
         None  # if LLM_PROVIDER is openai, LLM_NAME can be gpt-4 or gpt-3.5-turbo
@@ -30,6 +30,7 @@ class Settings(BaseSettings):
     }
     UPLOAD_FOLDER: str = "inputs"
     PARSE_PDF_AS_IMAGE: bool = False
+    PARSE_IMAGE_REMOTE: bool = False
     VECTOR_STORE: str = (
         "faiss"  # "faiss" or "elasticsearch" or "qdrant" or "milvus" or "lancedb"
     )

@@ -39,6 +40,13 @@ class Settings(BaseSettings):
     FALLBACK_LLM_NAME: Optional[str] = None  # model name for fallback llm
     FALLBACK_LLM_API_KEY: Optional[str] = None  # api key for fallback llm

+    # Google Drive integration
+    GOOGLE_CLIENT_ID: Optional[str] = None  # Replace with your actual Google OAuth client ID
+    GOOGLE_CLIENT_SECRET: Optional[str] = None  # Replace with your actual Google OAuth client secret
+    CONNECTOR_REDIRECT_BASE_URI: Optional[str] = "http://127.0.0.1:7091/api/connectors/callback"
+    # Append ?provider={provider_name} in your provider console, e.g. http://127.0.0.1:7091/api/connectors/callback?provider=google_drive
+
     # LLM Cache
     CACHE_REDIS_URL: str = "redis://localhost:6379/2"

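A sketch of the per-provider redirect URI that comment describes, assuming the google_drive provider name used elsewhere in this branch:

base = "http://127.0.0.1:7091/api/connectors/callback"
redirect_uri = f"{base}?provider=google_drive"  # register this value in the provider console
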
@@ -88,7 +96,9 @@ class Settings(BaseSettings):
     QDRANT_HOST: Optional[str] = None
     QDRANT_PATH: Optional[str] = None
     QDRANT_DISTANCE_FUNC: str = "Cosine"

+    # PGVector vectorstore config
+    PGVECTOR_CONNECTION_STRING: Optional[str] = None
     # Milvus vectorstore config
     MILVUS_COLLECTION_NAME: Optional[str] = "docsgpt"
     MILVUS_URI: Optional[str] = "./milvus_local.db"  # milvus lite version as default

@@ -1,7 +0,0 @@
-from flask_restx import Api
-
-api = Api(
-    version="1.0",
-    title="DocsGPT API",
-    description="API for DocsGPT",
-)

@@ -120,6 +120,20 @@ class BaseLLM(ABC):
     def _supports_tools(self):
         raise NotImplementedError("Subclass must implement _supports_tools method")

+    def supports_structured_output(self):
+        """Check if the LLM supports structured output/JSON schema enforcement"""
+        return hasattr(self, "_supports_structured_output") and callable(
+            getattr(self, "_supports_structured_output")
+        )
+
+    def _supports_structured_output(self):
+        return False
+
+    def prepare_structured_output_format(self, json_schema):
+        """Prepare structured output format specific to the LLM provider"""
+        _ = json_schema
+        return None
+
     def get_supported_attachment_types(self):
         """
         Return a list of MIME types supported by this LLM for file uploads.
@@ -127,4 +141,4 @@ class BaseLLM(ABC):
         Returns:
             list: List of supported MIME types
         """
-        return []  # Default: no attachments supported
+        return []

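One detail worth noting: supports_structured_output() as written is true for every subclass, because _supports_structured_output is defined on the base class itself; the per-provider answer comes from the override's return value. A sketch of how a caller might combine the two, assuming llm is any BaseLLM subclass and schema is a JSON-schema dict:

if llm.supports_structured_output() and llm._supports_structured_output():
    fmt = llm.prepare_structured_output_format(schema)  # provider-specific format
else:
    fmt = None  # provider ignores structured output
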
@@ -1,11 +1,13 @@
+import json
+import logging

 from google import genai
 from google.genai import types
-import logging
-import json

+from application.core.settings import settings
 from application.llm.base import BaseLLM
 from application.storage.storage_creator import StorageCreator
-from application.core.settings import settings


 class GoogleLLM(BaseLLM):
@@ -24,12 +26,12 @@ class GoogleLLM(BaseLLM):
             list: List of supported MIME types
         """
         return [
-            'application/pdf',
-            'image/png',
-            'image/jpeg',
-            'image/jpg',
-            'image/webp',
-            'image/gif'
+            "application/pdf",
+            "image/png",
+            "image/jpeg",
+            "image/jpg",
+            "image/webp",
+            "image/gif",
         ]

     def prepare_messages_with_attachments(self, messages, attachments=None):

@@ -70,26 +72,30 @@ class GoogleLLM(BaseLLM):

         files = []
         for attachment in attachments:
-            mime_type = attachment.get('mime_type')
+            mime_type = attachment.get("mime_type")

             if mime_type in self.get_supported_attachment_types():
                 try:
                     file_uri = self._upload_file_to_google(attachment)
-                    logging.info(f"GoogleLLM: Successfully uploaded file, got URI: {file_uri}")
+                    logging.info(
+                        f"GoogleLLM: Successfully uploaded file, got URI: {file_uri}"
+                    )
                     files.append({"file_uri": file_uri, "mime_type": mime_type})
                 except Exception as e:
-                    logging.error(f"GoogleLLM: Error uploading file: {e}", exc_info=True)
-                    if 'content' in attachment:
-                        prepared_messages[user_message_index]["content"].append({
-                            "type": "text",
-                            "text": f"[File could not be processed: {attachment.get('path', 'unknown')}]"
-                        })
+                    logging.error(
+                        f"GoogleLLM: Error uploading file: {e}", exc_info=True
+                    )
+                    if "content" in attachment:
+                        prepared_messages[user_message_index]["content"].append(
+                            {
+                                "type": "text",
+                                "text": f"[File could not be processed: {attachment.get('path', 'unknown')}]",
+                            }
+                        )

         if files:
             logging.info(f"GoogleLLM: Adding {len(files)} files to message")
-            prepared_messages[user_message_index]["content"].append({
-                "files": files
-            })
+            prepared_messages[user_message_index]["content"].append({"files": files})

         return prepared_messages

@@ -103,10 +109,10 @@ class GoogleLLM(BaseLLM):
         Returns:
             str: Google AI file URI for the uploaded file.
         """
-        if 'google_file_uri' in attachment:
-            return attachment['google_file_uri']
+        if "google_file_uri" in attachment:
+            return attachment["google_file_uri"]

-        file_path = attachment.get('path')
+        file_path = attachment.get("path")
         if not file_path:
             raise ValueError("No file path provided in attachment")

@@ -116,17 +122,19 @@ class GoogleLLM(BaseLLM):
         try:
             file_uri = self.storage.process_file(
                 file_path,
-                lambda local_path, **kwargs: self.client.files.upload(file=local_path).uri
+                lambda local_path, **kwargs: self.client.files.upload(
+                    file=local_path
+                ).uri,
             )

             from application.core.mongo_db import MongoDB

             mongo = MongoDB.get_client()
             db = mongo[settings.MONGO_DB_NAME]
             attachments_collection = db["attachments"]
-            if '_id' in attachment:
+            if "_id" in attachment:
                 attachments_collection.update_one(
-                    {"_id": attachment['_id']},
-                    {"$set": {"google_file_uri": file_uri}}
+                    {"_id": attachment["_id"]}, {"$set": {"google_file_uri": file_uri}}
                 )

             return file_uri
@@ -166,13 +174,13 @@ class GoogleLLM(BaseLLM):
                         )
                     )
                 elif "files" in item:
-                    for file_data in item["files"]:
-                        parts.append(
-                            types.Part.from_uri(
-                                file_uri=file_data["file_uri"],
-                                mime_type=file_data["mime_type"]
-                            )
+                    for file_data in item["files"]:
+                        parts.append(
+                            types.Part.from_uri(
+                                file_uri=file_data["file_uri"],
+                                mime_type=file_data["mime_type"],
+                            )
                         )
                 else:
                     raise ValueError(
                         f"Unexpected content dictionary format:{item}"

@@ -231,6 +239,7 @@ class GoogleLLM(BaseLLM):
         stream=False,
         tools=None,
         formatting="openai",
+        response_schema=None,
         **kwargs,
     ):
         client = genai.Client(api_key=self.api_key)
@@ -244,16 +253,21 @@ class GoogleLLM(BaseLLM):
         if tools:
             cleaned_tools = self._clean_tools_format(tools)
             config.tools = cleaned_tools
-            response = client.models.generate_content(
-                model=model,
-                contents=messages,
-                config=config,
-            )
+
+        # Add response schema for structured output if provided
+        if response_schema:
+            config.response_schema = response_schema
+            config.response_mime_type = "application/json"
+
+        response = client.models.generate_content(
+            model=model,
+            contents=messages,
+            config=config,
+        )
+
+        if tools:
             return response
         else:
-            response = client.models.generate_content(
-                model=model, contents=messages, config=config
-            )
             return response.text

     def _raw_gen_stream(
@@ -264,6 +278,7 @@ class GoogleLLM(BaseLLM):
         stream=True,
         tools=None,
         formatting="openai",
+        response_schema=None,
         **kwargs,
     ):
         client = genai.Client(api_key=self.api_key)
@@ -278,17 +293,24 @@ class GoogleLLM(BaseLLM):
             cleaned_tools = self._clean_tools_format(tools)
             config.tools = cleaned_tools

+        # Add response schema for structured output if provided
+        if response_schema:
+            config.response_schema = response_schema
+            config.response_mime_type = "application/json"
+
         # Check if we have both tools and file attachments
         has_attachments = False
         for message in messages:
             for part in message.parts:
-                if hasattr(part, 'file_data') and part.file_data is not None:
+                if hasattr(part, "file_data") and part.file_data is not None:
                     has_attachments = True
                     break
             if has_attachments:
                 break

-        logging.info(f"GoogleLLM: Starting stream generation. Model: {model}, Messages: {json.dumps(messages, default=str)}, Has attachments: {has_attachments}")
+        logging.info(
+            f"GoogleLLM: Starting stream generation. Model: {model}, Messages: {json.dumps(messages, default=str)}, Has attachments: {has_attachments}"
+        )

         response = client.models.generate_content_stream(
             model=model,
@@ -296,7 +318,6 @@ class GoogleLLM(BaseLLM):
             config=config,
         )

-
         for chunk in response:
             if hasattr(chunk, "candidates") and chunk.candidates:
                 for candidate in chunk.candidates:

@@ -311,3 +332,75 @@ class GoogleLLM(BaseLLM):

     def _supports_tools(self):
         return True
+
+    def _supports_structured_output(self):
+        return True
+
+    def prepare_structured_output_format(self, json_schema):
+        if not json_schema:
+            return None
+
+        type_map = {
+            "object": "OBJECT",
+            "array": "ARRAY",
+            "string": "STRING",
+            "integer": "INTEGER",
+            "number": "NUMBER",
+            "boolean": "BOOLEAN",
+        }
+
+        def convert(schema):
+            if not isinstance(schema, dict):
+                return schema
+
+            result = {}
+            schema_type = schema.get("type")
+            if schema_type:
+                result["type"] = type_map.get(schema_type.lower(), schema_type.upper())
+
+            for key in [
+                "description",
+                "nullable",
+                "enum",
+                "minItems",
+                "maxItems",
+                "required",
+                "propertyOrdering",
+            ]:
+                if key in schema:
+                    result[key] = schema[key]
+
+            if "format" in schema:
+                format_value = schema["format"]
+                if schema_type == "string":
+                    if format_value == "date":
+                        result["format"] = "date-time"
+                    elif format_value in ["enum", "date-time"]:
+                        result["format"] = format_value
+                else:
+                    result["format"] = format_value
+
+            if "properties" in schema:
+                result["properties"] = {
+                    k: convert(v) for k, v in schema["properties"].items()
+                }
+                if "propertyOrdering" not in result and result.get("type") == "OBJECT":
+                    result["propertyOrdering"] = list(result["properties"].keys())
+
+            if "items" in schema:
+                result["items"] = convert(schema["items"])
+
+            for field in ["anyOf", "oneOf", "allOf"]:
+                if field in schema:
+                    result[field] = [convert(s) for s in schema[field]]
+
+            return result
+
+        try:
+            return convert(json_schema)
+        except Exception as e:
+            logging.error(
+                f"Error preparing structured output format for Google: {e}",
+                exc_info=True,
+            )
+            return None

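Roughly, the converter upper-cases type names and fills in propertyOrdering for objects. An illustrative input/output pair (not exhaustive):

source_schema = {
    "type": "object",
    "properties": {"title": {"type": "string"}, "pages": {"type": "integer"}},
    "required": ["title"],
}
# convert(source_schema) yields approximately:
# {
#     "type": "OBJECT",
#     "required": ["title"],
#     "properties": {"title": {"type": "STRING"}, "pages": {"type": "INTEGER"}},
#     "propertyOrdering": ["title", "pages"],
# }
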
@@ -1,5 +1,5 @@
-import json
 import base64
+import json
 import logging

 from application.core.settings import settings
@@ -13,7 +13,10 @@ class OpenAILLM(BaseLLM):
         from openai import OpenAI

         super().__init__(*args, **kwargs)
-        if isinstance(settings.OPENAI_BASE_URL, str) and settings.OPENAI_BASE_URL.strip():
+        if (
+            isinstance(settings.OPENAI_BASE_URL, str)
+            and settings.OPENAI_BASE_URL.strip()
+        ):
             self.client = OpenAI(api_key=api_key, base_url=settings.OPENAI_BASE_URL)
         else:
             DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
@@ -73,14 +76,30 @@ class OpenAILLM(BaseLLM):
                 elif isinstance(item, dict):
                     content_parts = []
                     if "text" in item:
-                        content_parts.append({"type": "text", "text": item["text"]})
-                    elif "type" in item and item["type"] == "text" and "text" in item:
+                        content_parts.append(
+                            {"type": "text", "text": item["text"]}
+                        )
+                    elif (
+                        "type" in item
+                        and item["type"] == "text"
+                        and "text" in item
+                    ):
                         content_parts.append(item)
-                    elif "type" in item and item["type"] == "file" and "file" in item:
+                    elif (
+                        "type" in item
+                        and item["type"] == "file"
+                        and "file" in item
+                    ):
                         content_parts.append(item)
-                    elif "type" in item and item["type"] == "image_url" and "image_url" in item:
+                    elif (
+                        "type" in item
+                        and item["type"] == "image_url"
+                        and "image_url" in item
+                    ):
                         content_parts.append(item)
-                    cleaned_messages.append({"role": role, "content": content_parts})
+                    cleaned_messages.append(
+                        {"role": role, "content": content_parts}
+                    )
                 else:
                     raise ValueError(
                         f"Unexpected content dictionary format: {item}"
@@ -98,22 +117,29 @@ class OpenAILLM(BaseLLM):
         stream=False,
         tools=None,
         engine=settings.AZURE_DEPLOYMENT_NAME,
+        response_format=None,
         **kwargs,
     ):
         messages = self._clean_messages_openai(messages)

+        request_params = {
+            "model": model,
+            "messages": messages,
+            "stream": stream,
+            **kwargs,
+        }
+
+        if tools:
+            request_params["tools"] = tools
+
+        if response_format:
+            request_params["response_format"] = response_format
+
+        response = self.client.chat.completions.create(**request_params)
+
         if tools:
-            response = self.client.chat.completions.create(
-                model=model,
-                messages=messages,
-                stream=stream,
-                tools=tools,
-                **kwargs,
-            )
             return response.choices[0]
         else:
-            response = self.client.chat.completions.create(
-                model=model, messages=messages, stream=stream, **kwargs
-            )
             return response.choices[0].message.content

     def _raw_gen_stream(
@@ -124,24 +150,32 @@ class OpenAILLM(BaseLLM):
         stream=True,
         tools=None,
         engine=settings.AZURE_DEPLOYMENT_NAME,
+        response_format=None,
         **kwargs,
     ):
         messages = self._clean_messages_openai(messages)

+        request_params = {
+            "model": model,
+            "messages": messages,
+            "stream": stream,
+            **kwargs,
+        }
+
         if tools:
-            response = self.client.chat.completions.create(
-                model=model,
-                messages=messages,
-                stream=stream,
-                tools=tools,
-                **kwargs,
-            )
-        else:
-            response = self.client.chat.completions.create(
-                model=model, messages=messages, stream=stream, **kwargs
-            )
+            request_params["tools"] = tools
+
+        if response_format:
+            request_params["response_format"] = response_format
+
+        response = self.client.chat.completions.create(**request_params)

         for line in response:
-            if len(line.choices) > 0 and line.choices[0].delta.content is not None and len(line.choices[0].delta.content) > 0:
+            if (
+                len(line.choices) > 0
+                and line.choices[0].delta.content is not None
+                and len(line.choices[0].delta.content) > 0
+            ):
                 yield line.choices[0].delta.content
             elif len(line.choices) > 0:
                 yield line.choices[0]

@@ -149,6 +183,66 @@ class OpenAILLM(BaseLLM):
     def _supports_tools(self):
         return True

+    def _supports_structured_output(self):
+        return True
+
+    def prepare_structured_output_format(self, json_schema):
+        if not json_schema:
+            return None
+
+        try:
+
+            def add_additional_properties_false(schema_obj):
+                if isinstance(schema_obj, dict):
+                    schema_copy = schema_obj.copy()
+
+                    if schema_copy.get("type") == "object":
+                        schema_copy["additionalProperties"] = False
+                        # Ensure 'required' includes all properties for OpenAI strict mode
+                        if "properties" in schema_copy:
+                            schema_copy["required"] = list(
+                                schema_copy["properties"].keys()
+                            )
+
+                    for key, value in schema_copy.items():
+                        if key == "properties" and isinstance(value, dict):
+                            schema_copy[key] = {
+                                prop_name: add_additional_properties_false(prop_schema)
+                                for prop_name, prop_schema in value.items()
+                            }
+                        elif key == "items" and isinstance(value, dict):
+                            schema_copy[key] = add_additional_properties_false(value)
+                        elif key in ["anyOf", "oneOf", "allOf"] and isinstance(
+                            value, list
+                        ):
+                            schema_copy[key] = [
+                                add_additional_properties_false(sub_schema)
+                                for sub_schema in value
+                            ]
+
+                    return schema_copy
+                return schema_obj
+
+            processed_schema = add_additional_properties_false(json_schema)
+
+            result = {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": processed_schema.get("name", "response"),
+                    "description": processed_schema.get(
+                        "description", "Structured response"
+                    ),
+                    "schema": processed_schema,
+                    "strict": True,
+                },
+            }
+
+            return result
+
+        except Exception as e:
+            logging.error(f"Error preparing structured output format: {e}")
+            return None
+
     def get_supported_attachment_types(self):
         """
         Return a list of MIME types supported by OpenAI for file uploads.

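For a small input, the method above produces an OpenAI response_format wrapper roughly like the following (additionalProperties is forced to false and required is expanded for strict mode):

schema = {"type": "object", "properties": {"answer": {"type": "string"}}}
# prepare_structured_output_format(schema) yields approximately:
# {
#     "type": "json_schema",
#     "json_schema": {
#         "name": "response",
#         "description": "Structured response",
#         "schema": {
#             "type": "object",
#             "additionalProperties": False,
#             "required": ["answer"],
#             "properties": {"answer": {"type": "string"}},
#         },
#         "strict": True,
#     },
# }
# This dict is what _raw_gen passes as response_format to chat.completions.create().
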
@@ -157,12 +251,12 @@ class OpenAILLM(BaseLLM):
|
||||
list: List of supported MIME types
|
||||
"""
|
||||
return [
|
||||
'application/pdf',
|
||||
'image/png',
|
||||
'image/jpeg',
|
||||
'image/jpg',
|
||||
'image/webp',
|
||||
'image/gif'
|
||||
"application/pdf",
|
||||
"image/png",
|
||||
"image/jpeg",
|
||||
"image/jpg",
|
||||
"image/webp",
|
||||
"image/gif",
|
||||
]
|
||||
|
||||
def prepare_messages_with_attachments(self, messages, attachments=None):
|
||||
@@ -202,39 +296,46 @@ class OpenAILLM(BaseLLM):
|
||||
prepared_messages[user_message_index]["content"] = []
|
||||
|
||||
for attachment in attachments:
|
||||
mime_type = attachment.get('mime_type')
|
||||
mime_type = attachment.get("mime_type")
|
||||
|
||||
if mime_type and mime_type.startswith('image/'):
|
||||
if mime_type and mime_type.startswith("image/"):
|
||||
try:
|
||||
base64_image = self._get_base64_image(attachment)
|
||||
prepared_messages[user_message_index]["content"].append({
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:{mime_type};base64,{base64_image}"
|
||||
prepared_messages[user_message_index]["content"].append(
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": f"data:{mime_type};base64,{base64_image}"
|
||||
},
|
||||
}
|
||||
})
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Error processing image attachment: {e}", exc_info=True)
|
||||
if 'content' in attachment:
|
||||
prepared_messages[user_message_index]["content"].append({
|
||||
"type": "text",
|
||||
"text": f"[Image could not be processed: {attachment.get('path', 'unknown')}]"
|
||||
})
|
||||
logging.error(
|
||||
f"Error processing image attachment: {e}", exc_info=True
|
||||
)
|
||||
if "content" in attachment:
|
||||
prepared_messages[user_message_index]["content"].append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": f"[Image could not be processed: {attachment.get('path', 'unknown')}]",
|
||||
}
|
||||
)
|
||||
# Handle PDFs using the file API
|
||||
elif mime_type == 'application/pdf':
|
||||
elif mime_type == "application/pdf":
|
||||
try:
|
||||
file_id = self._upload_file_to_openai(attachment)
|
||||
prepared_messages[user_message_index]["content"].append({
|
||||
"type": "file",
|
||||
"file": {"file_id": file_id}
|
||||
})
|
||||
prepared_messages[user_message_index]["content"].append(
|
||||
{"type": "file", "file": {"file_id": file_id}}
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Error uploading PDF to OpenAI: {e}", exc_info=True)
|
||||
if 'content' in attachment:
|
||||
prepared_messages[user_message_index]["content"].append({
|
||||
"type": "text",
|
||||
"text": f"File content:\n\n{attachment['content']}"
|
||||
})
|
||||
if "content" in attachment:
|
||||
prepared_messages[user_message_index]["content"].append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": f"File content:\n\n{attachment['content']}",
|
||||
}
|
||||
)
|
||||
|
||||
return prepared_messages
|
||||
|
||||
@@ -248,13 +349,13 @@ class OpenAILLM(BaseLLM):
|
||||
Returns:
|
||||
str: Base64-encoded image data.
|
||||
"""
|
||||
file_path = attachment.get('path')
|
||||
file_path = attachment.get("path")
|
||||
if not file_path:
|
||||
raise ValueError("No file path provided in attachment")
|
||||
|
||||
try:
|
||||
with self.storage.get_file(file_path) as image_file:
|
||||
return base64.b64encode(image_file.read()).decode('utf-8')
|
||||
return base64.b64encode(image_file.read()).decode("utf-8")
|
||||
except FileNotFoundError:
|
||||
raise FileNotFoundError(f"File not found: {file_path}")
|
||||
|
||||
@@ -273,10 +374,10 @@ class OpenAILLM(BaseLLM):
|
||||
"""
|
||||
import logging
|
||||
|
||||
if 'openai_file_id' in attachment:
|
||||
return attachment['openai_file_id']
|
||||
if "openai_file_id" in attachment:
|
||||
return attachment["openai_file_id"]
|
||||
|
||||
file_path = attachment.get('path')
|
||||
file_path = attachment.get("path")
|
||||
|
||||
if not self.storage.file_exists(file_path):
|
||||
raise FileNotFoundError(f"File not found: {file_path}")
|
||||
@@ -285,19 +386,18 @@ class OpenAILLM(BaseLLM):
|
||||
file_id = self.storage.process_file(
|
||||
file_path,
|
||||
lambda local_path, **kwargs: self.client.files.create(
|
||||
file=open(local_path, 'rb'),
|
||||
purpose="assistants"
|
||||
).id
|
||||
file=open(local_path, "rb"), purpose="assistants"
|
||||
).id,
|
||||
)
|
||||
|
||||
from application.core.mongo_db import MongoDB
|
||||
|
||||
mongo = MongoDB.get_client()
|
||||
db = mongo[settings.MONGO_DB_NAME]
|
||||
attachments_collection = db["attachments"]
|
||||
if '_id' in attachment:
|
||||
if "_id" in attachment:
|
||||
attachments_collection.update_one(
|
||||
{"_id": attachment['_id']},
|
||||
{"$set": {"openai_file_id": file_id}}
|
||||
{"_id": attachment["_id"]}, {"$set": {"openai_file_id": file_id}}
|
||||
)
|
||||
|
||||
return file_id
|
||||
@@ -308,9 +408,7 @@ class OpenAILLM(BaseLLM):
|
||||
|
||||
class AzureOpenAILLM(OpenAILLM):
|
||||
|
||||
def __init__(
|
||||
self, api_key, user_api_key, *args, **kwargs
|
||||
):
|
||||
def __init__(self, api_key, user_api_key, *args, **kwargs):
|
||||
|
||||
super().__init__(api_key)
|
||||
self.api_base = (settings.OPENAI_API_BASE,)
|
||||
@@ -321,5 +419,5 @@ class AzureOpenAILLM(OpenAILLM):
|
||||
self.client = AzureOpenAI(
|
||||
api_key=api_key,
|
||||
api_version=settings.OPENAI_API_VERSION,
|
||||
azure_endpoint=settings.OPENAI_API_BASE
|
||||
azure_endpoint=settings.OPENAI_API_BASE,
|
||||
)
|
||||
|
||||
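Note: for context, a minimal sketch of the message shape prepare_messages_with_attachments builds for an image attachment; the text, role, and base64 payload here are illustrative placeholders, not values from this diff:

    # Hypothetical example; the data URI payload is truncated.
    message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "What does this chart show?"},
            {
                "type": "image_url",
                "image_url": {"url": "data:image/png;base64,iVBORw0KGgo..."},
            },
        ],
    }
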
@@ -136,6 +136,8 @@ def _log_to_mongodb(
    mongo = MongoDB.get_client()
    db = mongo[settings.MONGO_DB_NAME]
    user_logs_collection = db["stack_logs"]
+
+

    log_entry = {
        "endpoint": endpoint,
@@ -147,6 +149,11 @@ def _log_to_mongodb(
        "stacks": stacks,
        "timestamp": datetime.datetime.now(datetime.timezone.utc),
    }
+    # clean up text fields to be no longer than 10000 characters
+    for key, value in log_entry.items():
+        if isinstance(value, str) and len(value) > 10000:
+            log_entry[key] = value[:10000]
+
    user_logs_collection.insert_one(log_entry)
    logging.debug(f"Logged activity to MongoDB: {activity_id}")

@@ -32,16 +32,7 @@ class Chunker:
            header, body = "", text  # No header, treat entire text as body
        return header, body

-    def combine_documents(self, doc: Document, next_doc: Document) -> Document:
-        combined_text = doc.text + " " + next_doc.text
-        combined_token_count = len(self.encoding.encode(combined_text))
-        new_doc = Document(
-            text=combined_text,
-            doc_id=doc.doc_id,
-            embedding=doc.embedding,
-            extra_info={**(doc.extra_info or {}), "token_count": combined_token_count}
-        )
-        return new_doc
-
-
    def split_document(self, doc: Document) -> List[Document]:
        split_docs = []
@@ -82,26 +73,11 @@ class Chunker:
                processed_docs.append(doc)
                i += 1
            elif token_count < self.min_tokens:
-                if i + 1 < len(documents):
-                    next_doc = documents[i + 1]
-                    next_tokens = self.encoding.encode(next_doc.text)
-                    if token_count + len(next_tokens) <= self.max_tokens:
-                        # Combine small documents
-                        combined_doc = self.combine_documents(doc, next_doc)
-                        processed_docs.append(combined_doc)
-                        i += 2
-                    else:
-                        # Keep the small document as is if adding next_doc would exceed max_tokens
-                        doc.extra_info = doc.extra_info or {}
-                        doc.extra_info["token_count"] = token_count
-                        processed_docs.append(doc)
-                        i += 1
-                else:
-                    # No next document to combine with; add the small document as is
-                    doc.extra_info = doc.extra_info or {}
-                    doc.extra_info["token_count"] = token_count
-                    processed_docs.append(doc)
-                    i += 1
-
+                doc.extra_info = doc.extra_info or {}
+                doc.extra_info["token_count"] = token_count
+                processed_docs.append(doc)
+                i += 1
            else:
                # Split large documents
                processed_docs.extend(self.split_document(doc))

application/parser/connectors/__init__.py (new file, 18 lines)
@@ -0,0 +1,18 @@
"""
External knowledge base connectors for DocsGPT.

This module contains connectors for external knowledge bases and document storage systems
that require authentication and specialized handling, separate from simple web scrapers.
"""

from .base import BaseConnectorAuth, BaseConnectorLoader
from .connector_creator import ConnectorCreator
from .google_drive import GoogleDriveAuth, GoogleDriveLoader

__all__ = [
    'BaseConnectorAuth',
    'BaseConnectorLoader',
    'ConnectorCreator',
    'GoogleDriveAuth',
    'GoogleDriveLoader'
]

application/parser/connectors/base.py (new file, 129 lines)
@@ -0,0 +1,129 @@
"""
Base classes for external knowledge base connectors.

This module provides minimal abstract base classes that define the essential
interface for external knowledge base connectors.
"""

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from application.parser.schema.base import Document


class BaseConnectorAuth(ABC):
    """
    Abstract base class for connector authentication.

    Defines the minimal interface that all connector authentication
    implementations must follow.
    """

    @abstractmethod
    def get_authorization_url(self, state: Optional[str] = None) -> str:
        """
        Generate authorization URL for OAuth flows.

        Args:
            state: Optional state parameter for CSRF protection

        Returns:
            Authorization URL
        """
        pass

    @abstractmethod
    def exchange_code_for_tokens(self, authorization_code: str) -> Dict[str, Any]:
        """
        Exchange authorization code for access tokens.

        Args:
            authorization_code: Authorization code from OAuth callback

        Returns:
            Dictionary containing token information
        """
        pass

    @abstractmethod
    def refresh_access_token(self, refresh_token: str) -> Dict[str, Any]:
        """
        Refresh an expired access token.

        Args:
            refresh_token: Refresh token

        Returns:
            Dictionary containing refreshed token information
        """
        pass

    @abstractmethod
    def is_token_expired(self, token_info: Dict[str, Any]) -> bool:
        """
        Check if a token is expired.

        Args:
            token_info: Token information dictionary

        Returns:
            True if token is expired, False otherwise
        """
        pass


class BaseConnectorLoader(ABC):
    """
    Abstract base class for connector loaders.

    Defines the minimal interface that all connector loader
    implementations must follow.
    """

    @abstractmethod
    def __init__(self, session_token: str):
        """
        Initialize the connector loader.

        Args:
            session_token: Authentication session token
        """
        pass

    @abstractmethod
    def load_data(self, inputs: Dict[str, Any]) -> List[Document]:
        """
        Load documents from the external knowledge base.

        Args:
            inputs: Configuration dictionary containing:
                - file_ids: Optional list of specific file IDs to load
                - folder_ids: Optional list of folder IDs to browse/download
                - limit: Maximum number of items to return
                - list_only: If True, return metadata without content
                - recursive: Whether to recursively process folders

        Returns:
            List of Document objects
        """
        pass

    @abstractmethod
    def download_to_directory(self, local_dir: str, source_config: Dict[str, Any] = None) -> Dict[str, Any]:
        """
        Download files/folders to a local directory.

        Args:
            local_dir: Local directory path to download files to
            source_config: Configuration for what to download

        Returns:
            Dictionary containing download results:
                - files_downloaded: Number of files downloaded
                - directory_path: Path where files were downloaded
                - empty_result: Whether no files were downloaded
                - source_type: Type of connector
                - config_used: Configuration that was used
                - error: Error message if download failed (optional)
        """
        pass

application/parser/connectors/connector_creator.py (new file, 81 lines)
@@ -0,0 +1,81 @@
from application.parser.connectors.google_drive.loader import GoogleDriveLoader
from application.parser.connectors.google_drive.auth import GoogleDriveAuth


class ConnectorCreator:
    """
    Factory class for creating external knowledge base connectors and auth providers.

    These are different from remote loaders as they typically require
    authentication and connect to external document storage systems.
    """

    connectors = {
        "google_drive": GoogleDriveLoader,
    }

    auth_providers = {
        "google_drive": GoogleDriveAuth,
    }

    @classmethod
    def create_connector(cls, connector_type, *args, **kwargs):
        """
        Create a connector instance for the specified type.

        Args:
            connector_type: Type of connector to create (e.g., 'google_drive')
            *args, **kwargs: Arguments to pass to the connector constructor

        Returns:
            Connector instance

        Raises:
            ValueError: If connector type is not supported
        """
        connector_class = cls.connectors.get(connector_type.lower())
        if not connector_class:
            raise ValueError(f"No connector class found for type {connector_type}")
        return connector_class(*args, **kwargs)

    @classmethod
    def create_auth(cls, connector_type):
        """
        Create an auth provider instance for the specified connector type.

        Args:
            connector_type: Type of connector auth to create (e.g., 'google_drive')

        Returns:
            Auth provider instance

        Raises:
            ValueError: If connector type is not supported for auth
        """
        auth_class = cls.auth_providers.get(connector_type.lower())
        if not auth_class:
            raise ValueError(f"No auth class found for type {connector_type}")
        return auth_class()

    @classmethod
    def get_supported_connectors(cls):
        """
        Get list of supported connector types.

        Returns:
            List of supported connector type strings
        """
        return list(cls.connectors.keys())

    @classmethod
    def is_supported(cls, connector_type):
        """
        Check if a connector type is supported.

        Args:
            connector_type: Type of connector to check

        Returns:
            True if supported, False otherwise
        """
        return connector_type.lower() in cls.connectors

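Note: a minimal usage sketch for the factory above, assuming a valid connector session already exists (the token value is a placeholder):

    from application.parser.connectors.connector_creator import ConnectorCreator

    if ConnectorCreator.is_supported("google_drive"):
        loader = ConnectorCreator.create_connector("google_drive", "session-token")
        docs = loader.load_data({"folder_id": "root", "limit": 10, "list_only": True})
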
application/parser/connectors/google_drive/__init__.py (new file, 10 lines)
@@ -0,0 +1,10 @@
"""
Google Drive connector for DocsGPT.

This module provides authentication and document loading capabilities for Google Drive.
"""

from .auth import GoogleDriveAuth
from .loader import GoogleDriveLoader

__all__ = ['GoogleDriveAuth', 'GoogleDriveLoader']

application/parser/connectors/google_drive/auth.py (new file, 268 lines)
@@ -0,0 +1,268 @@
import logging
import datetime
from typing import Optional, Dict, Any

from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

from application.core.settings import settings
from application.parser.connectors.base import BaseConnectorAuth


class GoogleDriveAuth(BaseConnectorAuth):
    """
    Handles Google OAuth 2.0 authentication for Google Drive access.
    """

    SCOPES = [
        'https://www.googleapis.com/auth/drive.readonly',
        'https://www.googleapis.com/auth/drive.metadata.readonly'
    ]

    def __init__(self):
        self.client_id = settings.GOOGLE_CLIENT_ID
        self.client_secret = settings.GOOGLE_CLIENT_SECRET
        self.redirect_uri = f"{settings.CONNECTOR_REDIRECT_BASE_URI}?provider=google_drive"

        if not self.client_id or not self.client_secret:
            raise ValueError("Google OAuth credentials not configured. Please set GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET in settings.")

    def get_authorization_url(self, state: Optional[str] = None) -> str:
        try:
            flow = Flow.from_client_config(
                {
                    "web": {
                        "client_id": self.client_id,
                        "client_secret": self.client_secret,
                        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                        "token_uri": "https://oauth2.googleapis.com/token",
                        "redirect_uris": [self.redirect_uri]
                    }
                },
                scopes=self.SCOPES
            )
            flow.redirect_uri = self.redirect_uri

            authorization_url, _ = flow.authorization_url(
                access_type='offline',
                prompt='consent',
                include_granted_scopes='true',
                state=state
            )

            return authorization_url

        except Exception as e:
            logging.error(f"Error generating authorization URL: {e}")
            raise

    def exchange_code_for_tokens(self, authorization_code: str) -> Dict[str, Any]:
        try:
            if not authorization_code:
                raise ValueError("Authorization code is required")

            flow = Flow.from_client_config(
                {
                    "web": {
                        "client_id": self.client_id,
                        "client_secret": self.client_secret,
                        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
                        "token_uri": "https://oauth2.googleapis.com/token",
                        "redirect_uris": [self.redirect_uri]
                    }
                },
                scopes=self.SCOPES
            )
            flow.redirect_uri = self.redirect_uri

            flow.fetch_token(code=authorization_code)

            credentials = flow.credentials

            if not credentials.refresh_token:
                logging.warning("OAuth flow did not return a refresh_token.")
            if not credentials.token:
                raise ValueError("OAuth flow did not return an access token")

            if not credentials.token_uri:
                credentials.token_uri = "https://oauth2.googleapis.com/token"

            if not credentials.client_id:
                credentials.client_id = self.client_id

            if not credentials.client_secret:
                credentials.client_secret = self.client_secret

            if not credentials.refresh_token:
                raise ValueError(
                    "No refresh token received. This typically happens when offline access wasn't granted. "
                )

            return {
                'access_token': credentials.token,
                'refresh_token': credentials.refresh_token,
                'token_uri': credentials.token_uri,
                'client_id': credentials.client_id,
                'client_secret': credentials.client_secret,
                'scopes': credentials.scopes,
                'expiry': credentials.expiry.isoformat() if credentials.expiry else None
            }

        except Exception as e:
            logging.error(f"Error exchanging code for tokens: {e}")
            raise

    def refresh_access_token(self, refresh_token: str) -> Dict[str, Any]:
        try:
            if not refresh_token:
                raise ValueError("Refresh token is required")

            credentials = Credentials(
                token=None,
                refresh_token=refresh_token,
                token_uri="https://oauth2.googleapis.com/token",
                client_id=self.client_id,
                client_secret=self.client_secret
            )

            from google.auth.transport.requests import Request
            credentials.refresh(Request())

            return {
                'access_token': credentials.token,
                'refresh_token': refresh_token,
                'token_uri': credentials.token_uri,
                'client_id': credentials.client_id,
                'client_secret': credentials.client_secret,
                'scopes': credentials.scopes,
                'expiry': credentials.expiry.isoformat() if credentials.expiry else None
            }
        except Exception as e:
            logging.error(f"Error refreshing access token: {e}", exc_info=True)
            raise

    def create_credentials_from_token_info(self, token_info: Dict[str, Any]) -> Credentials:
        from application.core.settings import settings

        access_token = token_info.get('access_token')
        if not access_token:
            raise ValueError("No access token found in token_info")

        credentials = Credentials(
            token=access_token,
            refresh_token=token_info.get('refresh_token'),
            token_uri='https://oauth2.googleapis.com/token',
            client_id=settings.GOOGLE_CLIENT_ID,
            client_secret=settings.GOOGLE_CLIENT_SECRET,
            scopes=token_info.get('scopes', ['https://www.googleapis.com/auth/drive.readonly'])
        )

        if not credentials.token:
            raise ValueError("Credentials created without valid access token")

        return credentials

    def build_drive_service(self, credentials: Credentials):
        try:
            if not credentials:
                raise ValueError("No credentials provided")

            if not credentials.token and not credentials.refresh_token:
                raise ValueError("No access token or refresh token available. User must re-authorize with offline access.")

            needs_refresh = credentials.expired or not credentials.token
            if needs_refresh:
                if credentials.refresh_token:
                    try:
                        from google.auth.transport.requests import Request
                        credentials.refresh(Request())
                    except Exception as refresh_error:
                        raise ValueError(f"Failed to refresh credentials: {refresh_error}")
                else:
                    raise ValueError("No access token or refresh token available. User must re-authorize with offline access.")

            return build('drive', 'v3', credentials=credentials)

        except HttpError as e:
            raise ValueError(f"Failed to build Google Drive service: HTTP {e.resp.status}")
        except Exception as e:
            raise ValueError(f"Failed to build Google Drive service: {str(e)}")

    def is_token_expired(self, token_info):
        if 'expiry' in token_info and token_info['expiry']:
            try:
                from dateutil import parser
                # Google Drive provides timezone-aware ISO8601 dates
                expiry_dt = parser.parse(token_info['expiry'])
                current_time = datetime.datetime.now(datetime.timezone.utc)
                return current_time >= expiry_dt - datetime.timedelta(seconds=60)
            except Exception:
                return True

        if 'access_token' in token_info and token_info['access_token']:
            return False

        return True

    def get_token_info_from_session(self, session_token: str) -> Dict[str, Any]:
        try:
            from application.core.mongo_db import MongoDB
            from application.core.settings import settings

            mongo = MongoDB.get_client()
            db = mongo[settings.MONGO_DB_NAME]

            sessions_collection = db["connector_sessions"]
            session = sessions_collection.find_one({"session_token": session_token})
            if not session:
                raise ValueError(f"Invalid session token: {session_token}")

            if "token_info" not in session:
                raise ValueError("Session missing token information")

            token_info = session["token_info"]
            if not token_info:
                raise ValueError("Invalid token information")

            required_fields = ["access_token", "refresh_token"]
            missing_fields = [field for field in required_fields if field not in token_info or not token_info.get(field)]
            if missing_fields:
                raise ValueError(f"Missing required token fields: {missing_fields}")

            if 'client_id' not in token_info:
                token_info['client_id'] = settings.GOOGLE_CLIENT_ID
            if 'client_secret' not in token_info:
                token_info['client_secret'] = settings.GOOGLE_CLIENT_SECRET
            if 'token_uri' not in token_info:
                token_info['token_uri'] = 'https://oauth2.googleapis.com/token'

            return token_info

        except Exception as e:
            raise ValueError(f"Failed to retrieve Google Drive token information: {str(e)}")

    def validate_credentials(self, credentials: Credentials) -> bool:
        """
        Validate Google Drive credentials by making a test API call.

        Args:
            credentials: Google credentials object

        Returns:
            True if credentials are valid, False otherwise
        """
        try:
            service = self.build_drive_service(credentials)
            service.about().get(fields="user").execute()
            return True

        except HttpError as e:
            logging.error(f"HTTP error validating credentials: {e}")
            return False
        except Exception as e:
            logging.error(f"Error validating credentials: {e}")
            return False

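Note: a sketch of the OAuth round trip these methods implement; the state value and authorization code are placeholders that would normally arrive via the configured redirect URI:

    auth = GoogleDriveAuth()
    url = auth.get_authorization_url(state="csrf-state")  # send the user here
    tokens = auth.exchange_code_for_tokens("<code-from-callback>")
    if auth.is_token_expired(tokens):
        tokens = auth.refresh_access_token(tokens["refresh_token"])
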
application/parser/connectors/google_drive/loader.py (new file, 536 lines)
@@ -0,0 +1,536 @@
"""
Google Drive loader for DocsGPT.
Loads documents from Google Drive using Google Drive API.
"""

import io
import logging
import os
from typing import List, Dict, Any, Optional

from googleapiclient.http import MediaIoBaseDownload
from googleapiclient.errors import HttpError

from application.parser.connectors.base import BaseConnectorLoader
from application.parser.connectors.google_drive.auth import GoogleDriveAuth
from application.parser.schema.base import Document


class GoogleDriveLoader(BaseConnectorLoader):

    SUPPORTED_MIME_TYPES = {
        'application/pdf': '.pdf',
        'application/vnd.google-apps.document': '.docx',
        'application/vnd.google-apps.presentation': '.pptx',
        'application/vnd.google-apps.spreadsheet': '.xlsx',
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx',
        'application/vnd.openxmlformats-officedocument.presentationml.presentation': '.pptx',
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': '.xlsx',
        'application/msword': '.doc',
        'application/vnd.ms-powerpoint': '.ppt',
        'application/vnd.ms-excel': '.xls',
        'text/plain': '.txt',
        'text/csv': '.csv',
        'text/html': '.html',
        'application/rtf': '.rtf',
        'image/jpeg': '.jpg',
        'image/jpg': '.jpg',
        'image/png': '.png',
    }

    EXPORT_FORMATS = {
        'application/vnd.google-apps.document': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'application/vnd.google-apps.presentation': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
        'application/vnd.google-apps.spreadsheet': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    }

    def __init__(self, session_token: str):
        self.auth = GoogleDriveAuth()
        self.session_token = session_token

        token_info = self.auth.get_token_info_from_session(session_token)
        self.credentials = self.auth.create_credentials_from_token_info(token_info)

        try:
            self.service = self.auth.build_drive_service(self.credentials)
        except Exception as e:
            logging.warning(f"Could not build Google Drive service: {e}")
            self.service = None

        self.next_page_token = None

    def _process_file(self, file_metadata: Dict[str, Any], load_content: bool = True) -> Optional[Document]:
        try:
            file_id = file_metadata.get('id')
            file_name = file_metadata.get('name', 'Unknown')
            mime_type = file_metadata.get('mimeType', 'application/octet-stream')

            if mime_type not in self.SUPPORTED_MIME_TYPES and not mime_type.startswith('application/vnd.google-apps.'):
                logging.info(f"Skipping unsupported file type: {mime_type} for file {file_name}")
                return None

            # Google Drive provides timezone-aware ISO8601 dates
            doc_metadata = {
                'file_name': file_name,
                'mime_type': mime_type,
                'size': file_metadata.get('size', None),
                'created_time': file_metadata.get('createdTime'),
                'modified_time': file_metadata.get('modifiedTime'),
                'parents': file_metadata.get('parents', []),
                'source': 'google_drive'
            }

            if not load_content:
                return Document(
                    text="",
                    doc_id=file_id,
                    extra_info=doc_metadata
                )

            content = self._download_file_content(file_id, mime_type)
            if content is None:
                logging.warning(f"Could not load content for file {file_name} ({file_id})")
                return None

            return Document(
                text=content,
                doc_id=file_id,
                extra_info=doc_metadata
            )

        except Exception as e:
            logging.error(f"Error processing file: {e}")
            return None

    def load_data(self, inputs: Dict[str, Any]) -> List[Document]:
        session_token = inputs.get('session_token')
        if session_token and session_token != self.session_token:
            logging.warning("Session token in inputs differs from loader's session token. Using loader's session token.")
        self.config = inputs

        try:
            documents: List[Document] = []

            folder_id = inputs.get('folder_id')
            file_ids = inputs.get('file_ids', [])
            limit = inputs.get('limit', 100)
            list_only = inputs.get('list_only', False)
            load_content = not list_only
            page_token = inputs.get('page_token')
            self.next_page_token = None

            if file_ids:
                # Specific files requested: load them
                for file_id in file_ids:
                    try:
                        doc = self._load_file_by_id(file_id, load_content=load_content)
                        if doc:
                            documents.append(doc)
                        elif hasattr(self, '_credential_refreshed') and self._credential_refreshed:
                            self._credential_refreshed = False
                            logging.info(f"Retrying load of file {file_id} after credential refresh")
                            doc = self._load_file_by_id(file_id, load_content=load_content)
                            if doc:
                                documents.append(doc)
                    except Exception as e:
                        logging.error(f"Error loading file {file_id}: {e}")
                        continue
            else:
                # Browsing mode: list immediate children of provided folder or root
                parent_id = folder_id if folder_id else 'root'
                documents = self._list_items_in_parent(parent_id, limit=limit, load_content=load_content, page_token=page_token)

            logging.info(f"Loaded {len(documents)} documents from Google Drive")
            return documents

        except Exception as e:
            logging.error(f"Error loading data from Google Drive: {e}", exc_info=True)
            raise

    def _load_file_by_id(self, file_id: str, load_content: bool = True) -> Optional[Document]:
        self._ensure_service()

        try:
            file_metadata = self.service.files().get(
                fileId=file_id,
                fields='id,name,mimeType,size,createdTime,modifiedTime,parents'
            ).execute()

            return self._process_file(file_metadata, load_content=load_content)

        except HttpError as e:
            logging.error(f"HTTP error loading file {file_id}: {e.resp.status} - {e.content}")

            if e.resp.status in [401, 403]:
                if hasattr(self.credentials, 'refresh_token') and self.credentials.refresh_token:
                    try:
                        from google.auth.transport.requests import Request
                        self.credentials.refresh(Request())
                        self._ensure_service()
                        return None
                    except Exception as refresh_error:
                        raise ValueError(f"Authentication failed and could not be refreshed: {refresh_error}")
                else:
                    raise ValueError("Authentication failed and cannot be refreshed: missing refresh_token")

            return None
        except Exception as e:
            logging.error(f"Error loading file {file_id}: {e}")
            return None

    def _list_items_in_parent(self, parent_id: str, limit: int = 100, load_content: bool = False, page_token: Optional[str] = None) -> List[Document]:
        self._ensure_service()

        documents: List[Document] = []

        try:
            query = f"'{parent_id}' in parents and trashed=false"
            next_token_out: Optional[str] = None

            while True:
                page_size = 100
                if limit:
                    remaining = max(0, limit - len(documents))
                    if remaining == 0:
                        break
                    page_size = min(100, remaining)

                results = self.service.files().list(
                    q=query,
                    fields='nextPageToken,files(id,name,mimeType,size,createdTime,modifiedTime,parents)',
                    pageToken=page_token,
                    pageSize=page_size
                ).execute()

                items = results.get('files', [])
                for item in items:
                    mime_type = item.get('mimeType')
                    if mime_type == 'application/vnd.google-apps.folder':
                        doc_metadata = {
                            'file_name': item.get('name', 'Unknown'),
                            'mime_type': mime_type,
                            'size': item.get('size', None),
                            'created_time': item.get('createdTime'),
                            'modified_time': item.get('modifiedTime'),
                            'parents': item.get('parents', []),
                            'source': 'google_drive',
                            'is_folder': True
                        }
                        documents.append(Document(text="", doc_id=item.get('id'), extra_info=doc_metadata))
                    else:
                        doc = self._process_file(item, load_content=load_content)
                        if doc:
                            documents.append(doc)

                    if limit and len(documents) >= limit:
                        self.next_page_token = results.get('nextPageToken')
                        return documents

                page_token = results.get('nextPageToken')
                next_token_out = page_token
                if not page_token:
                    break

            self.next_page_token = next_token_out
            return documents
        except Exception as e:
            logging.error(f"Error listing items under parent {parent_id}: {e}")
            return documents

    def _download_file_content(self, file_id: str, mime_type: str) -> Optional[str]:
        if not self.credentials.token:
            logging.warning("No access token in credentials, attempting to refresh")
            if hasattr(self.credentials, 'refresh_token') and self.credentials.refresh_token:
                try:
                    from google.auth.transport.requests import Request
                    self.credentials.refresh(Request())
                    logging.info("Credentials refreshed successfully")
                    self._ensure_service()
                except Exception as e:
                    logging.error(f"Failed to refresh credentials: {e}")
                    raise ValueError("Authentication failed and cannot be refreshed: missing or invalid refresh_token")
            else:
                logging.error("No access token and no refresh_token available")
                raise ValueError("Authentication failed and cannot be refreshed: missing refresh_token")

        if self.credentials.expired:
            logging.warning("Credentials are expired, attempting to refresh")
            if hasattr(self.credentials, 'refresh_token') and self.credentials.refresh_token:
                try:
                    from google.auth.transport.requests import Request
                    self.credentials.refresh(Request())
                    logging.info("Credentials refreshed successfully")
                    self._ensure_service()
                except Exception as e:
                    logging.error(f"Failed to refresh expired credentials: {e}")
                    raise ValueError("Authentication failed and cannot be refreshed: expired credentials")
            else:
                logging.error("Credentials expired and no refresh_token available")
                raise ValueError("Authentication failed and cannot be refreshed: missing refresh_token")

        try:
            if mime_type in self.EXPORT_FORMATS:
                export_mime_type = self.EXPORT_FORMATS[mime_type]
                request = self.service.files().export_media(
                    fileId=file_id,
                    mimeType=export_mime_type
                )
            else:
                request = self.service.files().get_media(fileId=file_id)

            file_io = io.BytesIO()
            downloader = MediaIoBaseDownload(file_io, request)

            done = False
            while done is False:
                try:
                    _, done = downloader.next_chunk()
                except HttpError as e:
                    logging.error(f"HTTP error downloading file {file_id}: {e.resp.status} - {e.content}")
                    return None
                except Exception as e:
                    logging.error(f"Error during download of file {file_id}: {e}")
                    return None

            content_bytes = file_io.getvalue()

            try:
                content = content_bytes.decode('utf-8')
            except UnicodeDecodeError:
                try:
                    content = content_bytes.decode('latin-1')
                except UnicodeDecodeError:
                    logging.error(f"Could not decode file {file_id} as text")
                    return None

            return content

        except HttpError as e:
            logging.error(f"HTTP error downloading file {file_id}: {e.resp.status} - {e.content}")

            if e.resp.status in [401, 403]:
                logging.error(f"Authentication error downloading file {file_id}")

                if hasattr(self.credentials, 'refresh_token') and self.credentials.refresh_token:
                    logging.info(f"Attempting to refresh credentials for file {file_id}")
                    try:
                        from google.auth.transport.requests import Request
                        self.credentials.refresh(Request())
                        logging.info("Credentials refreshed successfully")
                        self._credential_refreshed = True
                        self._ensure_service()
                        return None
                    except Exception as refresh_error:
                        logging.error(f"Error refreshing credentials: {refresh_error}")
                        raise ValueError(f"Authentication failed and could not be refreshed: {refresh_error}")
                else:
                    logging.error("Cannot refresh credentials: missing refresh_token")
                    raise ValueError("Authentication failed and cannot be refreshed: missing refresh_token")

            return None
        except Exception as e:
            logging.error(f"Error downloading file {file_id}: {e}")
            return None

    def _download_file_to_directory(self, file_id: str, local_dir: str) -> bool:
        try:
            self._ensure_service()
            return self._download_single_file(file_id, local_dir)
        except Exception as e:
            logging.error(f"Error downloading file {file_id}: {e}", exc_info=True)
            return False

    def _ensure_service(self):
        if not self.service:
            try:
                self.service = self.auth.build_drive_service(self.credentials)
            except Exception as e:
                raise ValueError(f"Cannot access Google Drive: {e}")

    def _download_single_file(self, file_id: str, local_dir: str) -> bool:
        file_metadata = self.service.files().get(
            fileId=file_id,
            fields='name,mimeType'
        ).execute()

        file_name = file_metadata['name']
        mime_type = file_metadata['mimeType']

        if mime_type not in self.SUPPORTED_MIME_TYPES and not mime_type.startswith('application/vnd.google-apps.'):
            return False

        os.makedirs(local_dir, exist_ok=True)
        full_path = os.path.join(local_dir, file_name)

        if mime_type in self.EXPORT_FORMATS:
            export_mime_type = self.EXPORT_FORMATS[mime_type]
            request = self.service.files().export_media(
                fileId=file_id,
                mimeType=export_mime_type
            )
            extension = self._get_extension_for_mime_type(export_mime_type)
            if not full_path.endswith(extension):
                full_path += extension
        else:
            request = self.service.files().get_media(fileId=file_id)

        with open(full_path, 'wb') as f:
            downloader = MediaIoBaseDownload(f, request)
            done = False
            while not done:
                _, done = downloader.next_chunk()

        return True

    def _download_folder_recursive(self, folder_id: str, local_dir: str, recursive: bool = True) -> int:
        files_downloaded = 0
        try:
            os.makedirs(local_dir, exist_ok=True)

            query = f"'{folder_id}' in parents and trashed=false"
            page_token = None

            while True:
                results = self.service.files().list(
                    q=query,
                    fields='nextPageToken, files(id, name, mimeType)',
                    pageToken=page_token,
                    pageSize=1000
                ).execute()

                items = results.get('files', [])
                logging.info(f"Found {len(items)} items in folder {folder_id}")

                for item in items:
                    item_name = item['name']
                    item_id = item['id']
                    mime_type = item['mimeType']

                    if mime_type == 'application/vnd.google-apps.folder':
                        if recursive:
                            # Create subfolder and recurse
                            subfolder_path = os.path.join(local_dir, item_name)
                            os.makedirs(subfolder_path, exist_ok=True)
                            subfolder_files = self._download_folder_recursive(
                                item_id,
                                subfolder_path,
                                recursive
                            )
                            files_downloaded += subfolder_files
                            logging.info(f"Downloaded {subfolder_files} files from subfolder {item_name}")
                    else:
                        # Download file
                        success = self._download_single_file(item_id, local_dir)
                        if success:
                            files_downloaded += 1
                            logging.info(f"Downloaded file: {item_name}")
                        else:
                            logging.warning(f"Failed to download file: {item_name}")

                page_token = results.get('nextPageToken')
                if not page_token:
                    break

            return files_downloaded

        except Exception as e:
            logging.error(f"Error in _download_folder_recursive for folder {folder_id}: {e}", exc_info=True)
            return files_downloaded

    def _get_extension_for_mime_type(self, mime_type: str) -> str:
        extensions = {
            'application/pdf': '.pdf',
            'text/plain': '.txt',
            'application/vnd.openxmlformats-officedocument.wordprocessingml.document': '.docx',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': '.xlsx',
            'application/vnd.openxmlformats-officedocument.presentationml.presentation': '.pptx',
            'text/html': '.html',
            'text/markdown': '.md',
        }
        return extensions.get(mime_type, '.bin')

    def _download_folder_contents(self, folder_id: str, local_dir: str, recursive: bool = True) -> int:
        try:
            self._ensure_service()
            return self._download_folder_recursive(folder_id, local_dir, recursive)
        except Exception as e:
            logging.error(f"Error downloading folder {folder_id}: {e}", exc_info=True)
            return 0

    def download_to_directory(self, local_dir: str, source_config: dict = None) -> dict:
        if source_config is None:
            source_config = {}

        config = source_config if source_config else getattr(self, 'config', {})
        files_downloaded = 0

        try:
            folder_ids = config.get('folder_ids', [])
            file_ids = config.get('file_ids', [])
            recursive = config.get('recursive', True)

            self._ensure_service()

            if file_ids:
                if isinstance(file_ids, str):
                    file_ids = [file_ids]

                for file_id in file_ids:
                    if self._download_file_to_directory(file_id, local_dir):
                        files_downloaded += 1

            # Process folders
            if folder_ids:
                if isinstance(folder_ids, str):
                    folder_ids = [folder_ids]

                for folder_id in folder_ids:
                    try:
                        folder_metadata = self.service.files().get(
                            fileId=folder_id,
                            fields='name'
                        ).execute()
                        folder_name = folder_metadata.get('name', '')
                        folder_path = os.path.join(local_dir, folder_name)
                        os.makedirs(folder_path, exist_ok=True)

                        folder_files = self._download_folder_recursive(
                            folder_id,
                            folder_path,
                            recursive
                        )
                        files_downloaded += folder_files
                        logging.info(f"Downloaded {folder_files} files from folder {folder_name}")
                    except Exception as e:
                        logging.error(f"Error downloading folder {folder_id}: {e}", exc_info=True)

            if not file_ids and not folder_ids:
                raise ValueError("No folder_ids or file_ids provided for download")

            return {
                "files_downloaded": files_downloaded,
                "directory_path": local_dir,
                "empty_result": files_downloaded == 0,
                "source_type": "google_drive",
                "config_used": config
            }

        except Exception as e:
            return {
                "files_downloaded": files_downloaded,
                "directory_path": local_dir,
                "empty_result": True,
                "source_type": "google_drive",
                "config_used": config,
                "error": str(e)
            }

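Note: a minimal sketch of driving the loader above; the session token and folder ID are placeholders:

    loader = GoogleDriveLoader(session_token="<session-token>")
    listing = loader.load_data({"folder_id": "root", "limit": 50, "list_only": True})
    result = loader.download_to_directory(
        "/tmp/drive_import", {"folder_ids": ["<folder-id>"], "recursive": True}
    )
    print(result["files_downloaded"], result["empty_result"])
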
@@ -6,6 +6,21 @@ from application.core.settings import settings
 from application.vectorstore.vector_creator import VectorCreator


+def sanitize_content(content: str) -> str:
+    """
+    Remove NUL characters that can cause vector store ingestion to fail.
+
+    Args:
+        content (str): Raw content that may contain NUL characters
+
+    Returns:
+        str: Sanitized content with NUL characters removed
+    """
+    if not content:
+        return content
+    return content.replace('\x00', '')
+
+
 @retry(tries=10, delay=60)
 def add_text_to_store_with_retry(store, doc, source_id):
     """
@@ -16,6 +31,9 @@ def add_text_to_store_with_retry(store, doc, source_id):
         source_id: Unique identifier for the source.
     """
     try:
+        # Sanitize content to remove NUL characters that cause ingestion failures
+        doc.page_content = sanitize_content(doc.page_content)
+
         doc.metadata["source_id"] = str(source_id)
         store.add_texts([doc.page_content], metadatas=[doc.metadata])
     except Exception as e:
@@ -46,7 +64,7 @@ def embed_and_store_documents(docs, folder_name, source_id, task_status):
         store = VectorCreator.create_vectorstore(
             settings.VECTOR_STORE,
             docs_init=docs_init,
-            source_id=folder_name,
+            source_id=source_id,
             embeddings_key=os.getenv("EMBEDDINGS_KEY"),
         )
     else:

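Note: sanitize_content only strips NUL bytes, which some vector stores reject; everything else passes through unchanged:

    assert sanitize_content("abc\x00def") == "abcdef"
    assert sanitize_content("") == ""
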
@@ -15,6 +15,7 @@ from application.parser.file.json_parser import JSONParser
 from application.parser.file.pptx_parser import PPTXParser
 from application.parser.file.image_parser import ImageParser
 from application.parser.schema.base import Document
+from application.utils import num_tokens_from_string

 DEFAULT_FILE_EXTRACTOR: Dict[str, BaseParser] = {
     ".pdf": PDFParser(),
@@ -141,11 +142,12 @@ class SimpleDirectoryReader(BaseReader):

         Returns:
             List[Document]: A list of documents.

         """
         data: Union[str, List[str]] = ""
         data_list: List[str] = []
         metadata_list = []
+        self.file_token_counts = {}

         for input_file in self.input_files:
             if input_file.suffix in self.file_extractor:
                 parser = self.file_extractor[input_file.suffix]
@@ -156,24 +158,48 @@ class SimpleDirectoryReader(BaseReader):
                 # do standard read
                 with open(input_file, "r", errors=self.errors) as f:
                     data = f.read()
-            # Prepare metadata for this file
-            if self.file_metadata is not None:
-                file_metadata = self.file_metadata(input_file.name)
+
+            # Calculate token count for this file
+            if isinstance(data, List):
+                file_tokens = sum(num_tokens_from_string(str(d)) for d in data)
             else:
-                # Provide a default empty metadata
-                file_metadata = {'title': '', 'store': ''}
-            # TODO: Find a case with no metadata and check if breaks anything
+                file_tokens = num_tokens_from_string(str(data))
+
+            full_path = str(input_file.resolve())
+            self.file_token_counts[full_path] = file_tokens
+
+            base_metadata = {
+                'title': input_file.name,
+                'token_count': file_tokens,
+            }
+
+            if hasattr(self, 'input_dir'):
+                try:
+                    relative_path = str(input_file.relative_to(self.input_dir))
+                    base_metadata['source'] = relative_path
+                except ValueError:
+                    base_metadata['source'] = str(input_file)
+            else:
+                base_metadata['source'] = str(input_file)
+
+            if self.file_metadata is not None:
+                custom_metadata = self.file_metadata(input_file.name)
+                base_metadata.update(custom_metadata)
+
             if isinstance(data, List):
                 # Extend data_list with each item in the data list
                 data_list.extend([str(d) for d in data])
                 # For each item in the data list, add the file's metadata to metadata_list
-                metadata_list.extend([file_metadata for _ in data])
+                metadata_list.extend([base_metadata for _ in data])
             else:
                 # Add the single piece of data to data_list
                 data_list.append(str(data))
                 # Add the file's metadata to metadata_list
-                metadata_list.append(file_metadata)
+                metadata_list.append(base_metadata)
+
+        # Build directory structure if input_dir is provided
+        if hasattr(self, 'input_dir'):
+            self.directory_structure = self.build_directory_structure(self.input_dir)
+            logging.info("Directory structure built successfully")
+        else:
+            self.directory_structure = {}

         if concatenate:
             return [Document("\n".join(data_list))]
@@ -181,3 +207,48 @@ class SimpleDirectoryReader(BaseReader):
             return [Document(d, extra_info=m) for d, m in zip(data_list, metadata_list)]
         else:
             return [Document(d) for d in data_list]
+
+    def build_directory_structure(self, base_path):
+        """Build a dictionary representing the directory structure.
+
+        Args:
+            base_path: The base path to start building the structure from.
+
+        Returns:
+            dict: A nested dictionary representing the directory structure.
+        """
+        import mimetypes
+
+        def build_tree(path):
+            """Helper function to recursively build the directory tree."""
+            result = {}
+
+            for item in path.iterdir():
+                if self.exclude_hidden and item.name.startswith('.'):
+                    continue
+
+                if item.is_dir():
+                    subtree = build_tree(item)
+                    if subtree:
+                        result[item.name] = subtree
+                else:
+                    if self.required_exts is not None and item.suffix not in self.required_exts:
+                        continue
+
+                    full_path = str(item.resolve())
+                    file_size_bytes = item.stat().st_size
+                    mime_type = mimetypes.guess_type(item.name)[0] or "application/octet-stream"
+
+                    file_info = {
+                        "type": mime_type,
+                        "size_bytes": file_size_bytes
+                    }
+
+                    if hasattr(self, 'file_token_counts') and full_path in self.file_token_counts:
+                        file_info["token_count"] = self.file_token_counts[full_path]
+
+                    result[item.name] = file_info
+
+            return result
+
+        return build_tree(Path(base_path))

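Note: an illustrative shape of the nested dict build_directory_structure returns (the file names and sizes here are made up):

    {
        "docs": {
            "intro.md": {"type": "text/markdown", "size_bytes": 2048, "token_count": 512},
        },
        "readme.txt": {"type": "text/plain", "size_bytes": 300},
    }
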
@@ -8,6 +8,7 @@ import requests
 from typing import Dict, Union

 from application.parser.file.base_parser import BaseParser
+from application.core.settings import settings


 class ImageParser(BaseParser):
@@ -18,10 +19,13 @@ class ImageParser(BaseParser):
         return {}

     def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, list[str]]:
-        doc2md_service = "https://llm.arc53.com/doc2md"
-        # alternatively you can use local vision capable LLM
-        with open(file, "rb") as file_loaded:
-            files = {'file': file_loaded}
-            response = requests.post(doc2md_service, files=files)
-            data = response.json()["markdown"]
+        if settings.PARSE_IMAGE_REMOTE:
+            doc2md_service = "https://llm.arc53.com/doc2md"
+            # alternatively you can use local vision capable LLM
+            with open(file, "rb") as file_loaded:
+                files = {'file': file_loaded}
+                response = requests.post(doc2md_service, files=files)
+                data = response.json()["markdown"]
+        else:
+            data = ""
         return data

@@ -6,6 +6,16 @@ from application.parser.remote.github_loader import GitHubLoader


 class RemoteCreator:
+    """
+    Factory class for creating remote content loaders.
+
+    These loaders fetch content from remote web sources like URLs,
+    sitemaps, web crawlers, social media platforms, etc.
+
+    For external knowledge base connectors (like Google Drive),
+    use ConnectorCreator instead.
+    """
+
     loaders = {
         "url": WebLoader,
         "sitemap": SitemapLoader,
@@ -18,5 +28,5 @@ class RemoteCreator:
     def create_loader(cls, type, *args, **kwargs):
         loader_class = cls.loaders.get(type.lower())
         if not loader_class:
-            raise ValueError(f"No LLM class found for type {type}")
+            raise ValueError(f"No loader class found for type {type}")
         return loader_class(*args, **kwargs)

@@ -13,6 +13,9 @@ Flask==3.1.1
 faiss-cpu==1.9.0.post1
 flask-restx==1.3.0
 google-genai==1.3.0
+google-api-python-client==2.179.0
+google-auth-httplib2==0.2.0
+google-auth-oauthlib==1.2.2
 gTTS==2.5.4
 gunicorn==23.0.0
 javalang==0.13.0

application/storage/__init__.py (new empty file)

@@ -93,3 +93,32 @@ class BaseStorage(ABC):
             List[str]: List of file paths
         """
         pass
+
+    @abstractmethod
+    def is_directory(self, path: str) -> bool:
+        """
+        Check if a path is a directory.
+
+        Args:
+            path: Path to check
+
+        Returns:
+            bool: True if the path is a directory
+        """
+        pass
+
+    @abstractmethod
+    def remove_directory(self, directory: str) -> bool:
+        """
+        Remove a directory and all its contents.
+
+        For local storage, this removes the directory and all files/subdirectories within it.
+        For S3 storage, this removes all objects with the directory path as a prefix.
+
+        Args:
+            directory: Directory path to remove
+
+        Returns:
+            bool: True if removal was successful, False otherwise
+        """
+        pass

@@ -101,3 +101,40 @@ class LocalStorage(BaseStorage):
             raise FileNotFoundError(f"File not found: {full_path}")

         return processor_func(local_path=full_path, **kwargs)
+
+    def is_directory(self, path: str) -> bool:
+        """
+        Check if a path is a directory in local storage.
+
+        Args:
+            path: Path to check
+
+        Returns:
+            bool: True if the path is a directory, False otherwise
+        """
+        full_path = self._get_full_path(path)
+        return os.path.isdir(full_path)
+
+    def remove_directory(self, directory: str) -> bool:
+        """
+        Remove a directory and all its contents from local storage.
+
+        Args:
+            directory: Directory path to remove
+
+        Returns:
+            bool: True if removal was successful, False otherwise
+        """
+        full_path = self._get_full_path(directory)
+
+        if not os.path.exists(full_path):
+            return False
+
+        if not os.path.isdir(full_path):
+            return False
+
+        try:
+            shutil.rmtree(full_path)
+            return True
+        except (OSError, PermissionError):
+            return False

@@ -130,3 +130,77 @@ class S3Storage(BaseStorage):
         except Exception as e:
             logging.error(f"Error processing S3 file {path}: {e}", exc_info=True)
             raise
+
+    def is_directory(self, path: str) -> bool:
+        """
+        Check if a path is a directory in S3 storage.
+
+        In S3, directories are virtual concepts. A path is considered a directory
+        if there are objects with the path as a prefix.
+
+        Args:
+            path: Path to check
+
+        Returns:
+            bool: True if the path is a directory, False otherwise
+        """
+        # Ensure path ends with a slash if not empty
+        if path and not path.endswith('/'):
+            path += '/'
+
+        response = self.s3.list_objects_v2(
+            Bucket=self.bucket_name,
+            Prefix=path,
+            MaxKeys=1
+        )
+
+        return 'Contents' in response
+
+    def remove_directory(self, directory: str) -> bool:
+        """
+        Remove a directory and all its contents from S3 storage.
+
+        In S3, this removes all objects with the directory path as a prefix.
+        Since S3 doesn't have actual directories, this effectively removes
+        all files within the virtual directory structure.
+
+        Args:
+            directory: Directory path to remove
+
+        Returns:
+            bool: True if removal was successful, False otherwise
+        """
+        # Ensure directory ends with a slash if not empty
+        if directory and not directory.endswith('/'):
+            directory += '/'
+
+        try:
+            # Get all objects with the directory prefix
+            objects_to_delete = []
+            paginator = self.s3.get_paginator('list_objects_v2')
+            pages = paginator.paginate(Bucket=self.bucket_name, Prefix=directory)
+
+            for page in pages:
+                if 'Contents' in page:
+                    for obj in page['Contents']:
+                        objects_to_delete.append({'Key': obj['Key']})
+
+            if not objects_to_delete:
+                return False
+
+            batch_size = 1000
+            for i in range(0, len(objects_to_delete), batch_size):
+                batch = objects_to_delete[i:i + batch_size]
+
+                response = self.s3.delete_objects(
+                    Bucket=self.bucket_name,
+                    Delete={'Objects': batch}
+                )
+
+                if 'Errors' in response and response['Errors']:
+                    return False
+
+            return True
+
+        except ClientError:
+            return False

@@ -6,6 +6,7 @@ import uuid

import tiktoken
from flask import jsonify, make_response
from werkzeug.utils import secure_filename

from application.core.settings import settings


@@ -19,6 +20,17 @@ def get_encoding():
    return _encoding


def get_gpt_model() -> str:
    """Get the appropriate GPT model based on provider"""
    model_map = {
        "openai": "gpt-4o-mini",
        "anthropic": "claude-2",
        "groq": "llama3-8b-8192",
        "novita": "deepseek/deepseek-r1",
    }
    return settings.LLM_NAME or model_map.get(settings.LLM_PROVIDER, "")


def safe_filename(filename):
    """
    Creates a safe filename that preserves the original extension.

@@ -32,15 +44,14 @@ def safe_filename(filename):
    """
    if not filename:
        return str(uuid.uuid4())

    _, extension = os.path.splitext(filename)

    safe_name = secure_filename(filename)

    # If secure_filename returns just the extension or an empty string
    if not safe_name or safe_name == extension.lstrip("."):
        return f"{str(uuid.uuid4())}{extension}"

    return safe_name


@@ -68,7 +79,6 @@ def count_tokens_docs(docs):
    docs_content = ""
    for doc in docs:
        docs_content += doc.page_content

    tokens = num_tokens_from_string(docs_content)
    return tokens


@@ -97,13 +107,11 @@ def validate_required_fields(data, required_fields):
            missing_fields.append(field)
        elif not data[field]:
            empty_fields.append(field)

    errors = []
    if missing_fields:
        errors.append(f"Missing required fields: {', '.join(missing_fields)}")
    if empty_fields:
        errors.append(f"Empty values in required fields: {', '.join(empty_fields)}")

    if errors:
        return make_response(
            jsonify({"success": False, "message": " | ".join(errors)}), 400

@@ -132,7 +140,6 @@ def limit_chat_history(history, max_token_limit=None, gpt_model="docsgpt"):

    if not history:
        return []

    trimmed_history = []
    tokens_current_history = 0

@@ -141,18 +148,15 @@ def limit_chat_history(history, max_token_limit=None, gpt_model="docsgpt"):
        if "prompt" in message and "response" in message:
            tokens_batch += num_tokens_from_string(message["prompt"])
            tokens_batch += num_tokens_from_string(message["response"])

        if "tool_calls" in message:
            for tool_call in message["tool_calls"]:
                tool_call_string = f"Tool: {tool_call.get('tool_name')} | Action: {tool_call.get('action_name')} | Args: {tool_call.get('arguments')} | Response: {tool_call.get('result')}"
                tokens_batch += num_tokens_from_string(tool_call_string)

        if tokens_current_history + tokens_batch < max_token_limit:
            tokens_current_history += tokens_batch
            trimmed_history.insert(0, message)
        else:
            break

    return trimmed_history

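`limit_chat_history` walks the conversation from newest to oldest and keeps whole prompt/response batches until the token budget would be exceeded. A minimal standalone sketch of the same trimming idea, using a crude whitespace counter in place of tiktoken:

```python
def trim_history(history, max_tokens=10):
    """Keep the newest messages whose combined token count fits the budget."""
    count = lambda s: len(s.split())  # crude stand-in for num_tokens_from_string
    trimmed, used = [], 0
    for message in reversed(history):  # newest first
        tokens = count(message["prompt"]) + count(message["response"])
        if used + tokens < max_tokens:
            used += tokens
            trimmed.insert(0, message)  # restore chronological order
        else:
            break
    return trimmed

history = [
    {"prompt": "first question", "response": "a fairly long first answer with detail"},
    {"prompt": "second question", "response": "short answer"},
]
print(trim_history(history))  # only the newest message fits the 10-token budget
```
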
@@ -1,5 +1,6 @@
import os
import tempfile
import io

from langchain_community.vectorstores import FAISS

@@ -66,8 +67,37 @@ class FaissStore(BaseVectorStore):
    def add_texts(self, *args, **kwargs):
        return self.docsearch.add_texts(*args, **kwargs)

    def save_local(self, *args, **kwargs):
        return self.docsearch.save_local(*args, **kwargs)

    def _save_to_storage(self):
        """
        Save the FAISS index to storage using temporary directory pattern.
        Works consistently for both local and S3 storage.
        """
        with tempfile.TemporaryDirectory() as temp_dir:
            self.docsearch.save_local(temp_dir)

            faiss_path = os.path.join(temp_dir, "index.faiss")
            pkl_path = os.path.join(temp_dir, "index.pkl")

            with open(faiss_path, "rb") as f_faiss:
                faiss_data = f_faiss.read()
            with open(pkl_path, "rb") as f_pkl:
                pkl_data = f_pkl.read()

            storage_path = get_vectorstore(self.source_id)
            self.storage.save_file(io.BytesIO(faiss_data), f"{storage_path}/index.faiss")
            self.storage.save_file(io.BytesIO(pkl_data), f"{storage_path}/index.pkl")

        return True

    def save_local(self, path=None):
        if path:
            os.makedirs(path, exist_ok=True)
            self.docsearch.save_local(path)

        self._save_to_storage()

        return True

    def delete_index(self, *args, **kwargs):
        return self.docsearch.delete(*args, **kwargs)

@@ -103,13 +133,17 @@ class FaissStore(BaseVectorStore):
        return chunks

    def add_chunk(self, text, metadata=None):
        """Add a new chunk and save to storage."""
        metadata = metadata or {}
        doc = Document(text=text, extra_info=metadata).to_langchain_format()
        doc_id = self.docsearch.add_documents([doc])
        self.save_local(self.path)
        self._save_to_storage()
        return doc_id

    def delete_chunk(self, chunk_id):
        """Delete a chunk and save to storage."""
        self.delete_index([chunk_id])
        self.save_local(self.path)
        self._save_to_storage()
        return True

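The `_save_to_storage` helper is a standard write-through pattern: serialize into a scratch directory, then hand the raw bytes to whatever storage backend is configured. A minimal sketch of the same pattern, assuming a hypothetical `save_file(stream, path)` backend callable:

```python
import io
import os
import tempfile

def persist_index(serialize, save_file, storage_prefix):
    """Serialize into a temp dir, then push each produced file to storage."""
    with tempfile.TemporaryDirectory() as temp_dir:
        serialize(temp_dir)  # e.g. docsearch.save_local(temp_dir)
        for name in os.listdir(temp_dir):
            with open(os.path.join(temp_dir, name), "rb") as f:
                save_file(io.BytesIO(f.read()), f"{storage_prefix}/{name}")
```
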
application/vectorstore/pgvector.py (Normal file, 303 lines)
@@ -0,0 +1,303 @@
import logging
from typing import List, Optional, Any, Dict

from application.core.settings import settings
from application.vectorstore.base import BaseVectorStore
from application.vectorstore.document_class import Document


class PGVectorStore(BaseVectorStore):
    def __init__(
        self,
        source_id: str = "",
        embeddings_key: str = "embeddings",
        table_name: str = "documents",
        vector_column: str = "embedding",
        text_column: str = "text",
        metadata_column: str = "metadata",
        connection_string: str = None,
    ):
        super().__init__()
        # Store the source_id for use in add_chunk
        self._source_id = str(source_id).replace("application/indexes/", "").rstrip("/")
        self._embeddings_key = embeddings_key
        self._table_name = table_name
        self._vector_column = vector_column
        self._text_column = text_column
        self._metadata_column = metadata_column
        self._embedding = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)

        # Use provided connection string or fall back to settings
        self._connection_string = connection_string or getattr(settings, 'PGVECTOR_CONNECTION_STRING', None)

        if not self._connection_string:
            raise ValueError(
                "PostgreSQL connection string is required. "
                "Set PGVECTOR_CONNECTION_STRING in settings or pass connection_string parameter."
            )

        try:
            import psycopg2
            from psycopg2.extras import Json
            import pgvector.psycopg2
        except ImportError:
            raise ImportError(
                "Could not import required packages. "
                "Please install with `pip install psycopg2-binary pgvector`."
            )

        self._psycopg2 = psycopg2
        self._Json = Json
        self._pgvector = pgvector.psycopg2
        self._connection = None
        self._ensure_table_exists()

    def _get_connection(self):
        """Get or create database connection"""
        if self._connection is None or self._connection.closed:
            self._connection = self._psycopg2.connect(self._connection_string)
            # Register pgvector types
            self._pgvector.register_vector(self._connection)
        return self._connection

    def _ensure_table_exists(self):
        """Create table and enable pgvector extension if they don't exist"""
        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            # Enable pgvector extension
            cursor.execute("CREATE EXTENSION IF NOT EXISTS vector;")

            # Get embedding dimension
            embedding_dim = getattr(self._embedding, 'dimension', 1536)  # Default to OpenAI dimension

            # Create table with vector column
            create_table_query = f"""
                CREATE TABLE IF NOT EXISTS {self._table_name} (
                    id SERIAL PRIMARY KEY,
                    {self._text_column} TEXT NOT NULL,
                    {self._vector_column} vector({embedding_dim}),
                    {self._metadata_column} JSONB,
                    source_id TEXT NOT NULL,
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                );
            """
            cursor.execute(create_table_query)

            # Create index for vector similarity search
            index_query = f"""
                CREATE INDEX IF NOT EXISTS {self._table_name}_{self._vector_column}_idx
                ON {self._table_name} USING ivfflat ({self._vector_column} vector_cosine_ops)
                WITH (lists = 100);
            """
            cursor.execute(index_query)

            # Create index for source_id filtering
            source_index_query = f"""
                CREATE INDEX IF NOT EXISTS {self._table_name}_source_id_idx
                ON {self._table_name} (source_id);
            """
            cursor.execute(source_index_query)

            conn.commit()
        except Exception as e:
            conn.rollback()
            logging.error(f"Error creating table: {e}")
            raise
        finally:
            cursor.close()

    def search(self, question: str, k: int = 2, *args, **kwargs) -> List[Document]:
        """Search for similar documents using vector similarity"""
        query_vector = self._embedding.embed_query(question)

        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            # Use cosine distance for similarity search with proper vector formatting
            search_query = f"""
                SELECT {self._text_column}, {self._metadata_column},
                       ({self._vector_column} <=> %s::vector) as distance
                FROM {self._table_name}
                WHERE source_id = %s
                ORDER BY {self._vector_column} <=> %s::vector
                LIMIT %s;
            """

            cursor.execute(search_query, (query_vector, self._source_id, query_vector, k))
            results = cursor.fetchall()

            documents = []
            for text, metadata, distance in results:
                metadata = metadata or {}
                documents.append(Document(page_content=text, metadata=metadata))

            return documents

        except Exception as e:
            logging.error(f"Error searching documents: {e}", exc_info=True)
            return []
        finally:
            cursor.close()

    def add_texts(
        self,
        texts: List[str],
        metadatas: Optional[List[Dict[str, Any]]] = None,
        *args,
        **kwargs,
    ) -> List[str]:
        """Add texts with their embeddings to the vector store"""
        if not texts:
            return []

        embeddings = self._embedding.embed_documents(texts)
        metadatas = metadatas or [{}] * len(texts)

        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            insert_query = f"""
                INSERT INTO {self._table_name} ({self._text_column}, {self._vector_column}, {self._metadata_column}, source_id)
                VALUES (%s, %s, %s, %s)
                RETURNING id;
            """

            inserted_ids = []
            for text, embedding, metadata in zip(texts, embeddings, metadatas):
                cursor.execute(
                    insert_query,
                    (text, embedding, self._Json(metadata), self._source_id)
                )
                inserted_id = cursor.fetchone()[0]
                inserted_ids.append(str(inserted_id))

            conn.commit()
            return inserted_ids

        except Exception as e:
            conn.rollback()
            logging.error(f"Error adding texts: {e}")
            raise
        finally:
            cursor.close()

    def delete_index(self, *args, **kwargs):
        """Delete all documents for this source_id"""
        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            delete_query = f"DELETE FROM {self._table_name} WHERE source_id = %s;"
            cursor.execute(delete_query, (self._source_id,))
            conn.commit()

        except Exception as e:
            conn.rollback()
            logging.error(f"Error deleting index: {e}")
            raise
        finally:
            cursor.close()

    def save_local(self, *args, **kwargs):
        """No-op for PostgreSQL - data is already persisted"""
        pass

    def get_chunks(self) -> List[Dict[str, Any]]:
        """Get all chunks for this source_id"""
        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            select_query = f"""
                SELECT id, {self._text_column}, {self._metadata_column}
                FROM {self._table_name}
                WHERE source_id = %s;
            """
            cursor.execute(select_query, (self._source_id,))
            results = cursor.fetchall()

            chunks = []
            for doc_id, text, metadata in results:
                chunks.append({
                    "doc_id": str(doc_id),
                    "text": text,
                    "metadata": metadata or {}
                })

            return chunks

        except Exception as e:
            logging.error(f"Error getting chunks: {e}")
            return []
        finally:
            cursor.close()

    def add_chunk(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> str:
        """Add a single chunk to the vector store"""
        metadata = metadata or {}

        # Create a copy to avoid modifying the original metadata
        final_metadata = metadata.copy()

        # Ensure the source_id is in the metadata so the chunk can be found by filters
        final_metadata["source_id"] = self._source_id

        embeddings = self._embedding.embed_documents([text])

        if not embeddings:
            raise ValueError("Could not generate embedding for chunk")

        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            insert_query = f"""
                INSERT INTO {self._table_name} ({self._text_column}, {self._vector_column}, {self._metadata_column}, source_id)
                VALUES (%s, %s, %s, %s)
                RETURNING id;
            """

            cursor.execute(
                insert_query,
                (text, embeddings[0], self._Json(final_metadata), self._source_id)
            )
            inserted_id = cursor.fetchone()[0]
            conn.commit()

            return str(inserted_id)

        except Exception as e:
            conn.rollback()
            logging.error(f"Error adding chunk: {e}")
            raise
        finally:
            cursor.close()

    def delete_chunk(self, chunk_id: str) -> bool:
        """Delete a specific chunk by its ID"""
        conn = self._get_connection()
        cursor = conn.cursor()

        try:
            delete_query = f"DELETE FROM {self._table_name} WHERE id = %s AND source_id = %s;"
            cursor.execute(delete_query, (int(chunk_id), self._source_id))
            deleted_count = cursor.rowcount
            conn.commit()

            return deleted_count > 0

        except Exception as e:
            conn.rollback()
            logging.error(f"Error deleting chunk: {e}")
            return False
        finally:
            cursor.close()

    def __del__(self):
        """Close database connection when object is destroyed"""
        if hasattr(self, '_connection') and self._connection and not self._connection.closed:
            self._connection.close()

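A minimal usage sketch of the new store, assuming `PGVECTOR_CONNECTION_STRING` and the embedding settings are configured (the DSN below is hypothetical):

```python
from application.vectorstore.pgvector import PGVectorStore

store = PGVectorStore(
    source_id="application/indexes/my-source/",  # normalized internally to "my-source"
    connection_string="postgresql://user:pass@localhost:5432/docsgpt",  # hypothetical DSN
)
ids = store.add_texts(
    ["DocsGPT supports pgvector.", "FAISS remains the default store."],
    metadatas=[{"source": "notes.md"}, {"source": "notes.md"}],
)
for doc in store.search("which vector stores are supported?", k=2):
    print(doc.page_content)
```
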
@@ -1,5 +1,7 @@
import logging

from application.vectorstore.base import BaseVectorStore
from application.core.settings import settings
from application.vectorstore.document_class import Document


class QdrantStore(BaseVectorStore):
@@ -7,18 +9,22 @@ class QdrantStore(BaseVectorStore):
        from qdrant_client import models
        from langchain_community.vectorstores.qdrant import Qdrant

        # Store the source_id for use in add_chunk
        self._source_id = str(source_id).replace("application/indexes/", "").rstrip("/")

        self._filter = models.Filter(
            must=[
                models.FieldCondition(
                    key="metadata.source_id",
                    match=models.MatchValue(value=source_id.replace("application/indexes/", "").rstrip("/")),
                    match=models.MatchValue(value=self._source_id),
                )
            ]
        )

        embedding = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)
        self._docsearch = Qdrant.construct_instance(
            ["TEXT_TO_OBTAIN_EMBEDDINGS_DIMENSION"],
            embedding=self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key),
            embedding=embedding,
            collection_name=settings.QDRANT_COLLECTION_NAME,
            location=settings.QDRANT_LOCATION,
            url=settings.QDRANT_URL,
@@ -32,6 +38,32 @@ class QdrantStore(BaseVectorStore):
            path=settings.QDRANT_PATH,
            distance_func=settings.QDRANT_DISTANCE_FUNC,
        )
        try:
            collections = self._docsearch.client.get_collections()
            collection_exists = settings.QDRANT_COLLECTION_NAME in [
                collection.name for collection in collections.collections
            ]

            if not collection_exists:
                self._docsearch.client.recreate_collection(
                    collection_name=settings.QDRANT_COLLECTION_NAME,
                    vectors_config=models.VectorParams(size=embedding.client[1].word_embedding_dimension, distance=models.Distance.COSINE),
                )

            # Ensure the required index exists for metadata.source_id
            try:
                self._docsearch.client.create_payload_index(
                    collection_name=settings.QDRANT_COLLECTION_NAME,
                    field_name="metadata.source_id",
                    field_schema=models.PayloadSchemaType.KEYWORD,
                )
            except Exception as index_error:
                # Index might already exist, which is fine
                if "already exists" not in str(index_error).lower():
                    logging.warning(f"Could not create index for metadata.source_id: {index_error}")

        except Exception as e:
            logging.warning(f"Could not check for collection: {e}")

    def search(self, *args, **kwargs):
        return self._docsearch.similarity_search(filter=self._filter, *args, **kwargs)

@@ -46,3 +78,59 @@ class QdrantStore(BaseVectorStore):
        return self._docsearch.client.delete(
            collection_name=settings.QDRANT_COLLECTION_NAME, points_selector=self._filter
        )

    def get_chunks(self):
        try:
            chunks = []
            offset = None
            while True:
                records, offset = self._docsearch.client.scroll(
                    collection_name=settings.QDRANT_COLLECTION_NAME,
                    scroll_filter=self._filter,
                    limit=10,
                    with_payload=True,
                    with_vectors=False,
                    offset=offset,
                )
                for record in records:
                    doc_id = record.id
                    text = record.payload.get("page_content")
                    metadata = record.payload.get("metadata")
                    chunks.append(
                        {"doc_id": doc_id, "text": text, "metadata": metadata}
                    )
                if offset is None:
                    break
            return chunks
        except Exception as e:
            logging.error(f"Error getting chunks: {e}", exc_info=True)
            return []

    def add_chunk(self, text, metadata=None):
        import uuid

        metadata = metadata or {}

        # Create a copy to avoid modifying the original metadata
        final_metadata = metadata.copy()

        # Ensure the source_id is in the metadata so the chunk can be found by filters
        final_metadata["source_id"] = self._source_id

        doc = Document(page_content=text, metadata=final_metadata)
        # Generate a unique ID for the document
        doc_id = str(uuid.uuid4())
        doc.id = doc_id
        doc_ids = self._docsearch.add_documents([doc])
        return doc_ids[0] if doc_ids else doc_id

    def delete_chunk(self, chunk_id):
        try:
            self._docsearch.client.delete(
                collection_name=settings.QDRANT_COLLECTION_NAME,
                points_selector=[chunk_id],
            )
            return True
        except Exception as e:
            logging.error(f"Error deleting chunk: {e}", exc_info=True)
            return False

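`get_chunks` pages through the collection with Qdrant's scroll API, which returns a batch of points plus an offset for the next page (`None` once the collection is exhausted). A standalone sketch of the same loop, assuming a local qdrant_client instance:

```python
from qdrant_client import QdrantClient

client = QdrantClient(location=":memory:")  # hypothetical local instance

def scroll_all(collection_name, scroll_filter=None, page_size=10):
    """Collect every point in a collection, one page at a time."""
    points, offset = [], None
    while True:
        batch, offset = client.scroll(
            collection_name=collection_name,
            scroll_filter=scroll_filter,
            limit=page_size,
            with_payload=True,
            with_vectors=False,
            offset=offset,
        )
        points.extend(batch)
        if offset is None:  # no more pages
            break
    return points
```
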
@@ -3,6 +3,7 @@ from application.vectorstore.elasticsearch import ElasticsearchStore
from application.vectorstore.milvus import MilvusStore
from application.vectorstore.mongodb import MongoDBVectorStore
from application.vectorstore.qdrant import QdrantStore
from application.vectorstore.pgvector import PGVectorStore


class VectorCreator:
@@ -12,6 +13,7 @@ class VectorCreator:
        "mongodb": MongoDBVectorStore,
        "qdrant": QdrantStore,
        "milvus": MilvusStore,
        "pgvector": PGVectorStore
    }

    @classmethod

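With the registration above, the factory can hand back a pgvector-backed store by name. A minimal sketch, assuming a reachable PostgreSQL instance and configured embeddings:

```python
from application.vectorstore.vector_creator import VectorCreator
from application.core.settings import settings

store = VectorCreator.create_vectorstore(
    "pgvector",             # any key in VectorCreator's registry
    "my-source",            # source_id
    settings.EMBEDDINGS_KEY,
)
print(store.search("hello", k=1))
```
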
@@ -6,6 +6,7 @@ import os
import shutil
import string
import tempfile
from typing import Any, Dict
import zipfile

from collections import Counter
@@ -16,11 +17,12 @@ from bson.dbref import DBRef
from bson.objectid import ObjectId

from application.agents.agent_creator import AgentCreator
from application.api.answer.routes import get_prompt
from application.api.answer.services.stream_processor import get_prompt

from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.parser.chunking import Chunker
from application.parser.connectors.connector_creator import ConnectorCreator
from application.parser.embedding_pipeline import embed_and_store_documents
from application.parser.file.bulk import SimpleDirectoryReader
from application.parser.remote.remote_creator import RemoteCreator
@@ -35,17 +37,22 @@ db = mongo[settings.MONGO_DB_NAME]
sources_collection = db["sources"]

# Constants
MIN_TOKENS = 150
MAX_TOKENS = 1250
RECURSION_DEPTH = 2


# Define a function to extract metadata from a given filename.
def metadata_from_filename(title):
    return {"title": title}


# Define a function to generate a random string of a given length.
def generate_random_string(length):
    return "".join([string.ascii_letters[i % 52] for i in range(length)])

@@ -68,7 +75,6 @@ def extract_zip_recursive(zip_path, extract_to, current_depth=0, max_depth=5):
    if current_depth > max_depth:
        logging.warning(f"Reached maximum recursion depth of {max_depth}")
        return

    try:
        with zipfile.ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(extract_to)
@@ -76,12 +82,13 @@ def extract_zip_recursive(zip_path, extract_to, current_depth=0, max_depth=5):
    except Exception as e:
        logging.error(f"Error extracting zip file {zip_path}: {e}", exc_info=True)
        return

    # Check for nested zip files and extract them
    for root, dirs, files in os.walk(extract_to):
        for file in files:
            if file.endswith(".zip"):
                # If a nested zip file is found, extract it recursively
                file_path = os.path.join(root, file)
                extract_zip_recursive(file_path, root, current_depth + 1, max_depth)

@@ -98,11 +105,23 @@ def download_file(url, params, dest_path):


def upload_index(full_path, file_data):
    files = None
    try:
        if settings.VECTOR_STORE == "faiss":
            faiss_path = full_path + "/index.faiss"
            pkl_path = full_path + "/index.pkl"

            if not os.path.exists(faiss_path):
                logging.error(f"FAISS index file not found: {faiss_path}")
                raise FileNotFoundError(f"FAISS index file not found: {faiss_path}")

            if not os.path.exists(pkl_path):
                logging.error(f"FAISS pickle file not found: {pkl_path}")
                raise FileNotFoundError(f"FAISS pickle file not found: {pkl_path}")

            files = {
                "file_faiss": open(full_path + "/index.faiss", "rb"),
                "file_pkl": open(full_path + "/index.pkl", "rb"),
                "file_faiss": open(faiss_path, "rb"),
                "file_pkl": open(pkl_path, "rb"),
            }
            response = requests.post(
                urljoin(settings.API_URL, "/api/upload_index"),
@@ -114,11 +133,11 @@ def upload_index(full_path, file_data):
                urljoin(settings.API_URL, "/api/upload_index"), data=file_data
            )
        response.raise_for_status()
    except requests.RequestException as e:
    except (requests.RequestException, FileNotFoundError) as e:
        logging.error(f"Error uploading index: {e}")
        raise
    finally:
        if settings.VECTOR_STORE == "faiss":
        if settings.VECTOR_STORE == "faiss" and files is not None:
            for file in files.values():
                file.close()

@@ -139,7 +158,7 @@ def run_agent_logic(agent_config, input_data):
    user_api_key = agent_config["key"]
    agent_type = agent_config.get("agent_type", "classic")
    decoded_token = {"sub": agent_config.get("user")}
    prompt = get_prompt(prompt_id)
    prompt = get_prompt(prompt_id, db["prompts"])
    agent = AgentCreator.create_agent(
        agent_type,
        endpoint="webhook",
@@ -178,7 +197,6 @@ def run_agent_logic(agent_config, input_data):
            tool_calls.extend(line["tool_calls"])
        elif "thought" in line:
            thought += line["thought"]

    result = {
        "answer": response_full,
        "sources": source_log_docs,
@@ -193,8 +211,11 @@ def run_agent_logic(agent_config, input_data):


# Define the main function for ingesting and processing documents.
def ingest_worker(
    self, directory, formats, job_name, filename, user, dir_name=None, user_dir=None, retriever="classic"
    self, directory, formats, job_name, file_path, filename, user,
    retriever="classic"
):
    """
    Ingest and process documents.
@@ -204,10 +225,9 @@ def ingest_worker(
        directory (str): Specifies the directory for ingesting ('inputs' or 'temp').
        formats (list of str): List of file extensions to consider for ingestion (e.g., [".rst", ".md"]).
        job_name (str): Name of the job for this ingestion task (original, unsanitized).
        filename (str): Name of the file to be ingested.
        file_path (str): Complete file path to use consistently throughout the pipeline.
        filename (str): Original unsanitized filename provided by the user.
        user (str): Identifier for the user initiating the ingestion (original, unsanitized).
        dir_name (str, optional): Sanitized directory name for filesystem operations.
        user_dir (str, optional): Sanitized user ID for filesystem operations.
        retriever (str): Type of retriever to use for processing the documents.

    Returns:
@@ -218,38 +238,59 @@ def ingest_worker(
    limit = None
    exclude = True
    sample = False

    storage = StorageCreator.get_storage()

    full_path = os.path.join(directory, user_dir, dir_name)
    source_file_path = os.path.join(full_path, filename)

    logging.info(f"Ingest file: {full_path}", extra={"user": user, "job": job_name})
    logging.info(f"Ingest path: {file_path}", extra={"user": user, "job": job_name})

    # Create temporary working directory
    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            os.makedirs(temp_dir, exist_ok=True)

            # Download file from storage to temp directory
            temp_file_path = os.path.join(temp_dir, filename)
            file_data = storage.get_file(source_file_path)
            if storage.is_directory(file_path):
                # Handle directory case
                logging.info(f"Processing directory: {file_path}")
                files_list = storage.list_files(file_path)

                for storage_file_path in files_list:
                    if storage.is_directory(storage_file_path):
                        continue

                    # Create relative path structure in temp directory
                    rel_path = os.path.relpath(storage_file_path, file_path)
                    local_file_path = os.path.join(temp_dir, rel_path)

                    os.makedirs(os.path.dirname(local_file_path), exist_ok=True)

                    # Download file
                    try:
                        file_data = storage.get_file(storage_file_path)
                        with open(local_file_path, "wb") as f:
                            f.write(file_data.read())
                    except Exception as e:
                        logging.error(f"Error downloading file {storage_file_path}: {e}")
                        continue
            else:
                # Handle single file case
                temp_filename = os.path.basename(file_path)
                temp_file_path = os.path.join(temp_dir, temp_filename)

                file_data = storage.get_file(file_path)
                with open(temp_file_path, "wb") as f:
                    f.write(file_data.read())

            with open(temp_file_path, "wb") as f:
                f.write(file_data.read())
                # Handle zip files
                if temp_filename.endswith(".zip"):
                    logging.info(f"Extracting zip file: {temp_filename}")
                    extract_zip_recursive(
                        temp_file_path, temp_dir, current_depth=0, max_depth=RECURSION_DEPTH
                    )

            self.update_state(state="PROGRESS", meta={"current": 1})

            # Handle zip files
            if filename.endswith(".zip"):
                logging.info(f"Extracting zip file: {filename}")
                extract_zip_recursive(
                    temp_file_path, temp_dir, current_depth=0, max_depth=RECURSION_DEPTH
                )

            if sample:
                logging.info(f"Sample mode enabled. Using {limit} documents.")

            reader = SimpleDirectoryReader(
                input_dir=temp_dir,
                input_files=input_files,
@@ -259,6 +300,9 @@ def ingest_worker(
                file_metadata=metadata_from_filename,
            )
            raw_docs = reader.load_data()

            directory_structure = getattr(reader, 'directory_structure', {})
            logging.info(f"Directory structure from reader: {directory_structure}")

            chunker = Chunker(
                chunking_strategy="classic_chunk",
@@ -285,22 +329,21 @@ def ingest_worker(
            for i in range(min(5, len(raw_docs))):
                logging.info(f"Sample document {i}: {raw_docs[i]}")
            file_data = {
                "name": job_name,  # Use original job_name
                "name": job_name,
                "file": filename,
                "user": user,  # Use original user
                "user": user,
                "tokens": tokens,
                "retriever": retriever,
                "id": str(id),
                "type": "local",
                "original_file_path": source_file_path,
                "file_path": file_path,
                "directory_structure": json.dumps(directory_structure),
            }

            upload_index(vector_store_path, file_data)

        except Exception as e:
            logging.error(f"Error in ingest_worker: {e}", exc_info=True)
            raise

    return {
        "directory": directory,
        "formats": formats,
@@ -311,6 +354,252 @@ def ingest_worker(
    }

def reingest_source_worker(self, source_id, user):
    """
    Re-ingestion worker that handles incremental updates by:
    1. Adding chunks from newly added files
    2. Removing chunks from deleted files

    Args:
        self: Task instance
        source_id: ID of the source to re-ingest
        user: User identifier

    Returns:
        dict: Information about the re-ingestion task
    """
    try:
        from application.vectorstore.vector_creator import VectorCreator

        self.update_state(state="PROGRESS", meta={"current": 10, "status": "Initializing re-ingestion scan"})

        source = sources_collection.find_one({"_id": ObjectId(source_id), "user": user})
        if not source:
            raise ValueError(f"Source {source_id} not found or access denied")

        storage = StorageCreator.get_storage()
        source_file_path = source.get("file_path", "")

        self.update_state(state="PROGRESS", meta={"current": 20, "status": "Scanning current files"})

        with tempfile.TemporaryDirectory() as temp_dir:
            # Download all files from storage to temp directory, preserving directory structure
            if storage.is_directory(source_file_path):
                files_list = storage.list_files(source_file_path)

                for storage_file_path in files_list:
                    if storage.is_directory(storage_file_path):
                        continue

                    rel_path = os.path.relpath(storage_file_path, source_file_path)
                    local_file_path = os.path.join(temp_dir, rel_path)

                    os.makedirs(os.path.dirname(local_file_path), exist_ok=True)

                    # Download file
                    try:
                        file_data = storage.get_file(storage_file_path)
                        with open(local_file_path, "wb") as f:
                            f.write(file_data.read())
                    except Exception as e:
                        logging.error(f"Error downloading file {storage_file_path}: {e}")
                        continue

            reader = SimpleDirectoryReader(
                input_dir=temp_dir,
                recursive=True,
                required_exts=[
                    ".rst", ".md", ".pdf", ".txt", ".docx", ".csv", ".epub",
                    ".html", ".mdx", ".json", ".xlsx", ".pptx", ".png",
                    ".jpg", ".jpeg",
                ],
                exclude_hidden=True,
                file_metadata=metadata_from_filename,
            )
            reader.load_data()
            directory_structure = reader.directory_structure
            logging.info(f"Directory structure built with token counts: {directory_structure}")

            try:
                old_directory_structure = source.get("directory_structure") or {}
                if isinstance(old_directory_structure, str):
                    try:
                        old_directory_structure = json.loads(old_directory_structure)
                    except Exception:
                        old_directory_structure = {}

                def _flatten_directory_structure(struct, prefix=""):
                    files = set()
                    if isinstance(struct, dict):
                        for name, meta in struct.items():
                            current_path = os.path.join(prefix, name) if prefix else name
                            if isinstance(meta, dict) and ("type" in meta and "size_bytes" in meta):
                                files.add(current_path)
                            elif isinstance(meta, dict):
                                files |= _flatten_directory_structure(meta, current_path)
                    return files

                old_files = _flatten_directory_structure(old_directory_structure)
                new_files = _flatten_directory_structure(directory_structure)

                added_files = sorted(new_files - old_files)
                removed_files = sorted(old_files - new_files)

                if added_files:
                    logging.info(f"Files added since last ingest: {added_files}")
                else:
                    logging.info("No files added since last ingest.")

                if removed_files:
                    logging.info(f"Files removed since last ingest: {removed_files}")
                else:
                    logging.info("No files removed since last ingest.")

            except Exception as e:
                logging.error(f"Error comparing directory structures: {e}", exc_info=True)
                added_files = []
                removed_files = []

            try:
                if not added_files and not removed_files:
                    logging.info("No changes detected.")
                    return {
                        "source_id": source_id,
                        "user": user,
                        "status": "no_changes",
                        "added_files": [],
                        "removed_files": [],
                    }

                vector_store = VectorCreator.create_vectorstore(
                    settings.VECTOR_STORE,
                    source_id,
                    settings.EMBEDDINGS_KEY,
                )

                self.update_state(state="PROGRESS", meta={"current": 40, "status": "Processing file changes"})

                # 1) Delete chunks from removed files
                deleted = 0
                if removed_files:
                    try:
                        for ch in vector_store.get_chunks() or []:
                            metadata = ch.get("metadata", {}) if isinstance(ch, dict) else getattr(ch, "metadata", {})
                            raw_source = metadata.get("source")

                            source_file = str(raw_source) if raw_source else ""

                            if source_file in removed_files:
                                cid = ch.get("doc_id")
                                if cid:
                                    try:
                                        vector_store.delete_chunk(cid)
                                        deleted += 1
                                    except Exception as de:
                                        logging.error(f"Failed deleting chunk {cid}: {de}")
                        logging.info(f"Deleted {deleted} chunks from {len(removed_files)} removed files")
                    except Exception as e:
                        logging.error(f"Error during deletion of removed file chunks: {e}", exc_info=True)

                # 2) Add chunks from new files
                added = 0
                if added_files:
                    try:
                        # Build list of local files for added files only
                        added_local_files = []
                        for rel_path in added_files:
                            local_path = os.path.join(temp_dir, rel_path)
                            if os.path.isfile(local_path):
                                added_local_files.append(local_path)

                        if added_local_files:
                            reader_new = SimpleDirectoryReader(
                                input_files=added_local_files,
                                exclude_hidden=True,
                                errors="ignore",
                                file_metadata=metadata_from_filename,
                            )
                            raw_docs_new = reader_new.load_data()
                            chunker_new = Chunker(
                                chunking_strategy="classic_chunk",
                                max_tokens=MAX_TOKENS,
                                min_tokens=MIN_TOKENS,
                                duplicate_headers=False,
                            )
                            chunked_new = chunker_new.chunk(documents=raw_docs_new)

                            for file_path, token_count in reader_new.file_token_counts.items():
                                try:
                                    rel_path = os.path.relpath(file_path, start=temp_dir)
                                    path_parts = rel_path.split(os.sep)
                                    current_dir = directory_structure

                                    for part in path_parts[:-1]:
                                        if part in current_dir and isinstance(current_dir[part], dict):
                                            current_dir = current_dir[part]
                                        else:
                                            break

                                    filename = path_parts[-1]
                                    if filename in current_dir and isinstance(current_dir[filename], dict):
                                        current_dir[filename]["token_count"] = token_count
                                        logging.info(f"Updated token count for {rel_path}: {token_count}")
                                except Exception as e:
                                    logging.warning(f"Could not update token count for {file_path}: {e}")

                            for d in chunked_new:
                                meta = dict(d.extra_info or {})
                                try:
                                    raw_src = meta.get("source")
                                    if isinstance(raw_src, str) and os.path.isabs(raw_src):
                                        meta["source"] = os.path.relpath(raw_src, start=temp_dir)
                                except Exception:
                                    pass

                                vector_store.add_chunk(d.text, metadata=meta)
                                added += 1
                        logging.info(f"Added {added} chunks from {len(added_files)} new files")
                    except Exception as e:
                        logging.error(f"Error during ingestion of new files: {e}", exc_info=True)

                # 3) Update source directory structure timestamp
                try:
                    total_tokens = sum(reader.file_token_counts.values())

                    sources_collection.update_one(
                        {"_id": ObjectId(source_id)},
                        {
                            "$set": {
                                "directory_structure": directory_structure,
                                "date": datetime.datetime.now(),
                                "tokens": total_tokens
                            }
                        },
                    )
                except Exception as e:
                    logging.error(f"Error updating directory_structure in DB: {e}", exc_info=True)

                self.update_state(state="PROGRESS", meta={"current": 100, "status": "Re-ingestion completed"})

                return {
                    "source_id": source_id,
                    "user": user,
                    "status": "completed",
                    "added_files": added_files,
                    "removed_files": removed_files,
                    "chunks_added": added,
                    "chunks_deleted": deleted,
                }
            except Exception as e:
                logging.error(f"Error while processing file changes: {e}", exc_info=True)
                raise

    except Exception as e:
        logging.error(f"Error in reingest_source_worker: {e}", exc_info=True)
        raise

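The change detection reduces both directory trees to flat sets of relative paths and diffs them with plain set operations. A small worked example of that flattening, mirroring `_flatten_directory_structure` above:

```python
import os

def flatten(struct, prefix=""):
    """Collect relative file paths from a nested {name: meta-or-subdir} dict."""
    files = set()
    for name, meta in struct.items():
        path = os.path.join(prefix, name) if prefix else name
        if isinstance(meta, dict) and "type" in meta and "size_bytes" in meta:
            files.add(path)  # leaf entry describing a file
        elif isinstance(meta, dict):
            files |= flatten(meta, path)  # subdirectory
    return files

old = {"docs": {"a.md": {"type": "file", "size_bytes": 10}}}
new = {"docs": {"a.md": {"type": "file", "size_bytes": 10},
                "b.md": {"type": "file", "size_bytes": 20}}}
print(sorted(flatten(new) - flatten(old)))  # ['docs/b.md']  -> added files
print(sorted(flatten(old) - flatten(new)))  # []             -> removed files
```
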
def remote_worker(
    self,
    source_data,
@@ -326,7 +615,6 @@ def remote_worker(
    full_path = os.path.join(directory, user, name_job)
    if not os.path.exists(full_path):
        os.makedirs(full_path)

    self.update_state(state="PROGRESS", meta={"current": 1})
    try:
        logging.info("Initializing remote loader with type: %s", loader)
@@ -353,7 +641,6 @@ def remote_worker(
            raise ValueError("doc_id must be provided for sync operation.")
        id = ObjectId(doc_id)
        embed_and_store_documents(docs, full_path, id, self)

        self.update_state(state="PROGRESS", meta={"current": 100})

        file_data = {
@@ -364,18 +651,18 @@ def remote_worker(
            "id": str(id),
            "type": loader,
            "remote_data": source_data,
            "sync_frequency": sync_frequency,
            "sync_frequency": sync_frequency
        }
        upload_index(full_path, file_data)

        if operation_mode == "sync":
            file_data["last_sync"] = datetime.datetime.now()
            upload_index(full_path, file_data)
    except Exception as e:
        logging.error("Error in remote_worker task: %s", str(e), exc_info=True)
        raise

    finally:
        if os.path.exists(full_path):
            shutil.rmtree(full_path)

    logging.info("remote_worker task completed successfully")
    return {"urls": source_data, "name_job": name_job, "user": user, "limited": False}

@@ -425,10 +712,9 @@ def sync_worker(self, frequency):
            self, source_data, name, user, source_type, frequency, retriever, doc_id
        )
        sync_counts["total_sync_count"] += 1
        sync_counts[
            "sync_success" if resp["status"] == "success" else "sync_failure"
        ] += 1

    return {
        key: sync_counts[key]
        for key in ["total_sync_count", "sync_success", "sync_failure"]
@@ -463,11 +749,15 @@ def attachment_worker(self, file_info, user):
                input_files=[local_path], exclude_hidden=True, errors="ignore"
            )
            .load_data()[0]
            .text,
        )

        token_count = num_tokens_from_string(content)

        if token_count > 100000:
            content = content[:250000]
            token_count = num_tokens_from_string(content)

        self.update_state(
            state="PROGRESS", meta={"current": 80, "status": "Storing in database"}
        )
@@ -503,7 +793,6 @@ def attachment_worker(self, file_info, user):
            "mime_type": mime_type,
            "metadata": metadata,
        }

    except Exception as e:
        logging.error(
            f"Error processing file {filename}: {e}",
@@ -539,7 +828,6 @@ def agent_webhook_worker(self, agent_id, payload):
    except Exception as e:
        logging.error(f"Error processing agent webhook: {e}", exc_info=True)
        return {"status": "error", "error": str(e)}

    self.update_state(state="PROGRESS", meta={"current": 50})
    try:
        result = run_agent_logic(agent_config, input_data)
@@ -552,3 +840,174 @@ def agent_webhook_worker(self, agent_id, payload):
        f"Webhook processed for agent {agent_id}", extra={"agent_id": agent_id}
    )
    return {"status": "success", "result": result}


def ingest_connector(
    self,
    job_name: str,
    user: str,
    source_type: str,
    session_token=None,
    file_ids=None,
    folder_ids=None,
    recursive=True,
    retriever: str = "classic",
    operation_mode: str = "upload",
    doc_id=None,
    sync_frequency: str = "never",
) -> Dict[str, Any]:
    """
    Ingestion for internal knowledge bases (GoogleDrive, etc.).

    Args:
        job_name: Name of the ingestion job
        user: User identifier
        source_type: Type of remote source ("google_drive", "dropbox", etc.)
        session_token: Authentication token for the service
        file_ids: List of file IDs to download
        folder_ids: List of folder IDs to download
        recursive: Whether to recursively download folders
        retriever: Type of retriever to use
        operation_mode: "upload" for initial ingestion, "sync" for incremental sync
        doc_id: Document ID for sync operations (required when operation_mode="sync")
        sync_frequency: How often to sync ("never", "daily", "weekly", "monthly")
    """
    logging.info(f"Starting remote ingestion from {source_type} for user: {user}, job: {job_name}")
    self.update_state(state="PROGRESS", meta={"current": 1})

    with tempfile.TemporaryDirectory() as temp_dir:
        try:
            # Step 1: Initialize the appropriate loader
            self.update_state(state="PROGRESS", meta={"current": 10, "status": "Initializing connector"})

            if not session_token:
                raise ValueError(f"{source_type} connector requires session_token")

            if not ConnectorCreator.is_supported(source_type):
                raise ValueError(f"Unsupported connector type: {source_type}. Supported types: {ConnectorCreator.get_supported_connectors()}")

            remote_loader = ConnectorCreator.create_connector(source_type, session_token)

            # Create a clean config for storage
            api_source_config = {
                "file_ids": file_ids or [],
                "folder_ids": folder_ids or [],
                "recursive": recursive
            }

            # Step 2: Download files to temp directory
            self.update_state(state="PROGRESS", meta={"current": 20, "status": "Downloading files"})
            download_info = remote_loader.download_to_directory(
                temp_dir,
                api_source_config
            )

            if download_info.get("empty_result", False) or not download_info.get("files_downloaded", 0):
                logging.warning(f"No files were downloaded from {source_type}")
                # Create empty result directly instead of calling a separate method
                return {
                    "name": job_name,
                    "user": user,
                    "tokens": 0,
                    "type": source_type,
                    "source_config": api_source_config,
                    "directory_structure": "{}",
                }

            # Step 3: Use SimpleDirectoryReader to process downloaded files
            self.update_state(state="PROGRESS", meta={"current": 40, "status": "Processing files"})
            reader = SimpleDirectoryReader(
                input_dir=temp_dir,
                recursive=True,
                required_exts=[
                    ".rst", ".md", ".pdf", ".txt", ".docx", ".csv", ".epub",
                    ".html", ".mdx", ".json", ".xlsx", ".pptx", ".png",
                    ".jpg", ".jpeg",
                ],
                exclude_hidden=True,
                file_metadata=metadata_from_filename,
            )
            raw_docs = reader.load_data()
            directory_structure = getattr(reader, 'directory_structure', {})

            # Step 4: Process documents (chunking, embedding, etc.)
            self.update_state(state="PROGRESS", meta={"current": 60, "status": "Processing documents"})

            chunker = Chunker(
                chunking_strategy="classic_chunk",
                max_tokens=MAX_TOKENS,
                min_tokens=MIN_TOKENS,
                duplicate_headers=False,
            )
            raw_docs = chunker.chunk(documents=raw_docs)

            # Preserve source information in document metadata
            for doc in raw_docs:
                if hasattr(doc, 'extra_info') and doc.extra_info:
                    source = doc.extra_info.get('source')
                    if source and os.path.isabs(source):
                        # Convert absolute path to relative path
                        doc.extra_info['source'] = os.path.relpath(source, start=temp_dir)

            docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]

            if operation_mode == "upload":
                id = ObjectId()
            elif operation_mode == "sync":
                if not doc_id or not ObjectId.is_valid(doc_id):
                    logging.error("Invalid doc_id provided for sync operation: %s", doc_id)
                    raise ValueError("doc_id must be provided for sync operation.")
                id = ObjectId(doc_id)
            else:
                raise ValueError(f"Invalid operation_mode: {operation_mode}")

            vector_store_path = os.path.join(temp_dir, "vector_store")
            os.makedirs(vector_store_path, exist_ok=True)

            self.update_state(state="PROGRESS", meta={"current": 80, "status": "Storing documents"})
            embed_and_store_documents(docs, vector_store_path, id, self)

            tokens = count_tokens_docs(docs)

            # Step 6: Upload index files
            file_data = {
                "user": user,
                "name": job_name,
                "tokens": tokens,
                "retriever": retriever,
                "id": str(id),
                "type": "connector",
                "remote_data": json.dumps({
                    "provider": source_type,
                    **api_source_config
                }),
                "directory_structure": json.dumps(directory_structure),
                "sync_frequency": sync_frequency
            }

            if operation_mode == "sync":
                file_data["last_sync"] = datetime.datetime.now()
            else:
                file_data["last_sync"] = datetime.datetime.now()

            upload_index(vector_store_path, file_data)

            # Ensure we mark the task as complete
            self.update_state(state="PROGRESS", meta={"current": 100, "status": "Complete"})

            logging.info(f"Remote ingestion completed: {job_name}")

            return {
                "user": user,
                "name": job_name,
                "tokens": tokens,
                "type": source_type,
                "id": str(id),
                "status": "complete"
            }

        except Exception as e:
            logging.error(f"Error during remote ingestion: {e}", exc_info=True)
            raise

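End to end, the connector path is: validate the session token, build a connector via the factory, download into a scratch directory, then chunk, embed, and upload. A compressed sketch of the download step, using only the calls shown above with hypothetical credential values:

```python
import tempfile

from application.parser.connectors.connector_creator import ConnectorCreator

source_type, session_token = "google_drive", "token-from-oauth"  # hypothetical values

if not ConnectorCreator.is_supported(source_type):
    raise ValueError(f"Unsupported connector type: {source_type}")

loader = ConnectorCreator.create_connector(source_type, session_token)
with tempfile.TemporaryDirectory() as temp_dir:
    info = loader.download_to_directory(
        temp_dir,
        {"file_ids": [], "folder_ids": ["folder-123"], "recursive": True},
    )
    print(info.get("files_downloaded", 0), "files downloaded")
```
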
deployment/docker-compose-hub.yaml (Normal file, 74 lines)
@@ -0,0 +1,74 @@
name: docsgpt-oss
services:

  frontend:
    image: arc53/docsgpt-fe:develop
    environment:
      - VITE_API_HOST=http://localhost:7091
      - VITE_API_STREAMING=$VITE_API_STREAMING
    ports:
      - "5173:5173"
    depends_on:
      - backend

  backend:
    user: root
    image: arc53/docsgpt:develop
    environment:
      - API_KEY=$API_KEY
      - EMBEDDINGS_KEY=$API_KEY
      - LLM_PROVIDER=$LLM_PROVIDER
      - LLM_NAME=$LLM_NAME
      - CELERY_BROKER_URL=redis://redis:6379/0
      - CELERY_RESULT_BACKEND=redis://redis:6379/1
      - MONGO_URI=mongodb://mongo:27017/docsgpt
      - CACHE_REDIS_URL=redis://redis:6379/2
      - OPENAI_BASE_URL=$OPENAI_BASE_URL
    ports:
      - "7091:7091"
    volumes:
      - ../application/indexes:/app/indexes
      - ../application/inputs:/app/inputs
      - ../application/vectors:/app/vectors
    depends_on:
      - redis
      - mongo

  worker:
    user: root
    image: arc53/docsgpt:develop
    command: celery -A application.app.celery worker -l INFO -B
    environment:
      - API_KEY=$API_KEY
      - EMBEDDINGS_KEY=$API_KEY
      - LLM_PROVIDER=$LLM_PROVIDER
      - LLM_NAME=$LLM_NAME
      - CELERY_BROKER_URL=redis://redis:6379/0
      - CELERY_RESULT_BACKEND=redis://redis:6379/1
      - MONGO_URI=mongodb://mongo:27017/docsgpt
      - API_URL=http://backend:7091
      - CACHE_REDIS_URL=redis://redis:6379/2
    volumes:
      - ../application/indexes:/app/indexes
      - ../application/inputs:/app/inputs
      - ../application/vectors:/app/vectors
    depends_on:
      - redis
      - mongo

  redis:
    image: redis:6-alpine
    ports:
      - 6379:6379

  mongo:
    image: mongo:6
    ports:
      - 27017:27017
    volumes:
      - mongodb_data_container:/data/db

volumes:
  mongodb_data_container:

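Assuming the file lives at `deployment/docker-compose-hub.yaml` as the header above indicates, the stack can be brought up with `docker compose -f deployment/docker-compose-hub.yaml up -d` (Compose v2); the frontend is then served on port 5173 and the backend API on 7091, matching the port mappings declared above.
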
@@ -37,33 +37,33 @@ While modifying `settings.py` offers more flexibility, it's generally recommende
|
||||
|
||||
Here are some of the most fundamental settings you'll likely want to configure:
|
||||
|
||||
- **`LLM_PROVIDER`**: This setting determines which Large Language Model (LLM) provider DocsGPT will use. It tells DocsGPT which API to interact with.
|
||||
- **`LLM_PROVIDER`**: This setting determines which Large Language Model (LLM) provider DocsGPT will use. It tells DocsGPT which API to interact with.
|
||||
|
||||
- **Common values:**
|
||||
- `docsgpt`: Use the DocsGPT Public API Endpoint (simple and free, as offered in `setup.sh` option 1).
|
||||
- `openai`: Use OpenAI's API (requires an API key).
|
||||
- `google`: Use Google's Vertex AI or Gemini models.
|
||||
- `anthropic`: Use Anthropic's Claude models.
|
||||
- `groq`: Use Groq's models.
|
||||
- `huggingface`: Use HuggingFace Inference API.
|
||||
- `azure_openai`: Use Azure OpenAI Service.
|
||||
- `openai` (when using local inference engines like Ollama, Llama.cpp, TGI, etc.): This signals DocsGPT to use an OpenAI-compatible API format, even if the actual LLM is running locally.
|
||||
- **Common values:**
|
||||
- `docsgpt`: Use the DocsGPT Public API Endpoint (simple and free, as offered in `setup.sh` option 1).
|
||||
- `openai`: Use OpenAI's API (requires an API key).
|
||||
- `google`: Use Google's Vertex AI or Gemini models.
|
||||
- `anthropic`: Use Anthropic's Claude models.
|
||||
- `groq`: Use Groq's models.
|
||||
- `huggingface`: Use HuggingFace Inference API.
|
||||
- `azure_openai`: Use Azure OpenAI Service.
|
||||
- `openai` (when using local inference engines like Ollama, Llama.cpp, TGI, etc.): This signals DocsGPT to use an OpenAI-compatible API format, even if the actual LLM is running locally.
|
||||
|
||||
- **`LLM_NAME`**: Specifies the specific model to use from the chosen LLM provider. The available models depend on the `LLM_PROVIDER` you've selected.
|
||||
- **`LLM_NAME`**: Specifies the specific model to use from the chosen LLM provider. The available models depend on the `LLM_PROVIDER` you've selected.
|
||||
|
||||
- **Examples:**
|
||||
- For `LLM_PROVIDER=openai`: `gpt-4o`
|
||||
- For `LLM_PROVIDER=google`: `gemini-2.0-flash`
|
||||
- For local models (e.g., Ollama): `llama3.2:1b` (or any model name available in your setup).
|
||||
- **Examples:**
|
||||
- For `LLM_PROVIDER=openai`: `gpt-4o`
|
||||
- For `LLM_PROVIDER=google`: `gemini-2.0-flash`
|
||||
- For local models (e.g., Ollama): `llama3.2:1b` (or any model name available in your setup).
|
||||
|
||||
- **`EMBEDDINGS_NAME`**: This setting defines which embedding model DocsGPT will use to generate vector embeddings for your documents. Embeddings are numerical representations of text that allow DocsGPT to understand the semantic meaning of your documents for efficient search and retrieval.
|
||||
- **`EMBEDDINGS_NAME`**: This setting defines which embedding model DocsGPT will use to generate vector embeddings for your documents. Embeddings are numerical representations of text that allow DocsGPT to understand the semantic meaning of your documents for efficient search and retrieval.
|
||||
|
||||
- **Default value:** `huggingface_sentence-transformers/all-mpnet-base-v2` (a good general-purpose embedding model).
|
||||
- **Other options:** You can explore other embedding models from Hugging Face Sentence Transformers or other providers if needed.
|
||||
- **Default value:** `huggingface_sentence-transformers/all-mpnet-base-v2` (a good general-purpose embedding model).
|
||||
- **Other options:** You can explore other embedding models from Hugging Face Sentence Transformers or other providers if needed.
|
||||
|
||||
- **`API_KEY`**: Required for most cloud-based LLM providers. This is your authentication key to access the LLM provider's API. You'll need to obtain this key from your chosen provider's platform.
|
||||
- **`API_KEY`**: Required for most cloud-based LLM providers. This is your authentication key to access the LLM provider's API. You'll need to obtain this key from your chosen provider's platform.
|
||||
|
||||
- **`OPENAI_BASE_URL`**: Specifically used when `LLM_PROVIDER` is set to `openai` but you are connecting to a local inference engine (like Ollama, Llama.cpp, etc.) that exposes an OpenAI-compatible API. This setting tells DocsGPT where to find your local LLM server.
|
||||
- **`OPENAI_BASE_URL`**: Specifically used when `LLM_PROVIDER` is set to `openai` but you are connecting to a local inference engine (like Ollama, Llama.cpp, etc.) that exposes an OpenAI-compatible API. This setting tells DocsGPT where to find your local LLM server.
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
@@ -93,51 +93,82 @@ OPENAI_BASE_URL=http://host.docker.internal:11434/v1 # Default Ollama API URL wi
|
||||
EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2 # You can also run embeddings locally if needed
|
||||
```
|
||||
|
||||
In this case, even though you are using Ollama locally, `LLM_PROVIDER` is set to `openai` because Ollama (and many other local inference engines) are designed to be API-compatible with OpenAI. `OPENAI_BASE_URL` points DocsGPT to the local Ollama server.
|
||||
In this case, even though you are using Ollama locally, `LLM_PROVIDER` is set to `openai` because Ollama (and many other local inference engines) are designed to be API-compatible with OpenAI. `OPENAI_BASE_URL` points DocsGPT to the local Ollama server.
|
||||
|
||||
## Authentication Settings

DocsGPT includes a JWT (JSON Web Token) based authentication feature for securing deployments and managing sessions.

- **`AUTH_TYPE`**: This setting in your `.env` file or `settings.py` determines the authentication method.

  - **Possible values:**
    - `None` (or not set): No authentication is used.
    - `simple_jwt`: A single, long-lived JWT token is generated and used for all authenticated requests. This is useful for securing a local deployment with a shared secret.
    - `session_jwt`: Unique JWT tokens are generated per session, typically for individual users or temporary access.
  - If `AUTH_TYPE` is set to `simple_jwt` or `session_jwt`, a `JWT_SECRET_KEY` is required.

- **`JWT_SECRET_KEY`**: This is a crucial secret key used to sign and verify JWTs.

  - It can be set directly in your `.env` file or `settings.py`.
  - **Automatic Key Generation**: If `AUTH_TYPE` is `simple_jwt` or `session_jwt` and `JWT_SECRET_KEY` is _not_ set in your environment variables or `settings.py`, DocsGPT will attempt to:
    1. Read the key from a file named `.jwt_secret_key` in the project's root directory.
    2. If the file doesn't exist, generate a new 32-byte random key, save it to `.jwt_secret_key`, and use it for the session. This ensures the key persists across application restarts.
  - **Security Note**: It's vital to keep this key secure. If you set it manually, choose a strong, random string; a sketch for generating one follows.
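
Any 32 bytes of cryptographically secure randomness will do. A quick sketch using Node's `crypto` module (this mirrors the backend's 32-byte choice but is not DocsGPT's own key-generation code):

```typescript
// Generate a strong value for JWT_SECRET_KEY (sketch, not DocsGPT internals).
import { randomBytes } from 'node:crypto';

const secret = randomBytes(32).toString('hex'); // 32 random bytes, 64 hex chars
console.log(`JWT_SECRET_KEY=${secret}`);
```
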
### `AUTH_TYPE` Overview

**How it works:**

The `AUTH_TYPE` setting controls how clients authenticate with your DocsGPT instance:

- When `AUTH_TYPE` is set to `simple_jwt`, a token is generated at startup (if not already present or configured) and printed to the console. Include this token in the `Authorization` header of your API requests as a Bearer token (e.g., `Authorization: Bearer YOUR_SIMPLE_JWT_TOKEN`).
- When `AUTH_TYPE` is set to `session_jwt`:
  - Clients can request a new token from the `/api/generate_token` endpoint.
  - This token should then be included in the `Authorization` header for subsequent requests.
- The backend verifies the JWT token provided in the `Authorization` header for protected routes.
- The `/api/config` endpoint can be used to check the current `auth_type` and whether authentication is required.

| Value         | Description                                                                                 |
| ------------- | ------------------------------------------------------------------------------------------- |
| `None`        | No authentication is used. Anyone can access the app.                                       |
| `simple_jwt`  | A single, long-lived JWT token is generated at startup. All requests use this shared token. |
| `session_jwt` | Unique JWT tokens are generated for each session/user.                                      |
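
To make the `simple_jwt` row concrete, here is a minimal sketch of an authenticated call (TypeScript; the backend port and the request body shape are assumptions for a default local deployment):

```typescript
// Sketch: calling a protected endpoint under AUTH_TYPE=simple_jwt.
// SIMPLE_JWT_TOKEN is the value the backend prints at startup; the port
// and payload shape are assumptions, not a documented contract.
const token = process.env.SIMPLE_JWT_TOKEN;

async function ask(question: string) {
  const res = await fetch('http://localhost:7091/api/answer', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${token}`, // same shared token on every request
    },
    body: JSON.stringify({ question }),
  });
  return res.json();
}

ask('What is DocsGPT?').then(console.log).catch(console.error);
```
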
#### How to Configure

Add the following to your `.env` file (or set in `settings.py`):

```env
# No authentication (default)
AUTH_TYPE=None

# OR: Simple JWT (shared token)
AUTH_TYPE=simple_jwt
JWT_SECRET_KEY=your_secret_key_here

# OR: Session JWT (per-user/session tokens)
AUTH_TYPE=session_jwt
JWT_SECRET_KEY=your_secret_key_here
```

- If `AUTH_TYPE` is set to `simple_jwt` or `session_jwt`, a `JWT_SECRET_KEY` is required.
- If `JWT_SECRET_KEY` is not set, DocsGPT will generate one and store it in `.jwt_secret_key` in the project root.

#### How Each Method Works

- **None**: No authentication. All API and UI access is open.
- **simple_jwt**:
  - A single JWT token is generated at startup and printed to the console.
  - Use this token in the `Authorization` header for all API requests:
    ```http
    Authorization: Bearer <SIMPLE_JWT_TOKEN>
    ```
  - The frontend will prompt for this token if it is not already set.
- **session_jwt**:
  - Clients can request a new token from `/api/generate_token`.
  - Use the received token in the `Authorization` header for subsequent requests.
  - Each user/session gets a unique token. A sketch of this flow follows.
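
The `session_jwt` flow can be sketched end to end like this (TypeScript; the endpoint paths come from this page, while the HTTP method, port, and response field names are assumptions):

```typescript
// Sketch of the session_jwt flow: check the deployment's auth mode, request
// a per-session token, and build the Authorization header for later calls.
// Endpoint paths are from the docs above; `token` as a response field name
// and the GET method are assumptions.
async function getSessionAuthHeader(base = 'http://localhost:7091') {
  const config = await (await fetch(`${base}/api/config`)).json();
  if (config.auth_type !== 'session_jwt') return undefined;

  const data = await (await fetch(`${base}/api/generate_token`)).json();

  // Send this header on every subsequent request.
  return { Authorization: `Bearer ${data.token}` };
}
```
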
#### Security Notes

- Always keep your `JWT_SECRET_KEY` secure and private.
- If you set it manually, use a strong, random string.
- If it is not set, DocsGPT will generate a secure key and persist it in `.jwt_secret_key`.

#### Checking Current Auth Type

- Use the `/api/config` endpoint to check the current `auth_type` and whether authentication is required.

#### Frontend Token Input for `simple_jwt`

If you have configured `AUTH_TYPE=simple_jwt`, the DocsGPT frontend will prompt you to enter the JWT token if it's not already set or is invalid. Paste the `SIMPLE_JWT_TOKEN` (printed to your console when the backend starts) into this field to access the application.

<img
  src="/jwt-input.png"
  alt="Frontend prompt for JWT Token"
  style={{
    width: "500px",
    maxWidth: "100%",
    display: "block",
    margin: "1em auto",
  }}
/>

## Exploring More Settings

These are just the basic settings to get you started. The `settings.py` file contains many more advanced options that you can explore to further customize DocsGPT, such as:

@@ -147,4 +178,4 @@ These are just the basic settings to get you started. The `settings.py` file con
- Cache settings (`CACHE_REDIS_URL`)
- And many more!

For a complete list of available settings and their descriptions, refer to the `settings.py` file in `application/core`. Remember to restart your Docker containers after making changes to your `.env` file or `settings.py` for the changes to take effect.

@@ -60,7 +60,7 @@ const config = {
           GitHub
         </a>
         {' | '}
-        <a href="https://www.blog.docsgpt.cloud/" target="_blank">
+        <a href="https://blog.docsgpt.cloud/" target="_blank">
           Blog
         </a>
       </div>

@@ -5,6 +5,8 @@
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0,viewport-fit=cover" />
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="theme-color" content="#fbfbfb" media="(prefers-color-scheme: light)" />
    <meta name="theme-color" content="#161616" media="(prefers-color-scheme: dark)" />
    <title>DocsGPT</title>
    <link rel="shortcut icon" type="image/x-icon" href="/favicon.ico" />
  </head>

frontend/package-lock.json (generated)
@@ -55,7 +55,7 @@
         "postcss": "^8.4.49",
         "prettier": "^3.5.3",
         "prettier-plugin-tailwindcss": "^0.6.13",
-        "tailwindcss": "^4.1.10",
+        "tailwindcss": "^4.1.11",
         "typescript": "^5.8.3",
         "vite": "^6.3.5",
         "vite-plugin-svgr": "^4.3.0"
@@ -1696,6 +1696,13 @@
         "tailwindcss": "4.1.10"
       }
     },
+    "node_modules/@tailwindcss/node/node_modules/tailwindcss": {
+      "version": "4.1.10",
+      "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.10.tgz",
+      "integrity": "sha512-P3nr6WkvKV/ONsTzj6Gb57sWPMX29EPNPopo7+FcpkQaNsrNpZ1pv8QmrYI2RqEKD7mlGqLnGovlcYnBK0IqUA==",
+      "dev": true,
+      "license": "MIT"
+    },
     "node_modules/@tailwindcss/oxide": {
       "version": "4.1.10",
       "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.10.tgz",
@@ -1908,6 +1915,66 @@
         "node": ">=14.0.0"
       }
     },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
+      "version": "1.4.3",
+      "dev": true,
+      "inBundle": true,
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "@emnapi/wasi-threads": "1.0.2",
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
+      "version": "1.4.3",
+      "dev": true,
+      "inBundle": true,
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
+      "version": "1.0.2",
+      "dev": true,
+      "inBundle": true,
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
+      "version": "0.2.10",
+      "dev": true,
+      "inBundle": true,
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "@emnapi/core": "^1.4.3",
+        "@emnapi/runtime": "^1.4.3",
+        "@tybys/wasm-util": "^0.9.0"
+      }
+    },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
+      "version": "0.9.0",
+      "dev": true,
+      "inBundle": true,
+      "license": "MIT",
+      "optional": true,
+      "dependencies": {
+        "tslib": "^2.4.0"
+      }
+    },
+    "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
+      "version": "2.8.0",
+      "dev": true,
+      "inBundle": true,
+      "license": "0BSD",
+      "optional": true
+    },
     "node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
       "version": "4.1.10",
       "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.10.tgz",
@@ -1956,6 +2023,13 @@
         "tailwindcss": "4.1.10"
       }
     },
+    "node_modules/@tailwindcss/postcss/node_modules/tailwindcss": {
+      "version": "4.1.10",
+      "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.10.tgz",
+      "integrity": "sha512-P3nr6WkvKV/ONsTzj6Gb57sWPMX29EPNPopo7+FcpkQaNsrNpZ1pv8QmrYI2RqEKD7mlGqLnGovlcYnBK0IqUA==",
+      "dev": true,
+      "license": "MIT"
+    },
     "node_modules/@types/babel__core": {
       "version": "7.20.5",
       "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
@@ -10402,9 +10476,9 @@
     }
   },
   "node_modules/tailwindcss": {
-    "version": "4.1.10",
-    "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.10.tgz",
-    "integrity": "sha512-P3nr6WkvKV/ONsTzj6Gb57sWPMX29EPNPopo7+FcpkQaNsrNpZ1pv8QmrYI2RqEKD7mlGqLnGovlcYnBK0IqUA==",
+    "version": "4.1.11",
+    "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.11.tgz",
+    "integrity": "sha512-2E9TBm6MDD/xKYe+dvJZAmg3yxIEDNRc0jwlNyDg/4Fil2QcSLjFKGVff0lAf1jjeaArlG/M75Ey/EYr/OJtBA==",
     "dev": true,
     "license": "MIT"
   },

@@ -66,7 +66,7 @@
     "postcss": "^8.4.49",
     "prettier": "^3.5.3",
     "prettier-plugin-tailwindcss": "^0.6.13",
-    "tailwindcss": "^4.1.10",
+    "tailwindcss": "^4.1.11",
     "typescript": "^5.8.3",
     "vite": "^6.3.5",
     "vite-plugin-svgr": "^4.3.0"

@@ -20,6 +20,7 @@ import {
   setSelectedAgent,
 } from '../preferences/preferenceSlice';
 import PromptsModal from '../preferences/PromptsModal';
+import Prompts from '../settings/Prompts';
 import { UserToolType } from '../settings/types';
 import AgentPreview from './AgentPreview';
 import { Agent } from './types';
@@ -46,10 +47,11 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
     source: '',
     chunks: '',
     retriever: '',
-    prompt_id: '',
+    prompt_id: 'default',
     tools: [],
     agent_type: '',
     status: '',
+    json_schema: undefined,
   });
   const [imageFile, setImageFile] = useState<File | null>(null);
   const [prompts, setPrompts] = useState<
@@ -71,6 +73,9 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
   const [hasChanges, setHasChanges] = useState(false);
   const [draftLoading, setDraftLoading] = useState(false);
   const [publishLoading, setPublishLoading] = useState(false);
+  const [jsonSchemaText, setJsonSchemaText] = useState('');
+  const [jsonSchemaValid, setJsonSchemaValid] = useState(true);
+  const [isJsonSchemaExpanded, setIsJsonSchemaExpanded] = useState(false);

   const initialAgentRef = useRef<Agent | null>(null);
   const sourceAnchorButtonRef = useRef<HTMLButtonElement>(null);
@@ -112,9 +117,15 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
   ];

   const isPublishable = () => {
-    return (
-      agent.name && agent.description && agent.prompt_id && agent.agent_type
-    );
+    const hasRequiredFields =
+      agent.name && agent.description && agent.prompt_id && agent.agent_type;
+    const isJsonSchemaValidOrEmpty =
+      jsonSchemaText.trim() === '' || jsonSchemaValid;
+    return hasRequiredFields && isJsonSchemaValidOrEmpty;
   };

+  const isJsonSchemaInvalid = () => {
+    return jsonSchemaText.trim() !== '' && !jsonSchemaValid;
+  };
+
   const handleUpload = useCallback((files: File[]) => {
@@ -152,6 +163,10 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
       formData.append('tools', JSON.stringify(agent.tools));
     else formData.append('tools', '[]');

+    if (agent.json_schema) {
+      formData.append('json_schema', JSON.stringify(agent.json_schema));
+    }
+
     try {
       setDraftLoading(true);
       const response =
@@ -193,6 +208,10 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
       formData.append('tools', JSON.stringify(agent.tools));
     else formData.append('tools', '[]');

+    if (agent.json_schema) {
+      formData.append('json_schema', JSON.stringify(agent.json_schema));
+    }
+
     try {
       setPublishLoading(true);
       const response =
@@ -225,6 +244,22 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
     }
   };

+  const validateAndSetJsonSchema = (text: string) => {
+    setJsonSchemaText(text);
+    if (text.trim() === '') {
+      setAgent({ ...agent, json_schema: undefined });
+      setJsonSchemaValid(true);
+      return;
+    }
+    try {
+      const parsed = JSON.parse(text);
+      setAgent({ ...agent, json_schema: parsed });
+      setJsonSchemaValid(true);
+    } catch (error) {
+      setJsonSchemaValid(false);
+    }
+  };
+
   useEffect(() => {
     const getTools = async () => {
       const response = await userService.getUserTools(token);
@@ -263,6 +298,11 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
         setSelectedSourceIds(new Set([data.retriever]));
       if (data.tools) setSelectedToolIds(new Set(data.tools));
       if (data.status === 'draft') setEffectiveMode('draft');
+      if (data.json_schema) {
+        const jsonText = JSON.stringify(data.json_schema, null, 2);
+        setJsonSchemaText(jsonText);
+        setJsonSchemaValid(true);
+      }
       setAgent(data);
       initialAgentRef.current = data;
     };
@@ -316,10 +356,17 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
       setHasChanges(false);
       return;
     }

+    const initialJsonSchemaText = initialAgentRef.current.json_schema
+      ? JSON.stringify(initialAgentRef.current.json_schema, null, 2)
+      : '';
+
     const isChanged =
-      !isEqual(agent, initialAgentRef.current) || imageFile !== null;
+      !isEqual(agent, initialAgentRef.current) ||
+      imageFile !== null ||
+      jsonSchemaText !== initialJsonSchemaText;
     setHasChanges(isChanged);
-  }, [agent, dispatch, effectiveMode, imageFile]);
+  }, [agent, dispatch, effectiveMode, imageFile, jsonSchemaText]);
   return (
     <div className="p-4 md:p-12">
       <div className="flex items-center gap-3 px-4">
@@ -355,7 +402,10 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
           )}
           {modeConfig[effectiveMode].showSaveDraft && (
             <button
-              className="hover:bg-violets-are-blue border-violets-are-blue text-violets-are-blue hover:bg-violets-are-blue w-28 rounded-3xl border border-solid py-2 text-sm font-medium transition-colors hover:text-white"
+              disabled={isJsonSchemaInvalid()}
+              className={`border-violets-are-blue text-violets-are-blue hover:bg-violets-are-blue w-28 rounded-3xl border border-solid py-2 text-sm font-medium transition-colors hover:text-white ${
+                isJsonSchemaInvalid() ? 'cursor-not-allowed opacity-30' : ''
+              }`}
               onClick={handleSaveDraft}
             >
               <span className="flex items-center justify-center transition-all duration-200">
@@ -504,32 +554,32 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
             </div>
           </div>
           <div className="rounded-[30px] bg-[#F6F6F6] px-6 py-3 dark:bg-[#383838] dark:text-[#E0E0E0]">
-            <h2 className="text-lg font-semibold">Prompt</h2>
-            <div className="mt-3 flex flex-wrap items-center gap-1">
+            <div className="flex flex-wrap items-end gap-1">
               <div className="min-w-20 grow basis-full sm:basis-0">
-                <Dropdown
-                  options={prompts.map((prompt) => ({
-                    label: prompt.name,
-                    value: prompt.id,
-                  }))}
-                  selectedValue={
-                    agent.prompt_id
-                      ? prompts.filter(
-                          (prompt) => prompt.id === agent.prompt_id,
-                        )[0]?.name || null
-                      : null
+                <Prompts
+                  prompts={prompts}
+                  selectedPrompt={
+                    prompts.find((prompt) => prompt.id === agent.prompt_id) ||
+                    prompts[0]
                   }
-                  onSelect={(option: { label: string; value: string }) =>
-                    setAgent({ ...agent, prompt_id: option.value })
+                  onSelectPrompt={(name, id, type) =>
+                    setAgent({ ...agent, prompt_id: id })
                   }
-                  size="w-full"
-                  rounded="3xl"
-                  border="border"
-                  buttonClassName="bg-white dark:bg-[#222327] border-silver dark:border-[#7E7E7E]"
-                  optionsClassName="bg-white dark:bg-[#383838] border-silver dark:border-[#7E7E7E] dark:border-[#7E7E7E] dark:bg-dark-charcoal"
-                  placeholderClassName="text-gray-400 dark:text-silver"
-                  placeholder="Select a prompt"
-                  contentSize="text-sm"
+                  setPrompts={setPrompts}
+                  title="Prompt"
+                  titleClassName="text-lg font-semibold dark:text-[#E0E0E0]"
+                  showAddButton={false}
+                  dropdownProps={{
+                    size: 'w-full',
+                    rounded: '3xl',
+                    border: 'border',
+                    buttonClassName:
+                      'bg-white dark:bg-[#222327] border-silver dark:border-[#7E7E7E]',
+                    optionsClassName:
+                      'bg-white dark:bg-[#383838] border-silver dark:border-[#7E7E7E]',
+                    placeholderClassName: 'text-gray-400 dark:text-silver',
+                    contentSize: 'text-sm',
+                  }}
                 />
               </div>
               <button
@@ -601,6 +651,78 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
               />
             </div>
           </div>
+          <div className="rounded-[30px] bg-[#F6F6F6] px-6 py-3 dark:bg-[#383838] dark:text-[#E0E0E0]">
+            <button
+              onClick={() => setIsJsonSchemaExpanded(!isJsonSchemaExpanded)}
+              className="flex w-full items-center justify-between text-left focus:outline-none"
+            >
+              <div>
+                <h2 className="text-lg font-semibold">Advanced</h2>
+              </div>
+              <div className="ml-4 flex items-center">
+                <svg
+                  className={`h-5 w-5 transform transition-transform duration-200 ${
+                    isJsonSchemaExpanded ? 'rotate-180' : ''
+                  }`}
+                  fill="none"
+                  stroke="currentColor"
+                  viewBox="0 0 24 24"
+                >
+                  <path
+                    strokeLinecap="round"
+                    strokeLinejoin="round"
+                    strokeWidth={2}
+                    d="M19 9l-7 7-7-7"
+                  />
+                </svg>
+              </div>
+            </button>
+            {isJsonSchemaExpanded && (
+              <div className="mt-3">
+                <div>
+                  <h2 className="text-sm font-medium">JSON response schema</h2>
+                  <p className="mt-1 text-xs text-gray-600 dark:text-gray-400">
+                    Define a JSON schema to enforce structured output format
+                  </p>
+                </div>
+                <textarea
+                  value={jsonSchemaText}
+                  onChange={(e) => validateAndSetJsonSchema(e.target.value)}
+                  placeholder={`{
+  "type": "object",
+  "properties": {
+    "name": {"type": "string"},
+    "email": {"type": "string"}
+  },
+  "required": ["name", "email"],
+  "additionalProperties": false
+}`}
+                  rows={9}
+                  className={`border-silver text-jet dark:bg-raisin-black dark:text-bright-gray mt-2 w-full rounded-2xl border bg-white px-4 py-3 font-mono text-sm outline-hidden dark:border-[#7E7E7E]`}
+                />
+                {jsonSchemaText.trim() !== '' && (
+                  <div
+                    className={`mt-2 flex items-center gap-2 text-sm ${
+                      jsonSchemaValid
+                        ? 'text-green-600 dark:text-green-400'
+                        : 'text-red-600 dark:text-red-400'
+                    }`}
+                  >
+                    <span
+                      className={`h-4 w-4 bg-contain bg-center bg-no-repeat ${
+                        jsonSchemaValid
+                          ? "bg-[url('/src/assets/circle-check.svg')]"
+                          : "bg-[url('/src/assets/circle-x.svg')]"
+                      }`}
+                    />
+                    {jsonSchemaValid
+                      ? 'Valid JSON'
+                      : 'Invalid JSON - fix to enable saving'}
+                  </div>
+                )}
+              </div>
+            )}
+          </div>
         </div>
         <div className="col-span-3 flex flex-col gap-3 rounded-[30px] bg-[#F6F6F6] px-6 py-3 dark:bg-[#383838] dark:text-[#E0E0E0]">
           <h2 className="text-lg font-semibold">Preview</h2>

@@ -57,12 +57,11 @@ export const fetchPreviewAnswer = createAsyncThunk<
     signal,
     state.preference.token,
     state.preference.selectedDocs!,
     state.agentPreview.queries,
-    null, // No conversation ID for previews
     state.preference.prompt.id,
     state.preference.chunks,
     state.preference.token_limit,
-    (event) => {
+    (event: MessageEvent) => {
       const data = JSON.parse(event.data);
       const targetIndex = indx ?? state.agentPreview.queries.length - 1;
@@ -97,6 +96,17 @@
               message: data.error,
             }),
           );
+        } else if (data.type === 'structured_answer') {
+          dispatch(
+            updateStreamingQuery({
+              index: targetIndex,
+              query: {
+                response: data.answer,
+                structured: data.structured,
+                schema: data.schema,
+              },
+            }),
+          );
         } else {
           dispatch(
             updateStreamingQuery({
@@ -118,7 +128,6 @@
     signal,
     state.preference.token,
     state.preference.selectedDocs!,
     state.agentPreview.queries,
-    null, // No conversation ID for previews
     state.preference.prompt.id,
     state.preference.chunks,
@@ -203,6 +212,14 @@
       state.queries[index].response =
         (state.queries[index].response || '') + query.response;
     }
+
+    if (query.structured !== undefined) {
+      state.queries[index].structured = query.structured;
+    }
+
+    if (query.schema !== undefined) {
+      state.queries[index].schema = query.schema;
+    }
   },
   updateThought(
     state,
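
For readers following the reducer changes above, the streamed event being handled appears to have this shape. This is an inferred sketch from the field accesses in the diff (`data.type`, `data.answer`, `data.structured`, `data.schema`), not a documented contract:

```typescript
// Inferred from the diff above; the comments are interpretation.
interface StructuredAnswerEvent {
  type: 'structured_answer';
  answer: string; // the answer text to display
  structured: unknown; // parsed output matching the agent's JSON schema
  schema: object; // the JSON schema the output was generated against
}
```
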
@@ -26,4 +26,5 @@
   created_at?: string;
   updated_at?: string;
   last_used_at?: string;
+  json_schema?: object;
 };

@@ -38,13 +38,25 @@
     UPDATE_TOOL_STATUS: '/api/update_tool_status',
     UPDATE_TOOL: '/api/update_tool',
     DELETE_TOOL: '/api/delete_tool',
-    GET_CHUNKS: (docId: string, page: number, per_page: number) =>
-      `/api/get_chunks?id=${docId}&page=${page}&per_page=${per_page}`,
+    SYNC_CONNECTOR: '/api/connectors/sync',
+    GET_CHUNKS: (
+      docId: string,
+      page: number,
+      per_page: number,
+      path?: string,
+      search?: string,
+    ) =>
+      `/api/get_chunks?id=${docId}&page=${page}&per_page=${per_page}${
+        path ? `&path=${encodeURIComponent(path)}` : ''
+      }${search ? `&search=${encodeURIComponent(search)}` : ''}`,
     ADD_CHUNK: '/api/add_chunk',
     DELETE_CHUNK: (docId: string, chunkId: string) =>
       `/api/delete_chunk?id=${docId}&chunk_id=${chunkId}`,
     UPDATE_CHUNK: '/api/update_chunk',
     STORE_ATTACHMENT: '/api/store_attachment',
+    DIRECTORY_STRUCTURE: (docId: string) =>
+      `/api/directory_structure?id=${docId}`,
+    MANAGE_SOURCE_FILES: '/api/manage_source_files',
   },
   CONVERSATION: {
     ANSWER: '/api/answer',
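
For illustration, the extended `GET_CHUNKS` builder above produces URLs like the following (the values are placeholders):

```typescript
// Placeholder values; encodeURIComponent keeps user-supplied path and
// search fragments URL-safe.
const url = endpoints.USER.GET_CHUNKS('doc123', 1, 5, 'src/app', 'retriever');
// => '/api/get_chunks?id=doc123&page=1&per_page=5&path=src%2Fapp&search=retriever'
```
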
@@ -1,5 +1,6 @@
 import apiClient from '../client';
 import endpoints from '../endpoints';
+import { getSessionToken } from '../../utils/providerUtils';

 const userService = {
   getConfig: (): Promise<any> => apiClient.get(endpoints.USER.CONFIG, null),
@@ -86,8 +87,10 @@
     page: number,
     perPage: number,
     token: string | null,
+    path?: string,
+    search?: string,
   ): Promise<any> =>
-    apiClient.get(endpoints.USER.GET_CHUNKS(docId, page, perPage), token),
+    apiClient.get(endpoints.USER.GET_CHUNKS(docId, page, perPage, path, search), token),
   addChunk: (data: any, token: string | null): Promise<any> =>
     apiClient.post(endpoints.USER.ADD_CHUNK, data, token),
   deleteChunk: (
@@ -98,6 +101,22 @@
     apiClient.delete(endpoints.USER.DELETE_CHUNK(docId, chunkId), token),
   updateChunk: (data: any, token: string | null): Promise<any> =>
     apiClient.put(endpoints.USER.UPDATE_CHUNK, data, token),
+  getDirectoryStructure: (docId: string, token: string | null): Promise<any> =>
+    apiClient.get(endpoints.USER.DIRECTORY_STRUCTURE(docId), token),
+  manageSourceFiles: (data: FormData, token: string | null): Promise<any> =>
+    apiClient.postFormData(endpoints.USER.MANAGE_SOURCE_FILES, data, token),
+  syncConnector: (docId: string, provider: string, token: string | null): Promise<any> => {
+    const sessionToken = getSessionToken(provider);
+    return apiClient.post(
+      endpoints.USER.SYNC_CONNECTOR,
+      {
+        source_id: docId,
+        session_token: sessionToken,
+        provider: provider
+      },
+      token
+    );
+  },
 };

 export default userService;
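
A hypothetical call site for the new `syncConnector` helper, inside an async function. The ids are placeholders; `google_drive` is the one provider named elsewhere in this changeset:

```typescript
// Placeholder ids; posts { source_id, session_token, provider } to
// /api/connectors/sync as defined in the endpoints map above.
const result = await userService.syncConnector('doc123', 'google_drive', token);
```
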
frontend/src/assets/calendar.svg (new file)
@@ -0,0 +1,4 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6 7.5C6 7.36739 5.94732 7.24021 5.85355 7.14645C5.75979 7.05268 5.63261 7 5.5 7H4.5C4.36739 7 4.24021 7.05268 4.14645 7.14645C4.05268 7.24021 4 7.36739 4 7.5V8.5C4 8.63261 4.05268 8.75979 4.14645 8.85355C4.24021 8.94732 4.36739 9 4.5 9H5.5C5.63261 9 5.75979 8.94732 5.85355 8.85355C5.94732 8.75979 6 8.63261 6 8.5V7.5ZM6 10.5C6 10.3674 5.94732 10.2402 5.85355 10.1464C5.75979 10.0527 5.63261 10 5.5 10H4.5C4.36739 10 4.24021 10.0527 4.14645 10.1464C4.05268 10.2402 4 10.3674 4 10.5V11.5C4 11.6326 4.05268 11.7598 4.14645 11.8536C4.24021 11.9473 4.36739 12 4.5 12H5.5C5.63261 12 5.75979 11.9473 5.85355 11.8536C5.94732 11.7598 6 11.6326 6 11.5V10.5ZM7.5 7H8.5C8.63261 7 8.75979 7.05268 8.85355 7.14645C8.94732 7.24021 9 7.36739 9 7.5V8.5C9 8.63261 8.94732 8.75979 8.85355 8.85355C8.75979 8.94732 8.63261 9 8.5 9H7.5C7.36739 9 7.24021 8.94732 7.14645 8.85355C7.05268 8.75979 7 8.63261 7 8.5V7.5C7 7.36739 7.05268 7.24021 7.14645 7.14645C7.24021 7.05268 7.36739 7 7.5 7ZM8.5 10H7.5C7.36739 10 7.24021 10.0527 7.14645 10.1464C7.05268 10.2402 7 10.3674 7 10.5V11.5C7 11.6326 7.05268 11.7598 7.14645 11.8536C7.24021 11.9473 7.36739 12 7.5 12H8.5C8.63261 12 8.75979 11.9473 8.85355 11.8536C8.94732 11.7598 9 11.6326 9 11.5V10.5C9 10.3674 8.94732 10.2402 8.85355 10.1464C8.75979 10.0527 8.63261 10 8.5 10ZM10 7.5C10 7.36739 10.0527 7.24021 10.1464 7.14645C10.2402 7.05268 10.3674 7 10.5 7H11.5C11.6326 7 11.7598 7.05268 11.8536 7.14645C11.9473 7.24021 12 7.36739 12 7.5V8.5C12 8.63261 11.9473 8.75979 11.8536 8.85355C11.7598 8.94732 11.6326 9 11.5 9H10.5C10.3674 9 10.2402 8.94732 10.1464 8.85355C10.0527 8.75979 10 8.63261 10 8.5V7.5Z" fill="#848484"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M4.5 0C4.63261 0 4.75979 0.0526784 4.85355 0.146447C4.94732 0.240215 5 0.367392 5 0.5V1H11V0.5C11 0.367392 11.0527 0.240215 11.1464 0.146447C11.2402 0.0526784 11.3674 0 11.5 0C11.6326 0 11.7598 0.0526784 11.8536 0.146447C11.9473 0.240215 12 0.367392 12 0.5V1C13.66 1 15 2.34 15 4V12C15 13.66 13.66 15 12 15H4C2.34 15 1 13.66 1 12V4C1 2.34 2.34 1 4 1V0.5C4 0.367392 4.05268 0.240215 4.14645 0.146447C4.24021 0.0526784 4.36739 0 4.5 0ZM14 4V5H2V4C2 2.9 2.895 2 4 2V2.5C4 2.63261 4.05268 2.75979 4.14645 2.85355C4.24021 2.94732 4.36739 3 4.5 3C4.63261 3 4.75979 2.94732 4.85355 2.85355C4.94732 2.75979 5 2.63261 5 2.5V2H11V2.5C11 2.63261 11.0527 2.75979 11.1464 2.85355C11.2402 2.94732 11.3674 3 11.5 3C11.6326 3 11.7598 2.94732 11.8536 2.85355C11.9473 2.75979 12 2.63261 12 2.5V2C13.1 2 14 2.895 14 4ZM2 12V6H14V12C14 13.1 13.105 14 12 14H4C2.9 14 2 13.105 2 12Z" fill="#848484"/>
</svg>

frontend/src/assets/disc.svg (new file)
@@ -0,0 +1,3 @@
<svg width="14" height="10" viewBox="0 0 14 10" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M7 0C8.8144 0 10.4902 0.332143 11.739 0.898571C12.362 1.18143 12.9108 1.53643 13.3119 1.96714C13.7172 2.4 14 2.94429 14 3.57143V6.42857C14 7.05571 13.7172 7.59929 13.3119 8.03286C12.9108 8.46357 12.3627 8.81857 11.739 9.10143C10.4902 9.66786 8.8144 10 7 10C5.1856 10 3.5098 9.66786 2.261 9.10143C1.638 8.81857 1.0892 8.46357 0.6881 8.03286C0.2828 7.6 0 7.05571 0 6.42857V3.57143C0 2.94429 0.2828 2.40071 0.6881 1.96714C1.0892 1.53643 1.6373 1.18143 2.261 0.898571C3.5098 0.332143 5.1856 0 7 0ZM12.6 5.77143C12.3375 5.94714 12.047 6.10429 11.739 6.24429C10.4902 6.81071 8.8144 7.14286 7 7.14286C5.1856 7.14286 3.5098 6.81071 2.261 6.24429C1.96243 6.10966 1.67456 5.95157 1.4 5.77143V6.42857C1.4 6.59071 1.47 6.79857 1.7024 7.04857C1.9383 7.30143 2.3128 7.56214 2.8294 7.79643C3.8612 8.26429 5.3354 8.57143 7 8.57143C8.6646 8.57143 10.1388 8.26429 11.1706 7.79643C11.6872 7.56214 12.0617 7.30143 12.2976 7.04857C12.5307 6.79857 12.6 6.59071 12.6 6.42857V5.77143ZM7 1.42857C5.3347 1.42857 3.8612 1.73571 2.8294 2.20357C2.3128 2.43786 1.9383 2.69857 1.7024 2.95143C1.4693 3.20143 1.4 3.40929 1.4 3.57143C1.4 3.73357 1.47 3.94143 1.7024 4.19143C1.9383 4.44429 2.3128 4.705 2.8294 4.93929C3.8612 5.40714 5.3354 5.71429 7 5.71429C8.6646 5.71429 10.1388 5.40714 11.1706 4.93929C11.6872 4.705 12.0617 4.44429 12.2976 4.19143C12.5307 3.94143 12.6 3.73357 12.6 3.57143C12.6 3.40929 12.53 3.20143 12.2976 2.95143C12.0617 2.69857 11.6872 2.43786 11.1706 2.20357C10.1388 1.73643 8.6646 1.42857 7 1.42857Z" fill="#848484"/>
</svg>

frontend/src/assets/file.svg (new file)
@@ -0,0 +1,3 @@
<svg width="13" height="17" viewBox="0 0 13 17" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M0 1.94971C0 0.983707 0.784 0.199707 1.75 0.199707H8.336C8.8 0.199707 9.245 0.383707 9.573 0.712707L12.487 3.62671C12.816 3.95471 13 4.39971 13 4.86371V14.4497C13 14.9138 12.8156 15.359 12.4874 15.6871C12.1592 16.0153 11.7141 16.1997 11.25 16.1997H1.75C1.28587 16.1997 0.840752 16.0153 0.512563 15.6871C0.184375 15.359 0 14.9138 0 14.4497V1.94971ZM1.75 1.69971C1.6837 1.69971 1.62011 1.72605 1.57322 1.77293C1.52634 1.81981 1.5 1.8834 1.5 1.94971V14.4497C1.5 14.5877 1.612 14.6997 1.75 14.6997H11.25C11.3163 14.6997 11.3799 14.6734 11.4268 14.6265C11.4737 14.5796 11.5 14.516 11.5 14.4497V6.19971H8.75C8.28587 6.19971 7.84075 6.01533 7.51256 5.68714C7.18437 5.35896 7 4.91384 7 4.44971V1.69971H1.75ZM8.5 1.76171V4.44971C8.5 4.58771 8.612 4.69971 8.75 4.69971H11.438L11.427 4.68671L8.513 1.77271L8.5 1.76171Z" fill="#59636E"/>
</svg>

frontend/src/assets/folder.svg (new file)
@@ -0,0 +1,3 @@
<svg width="16" height="15" viewBox="0 0 16 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1.75 0.599915C1.28587 0.599915 0.840752 0.784289 0.512563 1.11248C0.184374 1.44067 0 1.88579 0 2.34991L0 12.8499C0 13.8159 0.784 14.5999 1.75 14.5999H14.25C14.7141 14.5999 15.1592 14.4155 15.4874 14.0874C15.8156 13.7592 16 13.314 16 12.8499V4.34991C16 3.88579 15.8156 3.44067 15.4874 3.11248C15.1592 2.78429 14.7141 2.59991 14.25 2.59991H7.5C7.46119 2.59991 7.42291 2.59088 7.3882 2.57352C7.35348 2.55616 7.32329 2.53096 7.3 2.49991L6.4 1.29991C6.07 0.859915 5.55 0.599915 5 0.599915H1.75Z" fill="#A382E7"/>
</svg>

frontend/src/assets/outline-source.svg (new file)
@@ -0,0 +1,3 @@
<svg width="21" height="21" viewBox="0 0 21 21" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M19.25 5.25H12.25L10.5 3.5H5.25C4.2875 3.5 3.50875 4.2875 3.50875 5.25L3.5 15.75C3.5 16.7125 4.2875 17.5 5.25 17.5H19.25C20.2125 17.5 21 16.7125 21 15.75V7C21 6.0375 20.2125 5.25 19.25 5.25ZM19.25 15.75H5.25V5.25H9.77375L11.5238 7H19.25V15.75ZM17.5 10.5H7V8.75H17.5V10.5ZM14 14H7V12.25H14V14Z" fill="#949494"/>
</svg>

frontend/src/assets/search.svg (new file)
@@ -0,0 +1,3 @@
<svg width="15" height="15" viewBox="0 0 15 15" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.4798 10.739C9.27414 11.6748 7.7572 12.116 6.23773 11.9728C4.71826 11.8296 3.31047 11.1127 2.30094 9.96806C1.2914 8.82345 0.756002 7.33714 0.803717 5.81168C0.851432 4.28622 1.47868 2.83628 2.55777 1.75699C3.63706 0.677895 5.087 0.0506505 6.61246 0.00293578C8.13792 -0.044779 9.62423 0.490623 10.7688 1.50016C11.9135 2.50969 12.6303 3.91747 12.7736 5.43694C12.9168 6.95641 12.4756 8.47336 11.5398 9.67899L14.5798 12.719C14.6785 12.8107 14.7507 12.9273 14.7887 13.0565C14.8267 13.1858 14.8291 13.3229 14.7958 13.4534C14.7624 13.5839 14.6944 13.703 14.5991 13.7982C14.5037 13.8933 14.3844 13.961 14.2538 13.994C14.1234 14.0274 13.9864 14.0251 13.8573 13.9872C13.7281 13.9494 13.6115 13.8775 13.5198 13.779L10.4798 10.739ZM11.2998 5.99899C11.3087 5.4026 11.1989 4.81039 10.9768 4.25681C10.7547 3.70323 10.4248 3.19934 10.0062 2.77445C9.58757 2.34955 9.08865 2.01214 8.53844 1.78183C7.98824 1.55152 7.39773 1.43292 6.80127 1.43292C6.20481 1.43292 5.6143 1.55152 5.0641 1.78183C4.5139 2.01214 4.01498 2.34955 3.59637 2.77445C3.17777 3.19934 2.84783 3.70323 2.62575 4.25681C2.40367 4.81039 2.29388 5.4026 2.30277 5.99899C2.32039 7.18045 2.80208 8.30756 3.6438 9.13682C4.48552 9.96608 5.61968 10.4309 6.80127 10.4309C7.98286 10.4309 9.11703 9.96608 9.95874 9.13682C10.8005 8.30756 11.2822 7.18045 11.2998 5.99899Z" fill="#59636E"/>
</svg>

frontend/src/components/Chunks.tsx (new file)
@@ -0,0 +1,660 @@
import React, { useState, useEffect, useRef } from 'react';
import { useSelector } from 'react-redux';
import { useTranslation } from 'react-i18next';
import { selectToken } from '../preferences/preferenceSlice';
import { useDarkTheme, useLoaderState, useMediaQuery, useOutsideAlerter } from '../hooks';
import userService from '../api/services/userService';
import ArrowLeft from '../assets/arrow-left.svg';
import NoFilesIcon from '../assets/no-files.svg';
import NoFilesDarkIcon from '../assets/no-files-dark.svg';
import OutlineSource from '../assets/outline-source.svg';
import SkeletonLoader from './SkeletonLoader';
import ConfirmationModal from '../modals/ConfirmationModal';
import { ActiveState } from '../models/misc';
import { ChunkType } from '../settings/types';
import Pagination from './DocumentPagination';
import FileIcon from '../assets/file.svg';
import FolderIcon from '../assets/folder.svg';
import SearchIcon from '../assets/search.svg';

interface LineNumberedTextareaProps {
  value: string;
  onChange: (value: string) => void;
  placeholder?: string;
  ariaLabel?: string;
  className?: string;
  editable?: boolean;
  onDoubleClick?: () => void;
}

const LineNumberedTextarea: React.FC<LineNumberedTextareaProps> = ({
  value,
  onChange,
  placeholder,
  ariaLabel,
  className = '',
  editable = true,
  onDoubleClick
}) => {
  const { isMobile } = useMediaQuery();

  const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
    onChange(e.target.value);
  };

  const lineHeight = 19.93;
  const contentLines = value.split('\n').length;

  const heightOffset = isMobile ? 200 : 300;
  const minLinesForDisplay = Math.ceil((typeof window !== 'undefined' ? window.innerHeight - heightOffset : 600) / lineHeight);
  const totalLines = Math.max(contentLines, minLinesForDisplay);

  return (
    <div className={`relative w-full ${className}`}>
      <div
        className="absolute left-0 top-0 w-8 lg:w-12 text-right text-gray-500 dark:text-gray-400 text-xs lg:text-sm font-mono leading-[19.93px] select-none pr-2 lg:pr-3 pointer-events-none"
        style={{
          height: `${totalLines * lineHeight}px`
        }}
      >
        {Array.from({ length: totalLines }, (_, i) => (
          <div
            key={i + 1}
            className="flex items-center justify-end h-[19.93px] leading-[19.93px]"
          >
            {i + 1}
          </div>
        ))}
      </div>
      <textarea
        className={`w-full resize-none bg-transparent dark:text-white font-['Inter'] text-[13.68px] leading-[19.93px] text-[#18181B] outline-none border-none pl-8 lg:pl-12 overflow-hidden ${isMobile ? 'min-h-[calc(100vh-200px)]' : 'min-h-[calc(100vh-300px)]'} ${!editable ? 'select-none' : ''}`}
        value={value}
        onChange={editable ? handleChange : undefined}
        onDoubleClick={onDoubleClick}
        placeholder={placeholder}
        aria-label={ariaLabel}
        rows={totalLines}
        readOnly={!editable}
        style={{
          height: `${totalLines * lineHeight}px`
        }}
      />
    </div>
  );
};

interface SearchResult {
  path: string;
  isFile: boolean;
}

interface ChunksProps {
  documentId: string;
  documentName?: string;
  handleGoBack: () => void;
  path?: string;
  onFileSearch?: (query: string) => SearchResult[];
  onFileSelect?: (path: string) => void;
}

const Chunks: React.FC<ChunksProps> = ({
  documentId,
  documentName,
  handleGoBack,
  path,
  onFileSearch,
  onFileSelect,
}) => {
  const [fileSearchQuery, setFileSearchQuery] = useState('');
  const [fileSearchResults, setFileSearchResults] = useState<SearchResult[]>([]);
  const searchDropdownRef = useRef<HTMLDivElement>(null);
  const { t } = useTranslation();
  const token = useSelector(selectToken);
  const [isDarkTheme] = useDarkTheme();
  const [paginatedChunks, setPaginatedChunks] = useState<ChunkType[]>([]);
  const [page, setPage] = useState(1);
  const [perPage, setPerPage] = useState(5);
  const [totalChunks, setTotalChunks] = useState(0);
  const [loading, setLoading] = useLoaderState(true);
  const [searchTerm, setSearchTerm] = useState<string>('');
  const [editingChunk, setEditingChunk] = useState<ChunkType | null>(null);
  const [editingTitle, setEditingTitle] = useState('');
  const [editingText, setEditingText] = useState('');
  const [isAddingChunk, setIsAddingChunk] = useState(false);
  const [deleteModalState, setDeleteModalState] = useState<ActiveState>('INACTIVE');
  const [chunkToDelete, setChunkToDelete] = useState<ChunkType | null>(null);
  const [isEditing, setIsEditing] = useState(false);

  const pathParts = path ? path.split('/') : [];

  const fetchChunks = () => {
    setLoading(true);
    try {
      userService
        .getDocumentChunks(documentId, page, perPage, token, path, searchTerm)
        .then((response) => {
          if (!response.ok) {
            setLoading(false);
            setPaginatedChunks([]);
            throw new Error('Failed to fetch chunks data');
          }
          return response.json();
        })
        .then((data) => {
          setPage(data.page);
          setPerPage(data.per_page);
          setTotalChunks(data.total);
          setPaginatedChunks(data.chunks);
          setLoading(false);
        })
        .catch((error) => {
          setLoading(false);
          setPaginatedChunks([]);
        });
    } catch (e) {
      setLoading(false);
      setPaginatedChunks([]);
    }
  };
|
||||
const handleAddChunk = (title: string, text: string) => {
|
||||
if (!text.trim()) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const metadata = {
|
||||
source: path || documentName,
|
||||
source_id: documentId,
|
||||
title: title,
|
||||
};
|
||||
|
||||
userService
|
||||
.addChunk(
|
||||
{
|
||||
id: documentId,
|
||||
text: text,
|
||||
metadata: metadata,
|
||||
},
|
||||
token,
|
||||
)
|
||||
.then((response) => {
|
||||
if (!response.ok) {
|
||||
throw new Error('Failed to add chunk');
|
||||
}
|
||||
fetchChunks();
|
||||
});
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
};
|
||||
|
||||
const handleUpdateChunk = (title: string, text: string, chunk: ChunkType) => {
|
||||
|
||||
if (!text.trim()) {
|
||||
return;
|
||||
}
|
||||
|
||||
const originalTitle = chunk.metadata?.title || '';
|
||||
const originalText = chunk.text || '';
|
||||
|
||||
if (title === originalTitle && text === originalText) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
userService
|
||||
.updateChunk(
|
||||
{
|
||||
id: documentId,
|
||||
chunk_id: chunk.doc_id,
|
||||
text: text,
|
||||
metadata: {
|
||||
title: title,
|
||||
},
|
||||
},
|
||||
token,
|
||||
)
|
||||
.then((response) => {
|
||||
if (!response.ok) {
|
||||
throw new Error('Failed to update chunk');
|
||||
}
|
||||
fetchChunks();
|
||||
});
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDeleteChunk = (chunk: ChunkType) => {
|
||||
try {
|
||||
userService
|
||||
.deleteChunk(documentId, chunk.doc_id, token)
|
||||
.then((response) => {
|
||||
if (!response.ok) {
|
||||
throw new Error('Failed to delete chunk');
|
||||
}
|
||||
setEditingChunk(null);
|
||||
fetchChunks();
|
||||
});
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
};
|
||||
|
||||
const confirmDeleteChunk = (chunk: ChunkType) => {
|
||||
setChunkToDelete(chunk);
|
||||
setDeleteModalState('ACTIVE');
|
||||
};
|
||||
|
||||
const handleConfirmedDelete = () => {
|
||||
if (chunkToDelete) {
|
||||
handleDeleteChunk(chunkToDelete);
|
||||
setDeleteModalState('INACTIVE');
|
||||
setChunkToDelete(null);
|
||||
}
|
||||
};
|
||||
|
||||
const handleCancelDelete = () => {
|
||||
setDeleteModalState('INACTIVE');
|
||||
setChunkToDelete(null);
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
const delayDebounceFn = setTimeout(() => {
|
||||
if (page !== 1) {
|
||||
setPage(1);
|
||||
} else {
|
||||
fetchChunks();
|
||||
}
|
||||
}, 300);
|
||||
|
||||
return () => clearTimeout(delayDebounceFn);
|
||||
}, [searchTerm]);
|
||||
|
||||
useEffect(() => {
|
||||
!loading && fetchChunks();
|
||||
}, [page, perPage, path]);
|
||||
|
||||
useEffect(() => {
|
||||
setSearchTerm('');
|
||||
setPage(1);
|
||||
}, [path]);
|
||||
|
||||
const filteredChunks = paginatedChunks;
|
||||
|
||||
const renderPathNavigation = () => {
|
||||
return (
|
||||
<div className="mb-0 min-h-[38px] flex flex-col sm:flex-row sm:items-center sm:justify-between text-base gap-2">
|
||||
<div className="flex w-full items-center sm:w-auto">
|
||||
<button
|
||||
className="mr-3 flex h-[29px] w-[29px] items-center justify-center rounded-full border p-2 text-sm text-gray-400 dark:border-0 dark:bg-[#28292D] dark:text-gray-500 dark:hover:bg-[#2E2F34] transition-all duration-200 font-medium"
|
||||
onClick={editingChunk ? () => setEditingChunk(null) : isAddingChunk ? () => setIsAddingChunk(false) : handleGoBack}
|
||||
>
|
||||
<img src={ArrowLeft} alt="left-arrow" className="h-3 w-3" />
|
||||
</button>
|
||||
|
||||
<div className="flex items-center flex-wrap">
|
||||
{/* Removed the directory icon */}
|
||||
<span className="text-[#7D54D1] font-semibold break-words">
|
||||
{documentName}
|
||||
</span>
|
||||
|
||||
{pathParts.length > 0 && (
|
||||
<>
|
||||
<span className="mx-1 text-gray-500 flex-shrink-0">/</span>
|
||||
{pathParts.map((part, index) => (
|
||||
<React.Fragment key={index}>
|
||||
<span className={`break-words ${
|
||||
index < pathParts.length - 1
|
||||
? 'text-[#7D54D1] font-medium'
|
||||
: 'text-gray-700 dark:text-gray-300'
|
||||
}`}>
|
||||
{part}
|
||||
</span>
|
||||
{index < pathParts.length - 1 && (
|
||||
<span className="mx-1 text-gray-500 flex-shrink-0">/</span>
|
||||
)}
|
||||
</React.Fragment>
|
||||
))}
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-row flex-nowrap items-center gap-2 w-full sm:w-auto justify-end mt-2 sm:mt-0 overflow-x-auto">
|
||||
{editingChunk ? (
|
||||
!isEditing ? (
|
||||
<>
|
||||
<button
|
||||
className="bg-purple-30 hover:bg-violets-are-blue flex h-[38px] min-w-[108px] items-center justify-center rounded-full px-4 text-[14px] whitespace-nowrap text-white font-medium"
|
||||
onClick={() => setIsEditing(true)}
|
||||
>
|
||||
{t('modals.chunk.edit')}
|
||||
</button>
|
||||
<button
|
||||
className="rounded-full border border-solid border-red-500 px-4 py-1 text-[14px] text-nowrap text-red-500 hover:bg-red-500 hover:text-white h-[38px] min-w-[108px] flex items-center justify-center font-medium"
|
||||
onClick={() => {
|
||||
confirmDeleteChunk(editingChunk);
|
||||
}}
|
||||
>
|
||||
{t('modals.chunk.delete')}
|
||||
</button>
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<button
|
||||
onClick={() => {
|
||||
setIsEditing(false);
|
||||
}}
|
||||
className="dark:text-light-gray cursor-pointer rounded-full px-4 py-1 text-sm font-medium hover:bg-gray-100 dark:bg-transparent dark:hover:bg-[#767183]/50 text-nowrap h-[38px] min-w-[108px] flex items-center justify-center"
|
||||
>
|
||||
{t('modals.chunk.cancel')}
|
||||
</button>
|
||||
<button
|
||||
onClick={() => {
|
||||
if (editingText.trim()) {
|
||||
const hasChanges = editingTitle !== (editingChunk?.metadata?.title || '') ||
|
||||
editingText !== (editingChunk?.text || '');
|
||||
|
||||
if (hasChanges) {
|
||||
handleUpdateChunk(editingTitle, editingText, editingChunk);
|
||||
}
|
||||
setIsEditing(false);
|
||||
setEditingChunk(null);
|
||||
}
|
||||
}}
|
||||
disabled={!editingText.trim() || (editingTitle === (editingChunk?.metadata?.title || '') && editingText === (editingChunk?.text || ''))}
|
||||
className={`text-nowrap rounded-full px-4 py-1 text-[14px] text-white transition-all flex items-center justify-center h-[38px] min-w-[108px] font-medium ${
|
||||
editingText.trim() && (editingTitle !== (editingChunk?.metadata?.title || '') || editingText !== (editingChunk?.text || ''))
|
||||
? 'bg-purple-30 hover:bg-violets-are-blue cursor-pointer'
|
||||
: 'bg-gray-400 cursor-not-allowed'
|
||||
}`}
|
||||
>
|
||||
{t('modals.chunk.save')}
|
||||
</button>
|
||||
</>
|
||||
)
|
||||
) : isAddingChunk ? (
|
||||
<>
|
||||
<button
|
||||
onClick={() => setIsAddingChunk(false)}
|
||||
className="dark:text-light-gray cursor-pointer rounded-full px-4 py-1 text-sm font-medium hover:bg-gray-100 dark:bg-transparent dark:hover:bg-[#767183]/50 text-nowrap h-[38px] min-w-[108px] flex items-center justify-center"
|
||||
>
|
||||
{t('modals.chunk.cancel')}
|
||||
</button>
|
||||
<button
|
||||
onClick={() => {
|
||||
if (editingText.trim()) {
|
||||
handleAddChunk(editingTitle, editingText);
|
||||
setIsAddingChunk(false);
|
||||
}
|
||||
}}
|
||||
disabled={!editingText.trim()}
|
||||
className={`text-nowrap rounded-full px-4 py-1 text-[14px] text-white transition-all flex items-center justify-center h-[38px] min-w-[108px] font-medium ${
|
||||
editingText.trim()
|
||||
? 'bg-purple-30 hover:bg-violets-are-blue cursor-pointer'
|
||||
: 'bg-gray-400 cursor-not-allowed'
|
||||
}`}
|
||||
>
|
||||
{t('modals.chunk.add')}
|
||||
</button>
|
||||
</>
|
||||
) : null}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
// File search handling
|
||||
const handleFileSearchChange = (query: string) => {
|
||||
setFileSearchQuery(query);
|
||||
if (query.trim() && onFileSearch) {
|
||||
const results = onFileSearch(query);
|
||||
setFileSearchResults(results);
|
||||
} else {
|
||||
setFileSearchResults([]);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSearchResultClick = (result: SearchResult) => {
|
||||
if (!onFileSelect) return;
|
||||
|
||||
if (result.isFile) {
|
||||
onFileSelect(result.path);
|
||||
} else {
|
||||
// For directories, navigate to the directory and return to file tree
|
||||
onFileSelect(result.path);
|
||||
handleGoBack();
|
||||
}
|
||||
setFileSearchQuery('');
|
||||
setFileSearchResults([]);
|
||||
};
|
||||
|
||||
useOutsideAlerter(
|
||||
searchDropdownRef,
|
||||
() => {
|
||||
setFileSearchQuery('');
|
||||
setFileSearchResults([]);
|
||||
},
|
||||
[], // No additional dependencies
|
||||
false // Don't handle escape key
|
||||
);
|
||||
|
||||
const renderFileSearch = () => {
|
||||
return (
|
||||
<div className="relative" ref={searchDropdownRef}>
|
||||
<div className="relative flex items-center">
|
||||
<div className="absolute left-3 pointer-events-none">
|
||||
<img src={SearchIcon} alt="Search" className="w-4 h-4" />
|
||||
</div>
|
||||
<input
|
||||
type="text"
|
||||
value={fileSearchQuery}
|
||||
onChange={(e) => handleFileSearchChange(e.target.value)}
|
||||
placeholder={t('settings.sources.searchFiles')}
|
||||
className={`w-full h-[38px] border border-[#D1D9E0] pl-10 pr-4 py-2 dark:border-[#6A6A6A]
|
||||
${fileSearchQuery
|
||||
? 'rounded-t-[6px]'
|
||||
: 'rounded-[6px]'
|
||||
}
|
||||
bg-transparent focus:outline-none dark:text-[#E0E0E0] transition-all duration-200`}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{fileSearchQuery && (
|
||||
<div className="absolute z-10 max-h-[calc(100vh-200px)] w-full overflow-hidden rounded-b-[6px] border border-t-0 border-[#D1D9E0] bg-white shadow-lg dark:border-[#6A6A6A] dark:bg-[#1F2023]">
|
||||
<div className="max-h-[calc(100vh-200px)] overflow-y-auto overflow-x-hidden">
|
||||
{fileSearchResults.length === 0 ? (
|
||||
<div className="py-2 text-center text-sm text-gray-500 dark:text-gray-400">
|
||||
{t('settings.sources.noResults')}
|
||||
</div>
|
||||
) : (
|
||||
fileSearchResults.map((result, index) => (
|
||||
<div
|
||||
key={index}
|
||||
title={result.path}
|
||||
onClick={() => handleSearchResultClick(result)}
|
||||
className={`flex cursor-pointer items-center px-3 py-2 hover:bg-[#ECEEEF] dark:hover:bg-[#27282D] ${
|
||||
index !== fileSearchResults.length - 1
|
||||
? 'border-b border-[#D1D9E0] dark:border-[#6A6A6A]'
|
||||
: ''
|
||||
}`}
|
||||
>
|
||||
<img
|
||||
src={result.isFile ? FileIcon : FolderIcon}
|
||||
alt={result.isFile ? 'File' : 'Folder'}
|
||||
className="mr-2 h-4 w-4 flex-shrink-0"
|
||||
/>
|
||||
<span className="text-sm dark:text-[#E0E0E0] truncate">
|
||||
{result.path.split('/').pop() || result.path}
|
||||
</span>
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="flex flex-col">
|
||||
<div className="mb-2">
|
||||
{renderPathNavigation()}
|
||||
</div>
|
||||
<div className="flex gap-4">
|
||||
{onFileSearch && onFileSelect && (
|
||||
<div className="hidden lg:block w-[198px]">
|
||||
{renderFileSearch()}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Right side: Chunks content */}
|
||||
<div className="flex-1">
|
||||
{!editingChunk && !isAddingChunk ? (
|
||||
<>
|
||||
<div className="mb-3 flex flex-col sm:flex-row items-start sm:items-center justify-between gap-3">
|
||||
<div className="flex-1 w-full flex items-center border border-[#D1D9E0] dark:border-[#6A6A6A] rounded-md overflow-hidden h-[38px]">
|
||||
<div className="px-4 flex items-center text-gray-700 dark:text-[#E0E0E0] font-medium whitespace-nowrap h-full">
|
||||
{totalChunks > 999999
|
||||
? `${(totalChunks / 1000000).toFixed(2)}M`
|
||||
: totalChunks > 999
|
||||
? `${(totalChunks / 1000).toFixed(2)}K`
|
||||
: totalChunks} {t('settings.sources.chunks')}
|
||||
</div>
|
||||
<div className="h-full w-[1px] bg-[#D1D9E0] dark:bg-[#6A6A6A]"></div>
|
||||
<div className="flex-1 h-full">
|
||||
<input
|
||||
type="text"
|
||||
placeholder={t('settings.sources.searchPlaceholder')}
|
||||
value={searchTerm}
|
||||
onChange={(e) => setSearchTerm(e.target.value)}
|
||||
className="w-full h-full px-3 py-2 bg-transparent border-none outline-none font-normal text-[13.56px] leading-[100%] dark:text-[#E0E0E0]"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
<button
|
||||
className="bg-purple-30 hover:bg-violets-are-blue flex h-[38px] w-full sm:w-auto min-w-[108px] items-center justify-center rounded-full px-4 text-[14px] whitespace-normal text-white shrink-0 font-medium"
|
||||
title={t('settings.sources.addChunk')}
|
||||
onClick={() => {
|
||||
setIsAddingChunk(true);
|
||||
setEditingTitle('');
|
||||
setEditingText('');
|
||||
}}
|
||||
>
|
||||
{t('settings.sources.addChunk')}
|
||||
</button>
|
||||
</div>
|
||||
{loading ? (
|
||||
<div className="w-full grid grid-cols-1 sm:[grid-template-columns:repeat(auto-fit,minmax(400px,1fr))] gap-4 justify-items-start">
|
||||
<SkeletonLoader component="chunkCards" count={perPage} />
|
||||
</div>
|
||||
) : (
|
||||
<div className="w-full grid grid-cols-1 sm:[grid-template-columns:repeat(auto-fit,minmax(400px,1fr))] gap-4 justify-items-start">
|
||||
{filteredChunks.length === 0 ? (
|
||||
<div className="col-span-full w-full min-h-[50vh] flex flex-col items-center justify-center text-center text-gray-500 dark:text-gray-400">
|
||||
<img
|
||||
src={isDarkTheme ? NoFilesDarkIcon : NoFilesIcon}
|
||||
alt={t('settings.sources.noChunksAlt')}
|
||||
className="mx-auto mb-2 h-24 w-24"
|
||||
/>
|
||||
{t('settings.sources.noChunks')}
|
||||
</div>
|
||||
) : (
|
||||
filteredChunks.map((chunk, index) => (
|
||||
<div
|
||||
key={index}
|
||||
className="transform transition-transform duration-200 hover:scale-105 relative flex h-[197px] flex-col justify-between rounded-[5.86px] border border-[#D1D9E0] dark:border-[#6A6A6A] overflow-hidden cursor-pointer w-full max-w-[487px]"
|
||||
onClick={() => {
|
||||
setEditingChunk(chunk);
|
||||
setEditingTitle(chunk.metadata?.title || '');
|
||||
setEditingText(chunk.text || '');
|
||||
}}
|
||||
>
|
||||
<div className="w-full">
|
||||
<div className="flex w-full items-center justify-between border-b border-[#D1D9E0] bg-[#F6F8FA] dark:bg-[#27282D] dark:border-[#6A6A6A] px-4 py-3">
|
||||
<div className="text-[#59636E] text-sm dark:text-[#E0E0E0]">
|
||||
{chunk.metadata.token_count ? chunk.metadata.token_count.toLocaleString() : '-'} {t('settings.sources.tokensUnit')}
|
||||
</div>
|
||||
</div>
|
||||
<div className="px-4 pt-3 pb-6">
|
||||
<p className="font-['Inter'] text-[13.68px] leading-[19.93px] text-[#18181B] dark:text-[#E0E0E0] line-clamp-6 font-normal">
|
||||
{chunk.text}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
))
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
) : isAddingChunk ? (
|
||||
<div className="w-full">
|
||||
<div className="relative border border-[#D1D9E0] dark:border-[#6A6A6A] rounded-lg overflow-hidden">
|
||||
<LineNumberedTextarea
|
||||
value={editingText}
|
||||
onChange={setEditingText}
|
||||
ariaLabel={t('modals.chunk.promptText')}
|
||||
editable={true}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
) : editingChunk && (
|
||||
<div className="w-full">
|
||||
<div className="relative flex flex-col rounded-[5.86px] border border-[#D1D9E0] dark:border-[#6A6A6A] overflow-hidden w-full">
|
||||
<div className="flex w-full items-center justify-between border-b border-[#D1D9E0] bg-[#F6F8FA] dark:bg-[#27282D] dark:border-[#6A6A6A] px-4 py-3">
|
||||
<div className="text-[#59636E] text-sm dark:text-[#E0E0E0]">
|
||||
{editingChunk.metadata.token_count ? editingChunk.metadata.token_count.toLocaleString() : '-'} {t('settings.sources.tokensUnit')}
|
||||
</div>
|
||||
</div>
|
||||
<div className="p-4 overflow-hidden">
|
||||
<LineNumberedTextarea
|
||||
value={isEditing ? editingText : editingChunk.text}
|
||||
onChange={setEditingText}
|
||||
ariaLabel={t('modals.chunk.promptText')}
|
||||
editable={isEditing}
|
||||
onDoubleClick={() => {
|
||||
if (!isEditing) {
|
||||
setIsEditing(true);
|
||||
setEditingTitle(editingChunk.metadata.title || '');
|
||||
setEditingText(editingChunk.text);
|
||||
}
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{!loading && totalChunks > perPage && !editingChunk && !isAddingChunk && (
|
||||
<Pagination
|
||||
currentPage={page}
|
||||
totalPages={Math.ceil(totalChunks / perPage)}
|
||||
rowsPerPage={perPage}
|
||||
onPageChange={setPage}
|
||||
onRowsPerPageChange={(rows) => {
|
||||
setPerPage(rows);
|
||||
setPage(1);
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Delete Confirmation Modal */}
|
||||
<ConfirmationModal
|
||||
message={t('modals.chunk.deleteConfirmation')}
|
||||
modalState={deleteModalState}
|
||||
setModalState={setDeleteModalState}
|
||||
handleSubmit={handleConfirmedDelete}
|
||||
handleCancel={handleCancelDelete}
|
||||
submitLabel={t('modals.chunk.delete')}
|
||||
variant="danger"
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Chunks;
|
||||
frontend/src/components/ConnectorAuth.tsx (new file, 112 lines)
@@ -0,0 +1,112 @@
import React, { useRef } from 'react';
import { useSelector } from 'react-redux';
import { selectToken } from '../preferences/preferenceSlice';

interface ConnectorAuthProps {
  provider: string;
  onSuccess: (data: { session_token: string; user_email: string }) => void;
  onError: (error: string) => void;
  label?: string;
}

const providerLabel = (provider: string) => {
  const map: Record<string, string> = {
    google_drive: 'Google Drive',
  };
  return map[provider] || provider.replace(/_/g, ' ');
};

const ConnectorAuth: React.FC<ConnectorAuthProps> = ({ provider, onSuccess, onError, label }) => {
  const token = useSelector(selectToken);
  const completedRef = useRef(false);
  const intervalRef = useRef<number | null>(null);

  const cleanup = () => {
    if (intervalRef.current) {
      clearInterval(intervalRef.current);
      intervalRef.current = null;
    }
    window.removeEventListener('message', handleAuthMessage as any);
  };

  const handleAuthMessage = (event: MessageEvent) => {
    const successGeneric = event.data?.type === 'connector_auth_success';
    const successProvider = event.data?.type === `${provider}_auth_success` || event.data?.type === 'google_drive_auth_success';
    const errorProvider = event.data?.type === `${provider}_auth_error` || event.data?.type === 'google_drive_auth_error';

    if (successGeneric || successProvider) {
      completedRef.current = true;
      cleanup();
      onSuccess({
        session_token: event.data.session_token,
        user_email: event.data.user_email || 'Connected User',
      });
    } else if (errorProvider) {
      completedRef.current = true;
      cleanup();
      onError(event.data.error || 'Authentication failed');
    }
  };

  const handleAuth = async () => {
    try {
      completedRef.current = false;
      cleanup();

      const apiHost = import.meta.env.VITE_API_HOST;
      const authResponse = await fetch(`${apiHost}/api/connectors/auth?provider=${provider}`, {
        headers: { Authorization: `Bearer ${token}` },
      });

      if (!authResponse.ok) {
        throw new Error(`Failed to get authorization URL: ${authResponse.status}`);
      }

      const authData = await authResponse.json();
      if (!authData.success || !authData.authorization_url) {
        throw new Error(authData.error || 'Failed to get authorization URL');
      }

      const authWindow = window.open(
        authData.authorization_url,
        `${provider}-auth`,
        'width=500,height=600,scrollbars=yes,resizable=yes'
      );
      if (!authWindow) {
        throw new Error('Failed to open authentication window. Please allow popups.');
      }

      window.addEventListener('message', handleAuthMessage as any);

      const checkClosed = window.setInterval(() => {
        if (authWindow.closed) {
          clearInterval(checkClosed);
          window.removeEventListener('message', handleAuthMessage as any);
          if (!completedRef.current) {
            onError('Authentication was cancelled');
          }
        }
      }, 1000);
      intervalRef.current = checkClosed;
    } catch (error) {
      onError(error instanceof Error ? error.message : 'Authentication failed');
    }
  };

  const buttonLabel = label || `Connect ${providerLabel(provider)}`;

  return (
    <button
      onClick={handleAuth}
      className="w-full flex items-center justify-center gap-2 rounded-lg bg-blue-500 px-4 py-3 text-white hover:bg-blue-600 transition-colors"
    >
      <svg className="h-5 w-5" viewBox="0 0 24 24">
        <path fill="currentColor" d="M6.28 3l5.72 10H24l-5.72-10H6.28zm11.44 0L12 13l5.72 10H24L18.28 3h-.56zM0 13l5.72 10h5.72L5.72 13H0z"/>
      </svg>
      {buttonLabel}
    </button>
  );
};

export default ConnectorAuth;
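For context, the component is wired up by giving it a provider key plus success/error callbacks; the OAuth popup posts the session token back via window messages. A minimal usage sketch (the surrounding panel component and its state handling are hypothetical, not part of this commit):

// Hypothetical host component; only the ConnectorAuth props are from the commit.
import React, { useState } from 'react';
import ConnectorAuth from './ConnectorAuth';

function GoogleDriveConnectPanel() {
  const [email, setEmail] = useState<string | null>(null);
  const [error, setError] = useState<string | null>(null);

  return (
    <div>
      <ConnectorAuth
        provider="google_drive"
        onSuccess={({ session_token, user_email }) => {
          // session_token would typically be persisted before further API calls.
          setEmail(user_email);
        }}
        onError={setError}
      />
      {email && <p>Connected as {email}</p>}
      {error && <p role="alert">{error}</p>}
    </div>
  );
}

export default GoogleDriveConnectPanel;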
frontend/src/components/ConnectorTreeComponent.tsx (new file, 733 lines)
@@ -0,0 +1,733 @@
import React, { useState, useRef, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelector } from 'react-redux';
import { formatBytes } from '../utils/stringUtils';
import { selectToken } from '../preferences/preferenceSlice';
import Chunks from './Chunks';
import ContextMenu, { MenuOption } from './ContextMenu';
import userService from '../api/services/userService';
import FileIcon from '../assets/file.svg';
import FolderIcon from '../assets/folder.svg';
import ArrowLeft from '../assets/arrow-left.svg';
import ThreeDots from '../assets/three-dots.svg';
import EyeView from '../assets/eye-view.svg';
import SyncIcon from '../assets/sync.svg';
import { useOutsideAlerter } from '../hooks';

interface FileNode {
  type?: string;
  token_count?: number;
  size_bytes?: number;
  [key: string]: any;
}

interface DirectoryStructure {
  [key: string]: FileNode;
}

interface ConnectorTreeComponentProps {
  docId: string;
  sourceName: string;
  onBackToDocuments: () => void;
}

interface SearchResult {
  name: string;
  path: string;
  isFile: boolean;
}

const ConnectorTreeComponent: React.FC<ConnectorTreeComponentProps> = ({
  docId,
  sourceName,
  onBackToDocuments,
}) => {
  const { t } = useTranslation();
  const [loading, setLoading] = useState<boolean>(true);
  const [error, setError] = useState<string | null>(null);
  const [directoryStructure, setDirectoryStructure] =
    useState<DirectoryStructure | null>(null);
  const [currentPath, setCurrentPath] = useState<string[]>([]);
  const token = useSelector(selectToken);
  const [activeMenuId, setActiveMenuId] = useState<string | null>(null);
  const menuRefs = useRef<{
    [key: string]: React.RefObject<HTMLDivElement | null>;
  }>({});
  const [selectedFile, setSelectedFile] = useState<{
    id: string;
    name: string;
  } | null>(null);
  const [searchQuery, setSearchQuery] = useState('');
  const [searchResults, setSearchResults] = useState<SearchResult[]>([]);
  const searchDropdownRef = useRef<HTMLDivElement>(null);
  const [isSyncing, setIsSyncing] = useState<boolean>(false);
  const [syncProgress, setSyncProgress] = useState<number>(0);
  const [sourceProvider, setSourceProvider] = useState<string>('');
  const [syncDone, setSyncDone] = useState<boolean>(false);

  useOutsideAlerter(
    searchDropdownRef,
    () => {
      setSearchQuery('');
      setSearchResults([]);
    },
    [],
    false,
  );

  const handleFileClick = (fileName: string) => {
    const fullPath = [...currentPath, fileName].join('/');
    setSelectedFile({
      id: fullPath,
      name: fileName,
    });
  };

  const handleSync = async () => {
    if (isSyncing) return;

    const provider = sourceProvider;

    setIsSyncing(true);
    setSyncProgress(0);

    try {
      const response = await userService.syncConnector(docId, provider, token);
      const data = await response.json();

      if (data.success) {
        console.log('Sync started successfully:', data.task_id);
        setSyncProgress(10);

        // Poll task status using userService
        const maxAttempts = 30;
        const pollInterval = 2000;

        for (let attempt = 0; attempt < maxAttempts; attempt++) {
          try {
            const statusResponse = await userService.getTaskStatus(
              data.task_id,
              token,
            );
            const statusData = await statusResponse.json();

            console.log(
              `Task status (attempt ${attempt + 1}):`,
              statusData.status,
            );

            if (statusData.status === 'SUCCESS') {
              setSyncProgress(100);
              console.log('Sync completed successfully');

              // Refresh directory structure
              try {
                const refreshResponse = await userService.getDirectoryStructure(
                  docId,
                  token,
                );
                const refreshData = await refreshResponse.json();
                if (refreshData && refreshData.directory_structure) {
                  setDirectoryStructure(refreshData.directory_structure);
                  setCurrentPath([]);
                }
                if (refreshData && refreshData.provider) {
                  setSourceProvider(refreshData.provider);
                }

                setSyncDone(true);
                setTimeout(() => setSyncDone(false), 5000);
              } catch (err) {
                console.error('Error refreshing directory structure:', err);
              }
              break;
            } else if (statusData.status === 'FAILURE') {
              console.error('Sync task failed:', statusData.result);
              break;
            } else if (statusData.status === 'PROGRESS') {
              const progress = Number(
                statusData.result && statusData.result.current != null
                  ? statusData.result.current
                  : statusData.meta && statusData.meta.current != null
                    ? statusData.meta.current
                    : 0,
              );
              setSyncProgress(Math.max(10, progress));
            }

            await new Promise((resolve) => setTimeout(resolve, pollInterval));
          } catch (error) {
            console.error('Error polling task status:', error);
            break;
          }
        }
      } else {
        console.error('Sync failed:', data.error);
      }
    } catch (err) {
      console.error('Error syncing connector:', err);
    } finally {
      setIsSyncing(false);
      setSyncProgress(0);
    }
  };

  useEffect(() => {
    const fetchDirectoryStructure = async () => {
      try {
        setLoading(true);

        const directoryResponse = await userService.getDirectoryStructure(
          docId,
          token,
        );
        const directoryData = await directoryResponse.json();

        if (directoryData && directoryData.directory_structure) {
          setDirectoryStructure(directoryData.directory_structure);
        } else {
          setError('Invalid response format');
        }

        if (directoryData && directoryData.provider) {
          setSourceProvider(directoryData.provider);
        }
      } catch (err) {
        setError('Failed to load source information');
        console.error(err);
      } finally {
        setLoading(false);
      }
    };

    if (docId) {
      fetchDirectoryStructure();
    }
  }, [docId, token]);

  const navigateToDirectory = (dirName: string) => {
    setCurrentPath([...currentPath, dirName]);
  };

  const navigateUp = () => {
    setCurrentPath(currentPath.slice(0, -1));
  };

  const getCurrentDirectory = (): DirectoryStructure => {
    if (!directoryStructure) return {};

    let current = directoryStructure;
    for (const dir of currentPath) {
      if (current[dir] && !current[dir].type) {
        current = current[dir] as DirectoryStructure;
      } else {
        return {};
      }
    }
    return current;
  };

  const getMenuRef = (id: string) => {
    if (!menuRefs.current[id]) {
      menuRefs.current[id] = React.createRef();
    }
    return menuRefs.current[id];
  };

  const handleMenuClick = (
    e: React.MouseEvent<HTMLButtonElement>,
    id: string,
  ) => {
    e.stopPropagation();
    setActiveMenuId(activeMenuId === id ? null : id);
  };

  const getActionOptions = (
    name: string,
    isFile: boolean,
    _itemId: string,
  ): MenuOption[] => {
    const options: MenuOption[] = [];

    options.push({
      icon: EyeView,
      label: t('settings.sources.view'),
      onClick: (event: React.SyntheticEvent) => {
        event.stopPropagation();
        if (isFile) {
          handleFileClick(name);
        } else {
          navigateToDirectory(name);
        }
      },
      iconWidth: 18,
      iconHeight: 18,
      variant: 'primary',
    });

    return options;
  };

  const calculateDirectoryStats = (
    structure: DirectoryStructure,
  ): { totalSize: number; totalTokens: number } => {
    let totalSize = 0;
    let totalTokens = 0;

    Object.entries(structure).forEach(([_, node]) => {
      if (node.type) {
        // It's a file
        totalSize += node.size_bytes || 0;
        totalTokens += node.token_count || 0;
      } else {
        // It's a directory, recurse
        const stats = calculateDirectoryStats(node);
        totalSize += stats.totalSize;
        totalTokens += stats.totalTokens;
      }
    });

    return { totalSize, totalTokens };
  };

  const handleBackNavigation = () => {
    if (selectedFile) {
      setSelectedFile(null);
    } else if (currentPath.length === 0) {
      if (onBackToDocuments) {
        onBackToDocuments();
      }
    } else {
      navigateUp();
    }
  };

  const renderPathNavigation = () => {
    return (
      <div className="mb-0 flex min-h-[38px] flex-col gap-2 text-base sm:flex-row sm:items-center sm:justify-between">
        {/* Left side with path navigation */}
        <div className="flex w-full items-center sm:w-auto">
          <button
            className="mr-3 flex h-[29px] w-[29px] items-center justify-center rounded-full border p-2 text-sm font-medium text-gray-400 dark:border-0 dark:bg-[#28292D] dark:text-gray-500 dark:hover:bg-[#2E2F34]"
            onClick={handleBackNavigation}
          >
            <img src={ArrowLeft} alt="left-arrow" className="h-3 w-3" />
          </button>

          <div className="flex flex-wrap items-center">
            <span className="font-semibold break-words text-[#7D54D1]">
              {sourceName}
            </span>
            {currentPath.length > 0 && (
              <>
                <span className="mx-1 flex-shrink-0 text-gray-500">/</span>
                {currentPath.map((dir, index) => (
                  <React.Fragment key={index}>
                    <span className="break-words text-gray-700 dark:text-[#E0E0E0]">
                      {dir}
                    </span>
                    {index < currentPath.length - 1 && (
                      <span className="mx-1 flex-shrink-0 text-gray-500">
                        /
                      </span>
                    )}
                  </React.Fragment>
                ))}
              </>
            )}
          </div>
        </div>

        <div className="relative mt-2 flex w-full flex-row flex-nowrap items-center justify-end gap-2 sm:mt-0 sm:w-auto">
          {renderFileSearch()}

          {/* Sync button */}
          <button
            onClick={handleSync}
            disabled={isSyncing}
            className={`flex h-[38px] min-w-[108px] items-center justify-center rounded-full px-4 text-[14px] font-medium whitespace-nowrap transition-colors ${
              isSyncing
                ? 'cursor-not-allowed bg-gray-300 text-gray-600 dark:bg-gray-600 dark:text-gray-400'
                : 'bg-purple-30 hover:bg-violets-are-blue text-white'
            }`}
            title={
              isSyncing
                ? `${t('settings.sources.syncing')} ${syncProgress}%`
                : syncDone
                  ? 'Done'
                  : t('settings.sources.sync')
            }
          >
            <img
              src={SyncIcon}
              alt={t('settings.sources.sync')}
              className={`mr-2 h-4 w-4 brightness-0 invert filter ${isSyncing ? 'animate-spin' : ''}`}
            />
            {isSyncing
              ? `${syncProgress}%`
              : syncDone
                ? 'Done'
                : t('settings.sources.sync')}
          </button>
        </div>
      </div>
    );
  };

  const renderFileTree = (directory: DirectoryStructure) => {
    if (!directory) return [];

    // Create parent directory row
    const parentRow =
      currentPath.length > 0
        ? [
            <tr
              key="parent-dir"
              className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
              onClick={navigateUp}
            >
              <td className="px-2 py-2 lg:px-4">
                <div className="flex items-center">
                  <img
                    src={FolderIcon}
                    alt={t('settings.sources.parentFolderAlt')}
                    className="mr-2 h-4 w-4 flex-shrink-0"
                  />
                  <span className="truncate text-sm dark:text-[#E0E0E0]">
                    ..
                  </span>
                </div>
              </td>
              <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
                -
              </td>
              <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
                -
              </td>
              <td className="w-10 px-2 py-2 text-sm lg:px-4"></td>
            </tr>,
          ]
        : [];

    // Sort entries: directories first, then files, both alphabetically
    const sortedEntries = Object.entries(directory).sort(
      ([nameA, nodeA], [nameB, nodeB]) => {
        const isFileA = !!nodeA.type;
        const isFileB = !!nodeB.type;

        if (isFileA !== isFileB) {
          return isFileA ? 1 : -1; // Directories first
        }

        return nameA.localeCompare(nameB); // Alphabetical within each group
      },
    );

    // Process directories
    const directoryRows = sortedEntries
      .filter(([_, node]) => !node.type)
      .map(([name, node]) => {
        const itemId = `dir-${name}`;
        const menuRef = getMenuRef(itemId);

        // Calculate directory stats
        const dirStats = calculateDirectoryStats(node as DirectoryStructure);

        return (
          <tr
            key={itemId}
            className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
            onClick={() => navigateToDirectory(name)}
          >
            <td className="px-2 py-2 lg:px-4">
              <div className="flex min-w-0 items-center">
                <img
                  src={FolderIcon}
                  alt={t('settings.sources.folderAlt')}
                  className="mr-2 h-4 w-4 flex-shrink-0"
                />
                <span className="truncate text-sm dark:text-[#E0E0E0]">
                  {name}
                </span>
              </div>
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {dirStats.totalTokens > 0
                ? dirStats.totalTokens.toLocaleString()
                : '-'}
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {dirStats.totalSize > 0 ? formatBytes(dirStats.totalSize) : '-'}
            </td>
            <td className="w-10 px-2 py-2 text-sm lg:px-4">
              <div ref={menuRef} className="relative">
                <button
                  onClick={(e) => handleMenuClick(e, itemId)}
                  className="inline-flex h-[35px] w-[24px] shrink-0 items-center justify-center rounded-md font-medium transition-colors hover:bg-[#EBEBEB] dark:hover:bg-[#26272E]"
                  aria-label={t('settings.sources.menuAlt')}
                >
                  <img
                    src={ThreeDots}
                    alt={t('settings.sources.menuAlt')}
                    className="opacity-60 hover:opacity-100"
                  />
                </button>
                <ContextMenu
                  isOpen={activeMenuId === itemId}
                  setIsOpen={(isOpen) =>
                    setActiveMenuId(isOpen ? itemId : null)
                  }
                  options={getActionOptions(name, false, itemId)}
                  anchorRef={menuRef}
                  position="bottom-left"
                  offset={{ x: -4, y: 4 }}
                />
              </div>
            </td>
          </tr>
        );
      });

    // Process files
    const fileRows = sortedEntries
      .filter(([_, node]) => !!node.type)
      .map(([name, node]) => {
        const itemId = `file-${name}`;
        const menuRef = getMenuRef(itemId);

        return (
          <tr
            key={itemId}
            className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
            onClick={() => handleFileClick(name)}
          >
            <td className="px-2 py-2 lg:px-4">
              <div className="flex min-w-0 items-center">
                <img
                  src={FileIcon}
                  alt={t('settings.sources.fileAlt')}
                  className="mr-2 h-4 w-4 flex-shrink-0"
                />
                <span className="truncate text-sm dark:text-[#E0E0E0]">
                  {name}
                </span>
              </div>
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {node.token_count?.toLocaleString() || '-'}
            </td>
            <td className="px-2 py-2 text-sm md:px-4 dark:text-[#E0E0E0]">
              {node.size_bytes ? formatBytes(node.size_bytes) : '-'}
            </td>
            <td className="w-10 px-2 py-2 text-sm lg:px-4">
              <div ref={menuRef} className="relative">
                <button
                  onClick={(e) => handleMenuClick(e, itemId)}
                  className="inline-flex h-[35px] w-[24px] shrink-0 items-center justify-center rounded-md font-medium transition-colors hover:bg-[#EBEBEB] dark:hover:bg-[#26272E]"
                  aria-label={t('settings.sources.menuAlt')}
                >
                  <img
                    src={ThreeDots}
                    alt={t('settings.sources.menuAlt')}
                    className="opacity-60 hover:opacity-100"
                  />
                </button>
                <ContextMenu
                  isOpen={activeMenuId === itemId}
                  setIsOpen={(isOpen) =>
                    setActiveMenuId(isOpen ? itemId : null)
                  }
                  options={getActionOptions(name, true, itemId)}
                  anchorRef={menuRef}
                  position="bottom-left"
                  offset={{ x: -4, y: 4 }}
                />
              </div>
            </td>
          </tr>
        );
      });

    return [...parentRow, ...directoryRows, ...fileRows];
  };

  const searchFiles = (
    query: string,
    structure: DirectoryStructure,
    currentPath: string[] = [],
  ): SearchResult[] => {
    let results: SearchResult[] = [];

    Object.entries(structure).forEach(([name, node]) => {
      const fullPath = [...currentPath, name].join('/');

      if (name.toLowerCase().includes(query.toLowerCase())) {
        results.push({
          name,
          path: fullPath,
          isFile: !!node.type,
        });
      }

      if (!node.type) {
        // If it's a directory, search recursively
        results = [
          ...results,
          ...searchFiles(query, node as DirectoryStructure, [
            ...currentPath,
            name,
          ]),
        ];
      }
    });

    return results;
  };

  const handleSearchSelect = (result: SearchResult) => {
    if (result.isFile) {
      const pathParts = result.path.split('/');
      const fileName = pathParts.pop() || '';
      setCurrentPath(pathParts);

      setSelectedFile({
        id: result.path,
        name: fileName,
      });
    } else {
      setCurrentPath(result.path.split('/'));
      setSelectedFile(null);
    }
    setSearchQuery('');
    setSearchResults([]);
  };

  const renderFileSearch = () => {
    return (
      <div className="relative w-52" ref={searchDropdownRef}>
        <input
          type="text"
          value={searchQuery}
          onChange={(e) => {
            setSearchQuery(e.target.value);
            if (directoryStructure) {
              setSearchResults(searchFiles(e.target.value, directoryStructure));
            }
          }}
          placeholder={t('settings.sources.searchFiles')}
          className={`h-[38px] w-full border border-[#D1D9E0] px-4 py-2 dark:border-[#6A6A6A] ${searchQuery ? 'rounded-t-[24px]' : 'rounded-[24px]'} bg-transparent focus:outline-none dark:text-[#E0E0E0]`}
        />

        {searchQuery && (
          <div className="absolute top-full right-0 left-0 z-10 max-h-[calc(100vh-200px)] w-full overflow-hidden rounded-b-[12px] border border-t-0 border-[#D1D9E0] bg-white shadow-lg transition-all duration-200 dark:border-[#6A6A6A] dark:bg-[#1F2023]">
            <div className="max-h-[calc(100vh-200px)] overflow-x-hidden overflow-y-auto overscroll-contain">
              {searchResults.length === 0 ? (
                <div className="py-2 text-center text-sm text-gray-500 dark:text-gray-400">
                  {t('settings.sources.noResults')}
                </div>
              ) : (
                searchResults.map((result, index) => (
                  <div
                    key={index}
                    onClick={() => handleSearchSelect(result)}
                    title={result.path}
                    className={`flex min-w-0 cursor-pointer items-center px-3 py-2 hover:bg-[#ECEEEF] dark:hover:bg-[#27282D] ${
                      index !== searchResults.length - 1
                        ? 'border-b border-[#D1D9E0] dark:border-[#6A6A6A]'
                        : ''
                    }`}
                  >
                    <img
                      src={result.isFile ? FileIcon : FolderIcon}
                      alt={
                        result.isFile
                          ? t('settings.sources.fileAlt')
                          : t('settings.sources.folderAlt')
                      }
                      className="mr-2 h-4 w-4 flex-shrink-0"
                    />
                    <span className="flex-1 truncate text-sm dark:text-[#E0E0E0]">
                      {result.path.split('/').pop() || result.path}
                    </span>
                  </div>
                ))
              )}
            </div>
          </div>
        )}
      </div>
    );
  };

  const handleFileSearch = (searchQuery: string) => {
    if (directoryStructure) {
      return searchFiles(searchQuery, directoryStructure);
    }
    return [];
  };

  const handleFileSelect = (path: string) => {
    const pathParts = path.split('/');
    const fileName = pathParts.pop() || '';
    setCurrentPath(pathParts);
    setSelectedFile({
      id: path,
      name: fileName,
    });
  };

  const currentDirectory = getCurrentDirectory();

  const navigateToPath = (index: number) => {
    setCurrentPath(currentPath.slice(0, index + 1));
  };

  return (
    <div>
      {selectedFile ? (
        <div className="flex">
          <div className="flex-1">
            <Chunks
              documentId={docId}
              documentName={sourceName}
              handleGoBack={() => setSelectedFile(null)}
              path={selectedFile.id}
              onFileSearch={handleFileSearch}
              onFileSelect={handleFileSelect}
            />
          </div>
        </div>
      ) : (
        <div className="flex w-full max-w-full flex-col overflow-hidden">
          <div className="mb-2">{renderPathNavigation()}</div>

          <div className="w-full">
            <div className="overflow-x-auto rounded-[6px] border border-[#D1D9E0] dark:border-[#6A6A6A]">
              <table className="w-full min-w-[600px] table-auto bg-transparent">
                <thead className="bg-gray-100 dark:bg-[#27282D]">
                  <tr className="border-b border-[#D1D9E0] dark:border-[#6A6A6A]">
                    <th className="min-w-[200px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.fileName')}
                    </th>
                    <th className="min-w-[80px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.tokens')}
                    </th>
                    <th className="min-w-[80px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.size')}
                    </th>
                    <th className="w-10 px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]"></th>
                  </tr>
                </thead>
                <tbody>{renderFileTree(getCurrentDirectory())}</tbody>
              </table>
            </div>
          </div>
        </div>
      )}
    </div>
  );
};

export default ConnectorTreeComponent;
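The traversal logic above (calculateDirectoryStats, renderFileTree, searchFiles) implies a specific shape for the getDirectoryStructure response: a node with a type field is a file carrying token and size metadata, while a node without one is a nested directory. An illustrative value, with invented names and numbers:

// Illustrative only; shape inferred from how FileNode/DirectoryStructure are consumed above.
const sample: DirectoryStructure = {
  docs: {
    'intro.md': { type: 'file', token_count: 1200, size_bytes: 5321 },
    guides: {
      'setup.md': { type: 'file', token_count: 800, size_bytes: 3100 },
    },
  },
  'README.md': { type: 'file', token_count: 450, size_bytes: 2048 },
};
// The presence of `type` is exactly the file/directory test used by
// calculateDirectoryStats, renderFileTree, and searchFiles.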
@@ -82,14 +82,14 @@ export default function ContextMenu({
     // Adjust position based on specified position
     switch (position) {
       case 'bottom-left':
-        left = rect.left + scrollX - offset.x;
+        left = rect.right + scrollX - menuWidth + offset.x;
         break;
       case 'top-right':
         top = rect.top + scrollY - offset.y - menuHeight;
         break;
       case 'top-left':
         top = rect.top + scrollY - offset.y - menuHeight;
-        left = rect.left + scrollX - offset.x;
+        left = rect.right + scrollX - menuWidth + offset.x;
         break;
       // bottom-right is default
     }
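The effect of this change: for the two left-anchored positions, the menu is now right-aligned to the anchor's right edge instead of left-aligned to its left edge, so wide menus open inward rather than overflowing past the anchor. A quick numeric check under assumed values (rect.left = 120, rect.right = 300, menuWidth = 180, scrollX = 0, offset.x = -4; all hypothetical):

// Old: menu's left edge pinned near the anchor's left edge, growing rightwards.
const oldLeft = 120 + 0 - -4; // 124
// New: menu's right edge pinned near the anchor's right edge (menu spans 116..296).
const newLeft = 300 + 0 - 180 + -4; // 116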
@@ -3,8 +3,9 @@ import React from 'react';
 import Arrow2 from '../assets/dropdown-arrow.svg';
 import Edit from '../assets/edit.svg';
 import Trash from '../assets/trash.svg';
+import { DropdownOption, DropdownProps } from './types/Dropdown.types';

-function Dropdown({
+function Dropdown<T extends DropdownOption>({
   options,
   selectedValue,
   onSelect,
@@ -20,36 +21,7 @@
   placeholder,
   placeholderClassName = 'text-gray-500 dark:text-gray-400',
   contentSize = 'text-base',
-}: {
-  options:
-    | string[]
-    | { name: string; id: string; type: string }[]
-    | { label: string; value: string }[]
-    | { value: number; description: string }[];
-  selectedValue:
-    | string
-    | { label: string; value: string }
-    | { value: number; description: string }
-    | { name: string; id: string; type: string }
-    | null;
-  onSelect:
-    | ((value: string) => void)
-    | ((value: { name: string; id: string; type: string }) => void)
-    | ((value: { label: string; value: string }) => void)
-    | ((value: { value: number; description: string }) => void);
-  size?: string;
-  rounded?: 'xl' | '3xl';
-  buttonClassName?: string;
-  optionsClassName?: string;
-  border?: 'border' | 'border-2';
-  showEdit?: boolean;
-  onEdit?: (value: { name: string; id: string; type: string }) => void;
-  showDelete?: boolean | ((option: any) => boolean);
-  onDelete?: (value: string) => void;
-  placeholder?: string;
-  placeholderClassName?: string;
-  contentSize?: string;
-}) {
+}: DropdownProps<T>) {
   const dropdownRef = React.useRef<HTMLDivElement>(null);
   const [isOpen, setIsOpen] = React.useState(false);
   const borderRadius = rounded === 'xl' ? 'rounded-xl' : 'rounded-3xl';
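This refactor replaces the removed union-of-unions with a single generic parameter, so options, selectedValue, and onSelect are all tied to the same T and callers can no longer mix option shapes. Dropdown.types.ts itself is not shown in this view; based on the removed inline type it plausibly looks like the following sketch (a reconstruction, not the committed file):

// Hypothetical reconstruction of './types/Dropdown.types'.
export type DropdownOption =
  | string
  | { name: string; id: string; type: string }
  | { label: string; value: string }
  | { value: number; description: string };

export interface DropdownProps<T extends DropdownOption> {
  options: T[];
  selectedValue: T | null;
  onSelect: (value: T) => void;
  size?: string;
  rounded?: 'xl' | '3xl';
  buttonClassName?: string;
  optionsClassName?: string;
  border?: 'border' | 'border-2';
  showEdit?: boolean;
  onEdit?: (value: { name: string; id: string; type: string }) => void;
  showDelete?: boolean | ((option: T) => boolean);
  onDelete?: (value: string) => void;
  placeholder?: string;
  placeholderClassName?: string;
  contentSize?: string;
}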
frontend/src/components/FileTreeComponent.tsx (new file, 882 lines)
@@ -0,0 +1,882 @@
import React, { useState, useRef, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelector } from 'react-redux';
import { selectToken } from '../preferences/preferenceSlice';
import { formatBytes } from '../utils/stringUtils';
import Chunks from './Chunks';
import ContextMenu, { MenuOption } from './ContextMenu';
import userService from '../api/services/userService';
import FileIcon from '../assets/file.svg';
import FolderIcon from '../assets/folder.svg';
import ArrowLeft from '../assets/arrow-left.svg';
import ThreeDots from '../assets/three-dots.svg';
import EyeView from '../assets/eye-view.svg';
import OutlineSource from '../assets/outline-source.svg';
import Trash from '../assets/red-trash.svg';
import SearchIcon from '../assets/search.svg';
import { useOutsideAlerter } from '../hooks';
import ConfirmationModal from '../modals/ConfirmationModal';

interface FileNode {
  type?: string;
  token_count?: number;
  size_bytes?: number;
  [key: string]: any;
}

interface DirectoryStructure {
  [key: string]: FileNode;
}

interface FileTreeComponentProps {
  docId: string;
  sourceName: string;
  onBackToDocuments: () => void;
}

interface SearchResult {
  name: string;
  path: string;
  isFile: boolean;
}

const FileTreeComponent: React.FC<FileTreeComponentProps> = ({
  docId,
  sourceName,
  onBackToDocuments,
}) => {
  const { t } = useTranslation();
  const [loading, setLoading] = useState<boolean>(true);
  const [error, setError] = useState<string | null>(null);
  const [directoryStructure, setDirectoryStructure] =
    useState<DirectoryStructure | null>(null);
  const [currentPath, setCurrentPath] = useState<string[]>([]);
  const token = useSelector(selectToken);
  const [activeMenuId, setActiveMenuId] = useState<string | null>(null);
  const menuRefs = useRef<{
    [key: string]: React.RefObject<HTMLDivElement | null>;
  }>({});
  const [selectedFile, setSelectedFile] = useState<{
    id: string;
    name: string;
  } | null>(null);
  const [searchQuery, setSearchQuery] = useState('');
  const [searchResults, setSearchResults] = useState<SearchResult[]>([]);
  const searchDropdownRef = useRef<HTMLDivElement>(null);
  const currentOpRef = useRef<null | 'add' | 'remove' | 'remove_directory'>(
    null,
  );

  const [deleteModalState, setDeleteModalState] = useState<
    'ACTIVE' | 'INACTIVE'
  >('INACTIVE');
  const [itemToDelete, setItemToDelete] = useState<{
    name: string;
    isFile: boolean;
  } | null>(null);

  type QueuedOperation = {
    operation: 'add' | 'remove' | 'remove_directory';
    files?: File[];
    filePath?: string;
    directoryPath?: string;
    parentDirPath?: string;
  };
  const opQueueRef = useRef<QueuedOperation[]>([]);
  const processingRef = useRef(false);
  const [queueLength, setQueueLength] = useState(0);

  useOutsideAlerter(
    searchDropdownRef,
    () => {
      setSearchQuery('');
      setSearchResults([]);
    },
    [],
    false,
  );

  const handleFileClick = (fileName: string) => {
    const fullPath = [...currentPath, fileName].join('/');
    setSelectedFile({
      id: fullPath,
      name: fileName,
    });
  };

  useEffect(() => {
    const fetchDirectoryStructure = async () => {
      try {
        setLoading(true);
        const response = await userService.getDirectoryStructure(docId, token);
        const data = await response.json();

        if (data && data.directory_structure) {
          setDirectoryStructure(data.directory_structure);
        } else {
          setError('Invalid response format');
        }
      } catch (err) {
        setError('Failed to load directory structure');
        console.error(err);
      } finally {
        setLoading(false);
      }
    };

    if (docId) {
      fetchDirectoryStructure();
    }
  }, [docId, token]);

  const navigateToDirectory = (dirName: string) => {
    setCurrentPath((prev) => [...prev, dirName]);
  };

  const navigateUp = () => {
    setCurrentPath((prev) => prev.slice(0, -1));
  };

  const getCurrentDirectory = (): DirectoryStructure => {
    if (!directoryStructure) return {};

    let structure = directoryStructure;
    if (typeof structure === 'string') {
      try {
        structure = JSON.parse(structure);
      } catch (e) {
        console.error(
          'Error parsing directory structure in getCurrentDirectory:',
          e,
        );
        return {};
      }
    }

    if (typeof structure !== 'object' || structure === null) {
      return {};
    }

    let current: any = structure;
    for (const dir of currentPath) {
      if (
        current[dir] &&
        typeof current[dir] === 'object' &&
        !current[dir].type
      ) {
        current = current[dir];
      } else {
        return {};
      }
    }
    return current;
  };

  const handleBackNavigation = () => {
    if (selectedFile) {
      setSelectedFile(null);
    } else if (currentPath.length === 0) {
      if (onBackToDocuments) {
        onBackToDocuments();
      }
    } else {
      navigateUp();
    }
  };

  const getMenuRef = (itemId: string) => {
    if (!menuRefs.current[itemId]) {
      menuRefs.current[itemId] = React.createRef<HTMLDivElement>();
    }
    return menuRefs.current[itemId];
  };

  const handleMenuClick = (e: React.MouseEvent, itemId: string) => {
    e.preventDefault();
    e.stopPropagation();

    if (activeMenuId === itemId) {
      setActiveMenuId(null);
      return;
    }
    setActiveMenuId(itemId);
  };

  const getActionOptions = (
    name: string,
    isFile: boolean,
    _itemId: string,
  ): MenuOption[] => {
    const options: MenuOption[] = [];

    options.push({
      icon: EyeView,
      label: t('settings.sources.view'),
      onClick: (event: React.SyntheticEvent) => {
        event.stopPropagation();
        if (isFile) {
          handleFileClick(name);
        } else {
          navigateToDirectory(name);
        }
      },
      iconWidth: 18,
      iconHeight: 18,
      variant: 'primary',
    });

    options.push({
      icon: Trash,
      label: t('convTile.delete'),
      onClick: (event: React.SyntheticEvent) => {
        event.stopPropagation();
        confirmDeleteItem(name, isFile);
      },
      iconWidth: 18,
      iconHeight: 18,
      variant: 'danger',
    });

    return options;
  };

  const confirmDeleteItem = (name: string, isFile: boolean) => {
    setItemToDelete({ name, isFile });
    setDeleteModalState('ACTIVE');
    setActiveMenuId(null);
  };

  const handleConfirmedDelete = async () => {
    if (itemToDelete) {
      await handleDeleteFile(itemToDelete.name, itemToDelete.isFile);
      setDeleteModalState('INACTIVE');
      setItemToDelete(null);
    }
  };

  const handleCancelDelete = () => {
    setDeleteModalState('INACTIVE');
    setItemToDelete(null);
  };

  const manageSource = async (
    operation: 'add' | 'remove' | 'remove_directory',
    files?: File[] | null,
    filePath?: string,
    directoryPath?: string,
    parentDirPath?: string,
  ) => {
    currentOpRef.current = operation;

    try {
      const formData = new FormData();
      formData.append('source_id', docId);
      formData.append('operation', operation);

      if (operation === 'add' && files && files.length) {
        formData.append('parent_dir', parentDirPath ?? currentPath.join('/'));

        for (let i = 0; i < files.length; i++) {
          formData.append('file', files[i]);
        }
      } else if (operation === 'remove' && filePath) {
        const filePaths = JSON.stringify([filePath]);
        formData.append('file_paths', filePaths);
      } else if (operation === 'remove_directory' && directoryPath) {
        formData.append('directory_path', directoryPath);
      }

      const response = await userService.manageSourceFiles(formData, token);
      const result = await response.json();

      if (result.success && result.reingest_task_id) {
        if (operation === 'add') {
          console.log('Files uploaded successfully:', result.added_files);
        } else if (operation === 'remove') {
          console.log('Files deleted successfully:', result.removed_files);
        } else if (operation === 'remove_directory') {
          console.log(
            'Directory deleted successfully:',
            result.removed_directory,
          );
        }
        console.log('Reingest task started:', result.reingest_task_id);

        const maxAttempts = 30;
        const pollInterval = 2000;

        for (let attempt = 0; attempt < maxAttempts; attempt++) {
          try {
            const statusResponse = await userService.getTaskStatus(
              result.reingest_task_id,
              token,
            );
            const statusData = await statusResponse.json();

            console.log(
              `Task status (attempt ${attempt + 1}):`,
              statusData.status,
            );

            if (statusData.status === 'SUCCESS') {
              console.log('Task completed successfully');

              const structureResponse = await userService.getDirectoryStructure(
                docId,
                token,
              );
              const structureData = await structureResponse.json();

              if (structureData && structureData.directory_structure) {
                setDirectoryStructure(structureData.directory_structure);
                currentOpRef.current = null;
                return true;
              }
              break;
            } else if (statusData.status === 'FAILURE') {
              console.error('Task failed');
              break;
            }

            await new Promise((resolve) => setTimeout(resolve, pollInterval));
          } catch (error) {
            console.error('Error polling task status:', error);
            break;
          }
        }
      } else {
        throw new Error(
          `Failed to ${operation} ${operation === 'remove_directory' ? 'directory' : 'file(s)'}`,
        );
      }
    } catch (error) {
      const actionText =
        operation === 'add'
          ? 'uploading'
          : operation === 'remove_directory'
            ? 'deleting directory'
            : 'deleting file(s)';
      const errorText =
        operation === 'add'
          ? 'upload'
          : operation === 'remove_directory'
            ? 'delete directory'
            : 'delete file(s)';
      console.error(`Error ${actionText}:`, error);
      setError(`Failed to ${errorText}`);
    } finally {
      currentOpRef.current = null;
    }

    return false;
  };

  const processQueue = async () => {
    if (processingRef.current) return;
    processingRef.current = true;
    try {
      while (opQueueRef.current.length > 0) {
        const nextOp = opQueueRef.current.shift()!;
        setQueueLength(opQueueRef.current.length);
        await manageSource(
          nextOp.operation,
          nextOp.files,
          nextOp.filePath,
          nextOp.directoryPath,
          nextOp.parentDirPath,
        );
      }
    } finally {
      processingRef.current = false;
    }
  };

  const enqueueOperation = (op: QueuedOperation) => {
    opQueueRef.current.push(op);
    setQueueLength(opQueueRef.current.length);
    if (!processingRef.current) {
      void processQueue();
    }
  };

  const handleAddFile = () => {
    const fileInput = document.createElement('input');
    fileInput.type = 'file';
    fileInput.multiple = true;
    fileInput.accept =
      '.rst,.md,.pdf,.txt,.docx,.csv,.epub,.html,.mdx,.json,.xlsx,.pptx,.png,.jpg,.jpeg';

    fileInput.onchange = async (event) => {
      const fileList = (event.target as HTMLInputElement).files;
      if (!fileList || fileList.length === 0) return;
      const files = Array.from(fileList);
      enqueueOperation({
        operation: 'add',
        files,
        parentDirPath: currentPath.join('/'),
      });
    };

    fileInput.click();
  };

  const handleDeleteFile = async (name: string, isFile: boolean) => {
    // Construct the full path to the file or directory
    const itemPath = [...currentPath, name].join('/');

    if (isFile) {
      enqueueOperation({ operation: 'remove', filePath: itemPath });
    } else {
      enqueueOperation({
        operation: 'remove_directory',
        directoryPath: itemPath,
      });
    }
  };

  const renderPathNavigation = () => {
    return (
      <div className="mb-0 min-h-[38px] flex flex-col gap-2 text-base sm:flex-row sm:items-center sm:justify-between">
        {/* Left side with path navigation */}
        <div className="flex w-full items-center sm:w-auto">
          <button
            className="mr-3 flex h-[29px] w-[29px] items-center justify-center rounded-full border p-2 text-sm text-gray-400 dark:border-0 dark:bg-[#28292D] dark:text-gray-500 dark:hover:bg-[#2E2F34] font-medium"
            onClick={handleBackNavigation}
          >
            <img src={ArrowLeft} alt="left-arrow" className="h-3 w-3" />
          </button>

          <div className="flex flex-wrap items-center">
            <span className="text-[#7D54D1] font-semibold break-words">
              {sourceName}
            </span>
            {currentPath.length > 0 && (
              <>
                <span className="mx-1 flex-shrink-0 text-gray-500">/</span>
                {currentPath.map((dir, index) => (
                  <React.Fragment key={index}>
                    <span className="break-words text-gray-700 dark:text-gray-300">
                      {dir}
                    </span>
                    {index < currentPath.length - 1 && (
                      <span className="mx-1 flex-shrink-0 text-gray-500">
                        /
                      </span>
                    )}
                  </React.Fragment>
                ))}
              </>
            )}
            {selectedFile && (
              <>
                <span className="mx-1 flex-shrink-0 text-gray-500">/</span>
                <span className="break-words text-gray-700 dark:text-gray-300">
                  {selectedFile.name}
                </span>
              </>
            )}
          </div>
        </div>

        <div className="flex relative flex-row flex-nowrap items-center gap-2 w-full sm:w-auto justify-end mt-2 sm:mt-0">
          {processingRef.current && (
            <div className="text-sm text-gray-500">
              {currentOpRef.current === 'add'
                ? t('settings.sources.uploadingFilesTitle')
                : t('settings.sources.deletingTitle')}
            </div>
          )}

          {renderFileSearch()}

          {/* Add file button */}
          {!processingRef.current && (
            <button
              onClick={handleAddFile}
              className="bg-purple-30 hover:bg-violets-are-blue flex h-[38px] min-w-[108px] items-center justify-center rounded-full px-4 text-[14px] whitespace-nowrap text-white font-medium"
              title={t('settings.sources.addFile')}
            >
              {t('settings.sources.addFile')}
            </button>
          )}
        </div>
      </div>
    );
  };

  const calculateDirectoryStats = (
    structure: DirectoryStructure,
  ): { totalSize: number; totalTokens: number } => {
    let totalSize = 0;
    let totalTokens = 0;

    Object.entries(structure).forEach(([_, node]) => {
      if (node.type) {
        // It's a file
        totalSize += node.size_bytes || 0;
        totalTokens += node.token_count || 0;
      } else {
        // It's a directory, recurse
        const stats = calculateDirectoryStats(node);
        totalSize += stats.totalSize;
        totalTokens += stats.totalTokens;
      }
    });

    return { totalSize, totalTokens };
  };

  const renderFileTree = (structure: DirectoryStructure): React.ReactNode[] => {
    // Separate directories and files
    const entries = Object.entries(structure);
    const directories = entries.filter(([_, node]) => !node.type);
    const files = entries.filter(([_, node]) => node.type);

    // Create parent directory row
    const parentRow =
      currentPath.length > 0
        ? [
            <tr
              key="parent-dir"
              className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
              onClick={navigateUp}
            >
              <td className="px-2 py-2 lg:px-4">
                <div className="flex items-center">
                  <img
                    src={FolderIcon}
                    alt={t('settings.sources.parentFolderAlt')}
                    className="mr-2 h-4 w-4 flex-shrink-0"
                  />
                  <span className="truncate text-sm dark:text-[#E0E0E0]">
                    ..
                  </span>
                </div>
              </td>
              <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
                -
              </td>
              <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
                -
              </td>
              <td className="w-10 px-2 py-2 text-sm lg:px-4"></td>
            </tr>,
          ]
        : [];

    // Render directories first, then files
    return [
      ...parentRow,
      ...directories.map(([name, node]) => {
        const itemId = `dir-${name}`;
        const menuRef = getMenuRef(itemId);
        const dirStats = calculateDirectoryStats(node as DirectoryStructure);

        return (
          <tr
            key={itemId}
            className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
            onClick={() => navigateToDirectory(name)}
          >
            <td className="px-2 py-2 lg:px-4">
              <div className="flex min-w-0 items-center">
                <img
                  src={FolderIcon}
                  alt={t('settings.sources.folderAlt')}
                  className="mr-2 h-4 w-4 flex-shrink-0"
                />
                <span className="truncate text-sm dark:text-[#E0E0E0]">
                  {name}
                </span>
              </div>
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {dirStats.totalTokens > 0
                ? dirStats.totalTokens.toLocaleString()
                : '-'}
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {dirStats.totalSize > 0 ? formatBytes(dirStats.totalSize) : '-'}
            </td>
            <td className="w-10 px-2 py-2 text-sm lg:px-4">
              <div ref={menuRef} className="relative">
                <button
                  onClick={(e) => handleMenuClick(e, itemId)}
                  className="inline-flex h-[35px] w-[24px] shrink-0 items-center justify-center rounded-md transition-colors hover:bg-[#EBEBEB] dark:hover:bg-[#26272E] font-medium"
                  aria-label={t('settings.sources.menuAlt')}
                >
                  <img
                    src={ThreeDots}
                    alt={t('settings.sources.menuAlt')}
                    className="opacity-60 hover:opacity-100"
                  />
                </button>
                <ContextMenu
                  isOpen={activeMenuId === itemId}
                  setIsOpen={(isOpen) =>
                    setActiveMenuId(isOpen ? itemId : null)
                  }
                  options={getActionOptions(name, false, itemId)}
                  anchorRef={menuRef}
                  position="bottom-left"
                  offset={{ x: -4, y: 4 }}
                />
              </div>
            </td>
          </tr>
        );
      }),
      ...files.map(([name, node]) => {
        const itemId = `file-${name}`;
        const menuRef = getMenuRef(itemId);

        return (
          <tr
            key={itemId}
            className="cursor-pointer border-b border-[#D1D9E0] hover:bg-[#ECEEEF] dark:border-[#6A6A6A] dark:hover:bg-[#27282D]"
            onClick={() => handleFileClick(name)}
          >
            <td className="px-2 py-2 lg:px-4">
              <div className="flex min-w-0 items-center">
                <img
                  src={FileIcon}
                  alt={t('settings.sources.fileAlt')}
                  className="mr-2 h-4 w-4 flex-shrink-0"
                />
                <span className="truncate text-sm dark:text-[#E0E0E0]">
                  {name}
                </span>
              </div>
            </td>
            <td className="px-2 py-2 text-sm lg:px-4 dark:text-[#E0E0E0]">
              {node.token_count?.toLocaleString() || '-'}
            </td>
            <td className="px-2 py-2 text-sm md:px-4 dark:text-[#E0E0E0]">
              {node.size_bytes ? formatBytes(node.size_bytes) : '-'}
            </td>
            <td className="w-10 px-2 py-2 text-sm lg:px-4">
              <div ref={menuRef} className="relative">
                <button
                  onClick={(e) => handleMenuClick(e, itemId)}
                  className="inline-flex h-[35px] w-[24px] shrink-0 items-center justify-center rounded-md transition-colors hover:bg-[#EBEBEB] dark:hover:bg-[#26272E] font-medium"
                  aria-label={t('settings.sources.menuAlt')}
                >
                  <img
                    src={ThreeDots}
                    alt={t('settings.sources.menuAlt')}
                    className="opacity-60 hover:opacity-100"
                  />
                </button>
                <ContextMenu
                  isOpen={activeMenuId === itemId}
                  setIsOpen={(isOpen) =>
                    setActiveMenuId(isOpen ? itemId : null)
                  }
                  options={getActionOptions(name, true, itemId)}
                  anchorRef={menuRef}
                  position="bottom-left"
                  offset={{ x: -4, y: 4 }}
                />
              </div>
            </td>
          </tr>
        );
      }),
    ];
  };

  const currentDirectory = getCurrentDirectory();

  const searchFiles = (
    query: string,
    structure: DirectoryStructure,
    currentPath: string[] = [],
  ): SearchResult[] => {
    let results: SearchResult[] = [];

    Object.entries(structure).forEach(([name, node]) => {
      const fullPath = [...currentPath, name].join('/');

      if (name.toLowerCase().includes(query.toLowerCase())) {
        results.push({
          name,
          path: fullPath,
          isFile: !!node.type,
        });
      }

      if (!node.type) {
        // If it's a directory, search recursively
        results = [
          ...results,
          ...searchFiles(query, node as DirectoryStructure, [
            ...currentPath,
            name,
          ]),
        ];
      }
    });

    return results;
  };

  const handleSearchSelect = (result: SearchResult) => {
    if (result.isFile) {
      const pathParts = result.path.split('/');
      const fileName = pathParts.pop() || '';
      setCurrentPath(pathParts);

      setSelectedFile({
        id: result.path,
        name: fileName,
      });
    } else {
      setCurrentPath(result.path.split('/'));
      setSelectedFile(null);
    }
    setSearchQuery('');
    setSearchResults([]);
  };

  const renderFileSearch = () => {
    return (
      <div className="relative w-52" ref={searchDropdownRef}>
        <input
          type="text"
          value={searchQuery}
          onChange={(e) => {
            setSearchQuery(e.target.value);
            if (directoryStructure) {
              setSearchResults(searchFiles(e.target.value, directoryStructure));
            }
          }}
          placeholder={t('settings.sources.searchFiles')}
          className={`w-full h-[38px] border border-[#D1D9E0] px-4 py-2 dark:border-[#6A6A6A]
            ${searchQuery ? 'rounded-t-[24px]' : 'rounded-[24px]'}
            bg-transparent focus:outline-none dark:text-[#E0E0E0]`}
        />

        {searchQuery && (
          <div className="absolute top-full left-0 right-0 z-10 max-h-[calc(100vh-200px)] w-full overflow-hidden rounded-b-[12px] border border-t-0 border-[#D1D9E0] bg-white shadow-lg dark:border-[#6A6A6A] dark:bg-[#1F2023] transition-all duration-200">
            <div className="max-h-[calc(100vh-200px)] overflow-y-auto overflow-x-hidden overscroll-contain">
              {searchResults.length === 0 ? (
                <div className="py-2 text-center text-sm text-gray-500 dark:text-gray-400">
                  {t('settings.sources.noResults')}
                </div>
              ) : (
                searchResults.map((result, index) => (
                  <div
                    key={index}
                    onClick={() => handleSearchSelect(result)}
                    title={result.path}
                    className={`flex min-w-0 cursor-pointer items-center px-3 py-2 hover:bg-[#ECEEEF] dark:hover:bg-[#27282D] ${index !== searchResults.length - 1
                      ? 'border-b border-[#D1D9E0] dark:border-[#6A6A6A]'
                      : ''
                    }`}
                  >
                    <img
                      src={result.isFile ? FileIcon : FolderIcon}
                      alt={
                        result.isFile
                          ? t('settings.sources.fileAlt')
                          : t('settings.sources.folderAlt')
                      }
                      className="mr-2 h-4 w-4 flex-shrink-0"
                    />
                    <span className="text-sm dark:text-[#E0E0E0] truncate flex-1">
                      {result.path.split('/').pop() || result.path}
                    </span>
                  </div>
                ))
              )}
            </div>
          </div>
        )}
      </div>
    );
  };

  const handleFileSearch = (searchQuery: string) => {
    if (directoryStructure) {
      return searchFiles(searchQuery, directoryStructure);
    }
    return [];
  };

  const handleFileSelect = (path: string) => {
    const pathParts = path.split('/');
    const fileName = pathParts.pop() || '';
    setCurrentPath(pathParts);
    setSelectedFile({
      id: path,
      name: fileName,
    });
  };

  return (
    <div>
      {selectedFile ? (
        <div className="flex">
          <div className="flex-1">
            <Chunks
              documentId={docId}
              documentName={sourceName}
              handleGoBack={() => setSelectedFile(null)}
              path={selectedFile.id}
              onFileSearch={handleFileSearch}
              onFileSelect={handleFileSelect}
            />
          </div>
        </div>
      ) : (
        <div className="flex w-full max-w-full flex-col overflow-hidden">
          <div className="mb-2">{renderPathNavigation()}</div>

          <div className="w-full">
            <div className="overflow-x-auto rounded-[6px] border border-[#D1D9E0] dark:border-[#6A6A6A]">
              <table className="w-full min-w-[600px] table-auto bg-transparent">
                <thead className="bg-gray-100 dark:bg-[#27282D]">
                  <tr className="border-b border-[#D1D9E0] dark:border-[#6A6A6A]">
                    <th className="min-w-[200px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.fileName')}
                    </th>
                    <th className="min-w-[80px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.tokens')}
                    </th>
                    <th className="min-w-[80px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      {t('settings.sources.size')}
                    </th>
                    <th className="w-[60px] px-2 py-3 text-left text-sm font-medium text-gray-700 lg:px-4 dark:text-[#59636E]">
                      <span className="sr-only">
                        {t('settings.sources.actions')}
                      </span>
                    </th>
                  </tr>
                </thead>
                <tbody className="[&>tr:last-child]:border-b-0">
                  {renderFileTree(currentDirectory)}
                </tbody>
              </table>
            </div>
          </div>
        </div>
      )}
      <ConfirmationModal
        message={
          itemToDelete?.isFile
            ? t('settings.sources.confirmDelete')
            : t('settings.sources.deleteDirectoryWarning', { name: itemToDelete?.name })
        }
        modalState={deleteModalState}
        setModalState={setDeleteModalState}
        handleSubmit={handleConfirmedDelete}
        handleCancel={handleCancelDelete}
        submitLabel={t('convTile.delete')}
        variant="danger"
      />
    </div>
  );
};

export default FileTreeComponent;
@@ -1,6 +1,6 @@
 import React from 'react';

-interface DocumentHeadProps {
+interface HeadProps {
   title?: string;
   description?: string;
   keywords?: string;
@@ -13,7 +13,7 @@ interface DocumentHeadProps {
   children?: React.ReactNode;
 }

-export function DocumentHead({
+export function Head({
   title,
   description,
   keywords,
@@ -24,7 +24,7 @@ export function DocumentHead({
   twitterTitle,
   twitterDescription,
   children,
-}: DocumentHeadProps) {
+}: HeadProps) {
   return (
     <>
       {title && <title>{title}</title>}

@@ -16,7 +16,9 @@ const MermaidRenderer: React.FC<MermaidRendererProps> = ({
   isLoading,
 }) => {
   const [isDarkTheme] = useDarkTheme();
-  const diagramId = useRef(`mermaid-${crypto.randomUUID()}`);
+  const diagramId = useRef(
+    `mermaid-${Date.now()}-${Math.random().toString(36).substring(2)}`,
+  );
   const status = useSelector(selectStatus);
   const [error, setError] = useState<string | null>(null);
   const [showCode, setShowCode] = useState<boolean>(false);

@@ -10,7 +10,7 @@ const useTabs = () => {
   const { t } = useTranslation();
   const tabs = [
     t('settings.general.label'),
-    t('settings.documents.label'),
+    t('settings.sources.label'),
     t('settings.analytics.label'),
     t('settings.logs.label'),
     t('settings.tools.label'),

@@ -8,7 +8,9 @@ interface SkeletonLoaderProps {
     | 'logs'
     | 'table'
     | 'chatbot'
-    | 'dropdown';
+    | 'dropdown'
+    | 'chunkCards'
+    | 'sourceCards';
 }

 const SkeletonLoader: React.FC<SkeletonLoaderProps> = ({
@@ -182,6 +184,62 @@ const SkeletonLoader: React.FC<SkeletonLoaderProps> = ({
     </>
   );

+  const renderChunkCards = () => (
+    <>
+      {Array.from({ length: count }).map((_, index) => (
+        <div
+          key={`chunk-skel-${index}`}
+          className="relative flex h-[197px] flex-col rounded-[5.86px] border border-[#D1D9E0] dark:border-[#6A6A6A] overflow-hidden w-full max-w-[487px] animate-pulse"
+        >
+          <div className="w-full">
+            <div className="flex w-full items-center justify-between border-b border-[#D1D9E0] bg-[#F6F8FA] dark:bg-[#27282D] dark:border-[#6A6A6A] px-4 py-3">
+              <div className="h-4 bg-gray-300 dark:bg-gray-600 rounded w-20"></div>
+            </div>
+            <div className="px-4 pt-4 pb-6 space-y-3">
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-full"></div>
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-11/12"></div>
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-5/6"></div>
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-4/5"></div>
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-3/4"></div>
+              <div className="h-3 bg-gray-200 dark:bg-gray-700 rounded w-2/3"></div>
+            </div>
+          </div>
+        </div>
+      ))}
+    </>
+  );
+
+  const renderSourceCards = () => (
+    <>
+      {Array.from({ length: count }).map((_, idx) => (
+        <div
+          key={`source-skel-${idx}`}
+          className="flex h-[130px] w-full flex-col rounded-2xl bg-[#F9F9F9] dark:bg-[#383838] p-3 animate-pulse"
+        >
+          <div className="w-full flex-1">
+            <div className="flex w-full items-center justify-between gap-2">
+              <div className="flex-1">
+                <div className="h-[13px] w-full rounded bg-gray-200 dark:bg-gray-700"></div>
+              </div>
+              <div className="w-6 h-6 rounded bg-gray-200 dark:bg-gray-700"></div>
+            </div>
+          </div>
+
+          <div className="flex flex-col items-start justify-start gap-1 pt-3">
+            <div className="flex items-center gap-2 mb-1">
+              <div className="w-3 h-3 rounded bg-gray-200 dark:bg-gray-700"></div>
+              <div className="h-[12px] w-20 rounded bg-gray-200 dark:bg-gray-700"></div>
+            </div>
+            <div className="flex items-center gap-2">
+              <div className="w-3 h-3 rounded bg-gray-200 dark:bg-gray-700"></div>
+              <div className="h-[12px] w-16 rounded bg-gray-200 dark:bg-gray-700"></div>
+            </div>
+          </div>
+        </div>
+      ))}
+    </>
+  );
+
   const componentMap = {
     table: renderTable,
     chatbot: renderChatbot,
@@ -189,8 +247,11 @@ const SkeletonLoader: React.FC<SkeletonLoaderProps> = ({
     logs: renderLogs,
     default: renderDefault,
     analysis: renderAnalysis,
+    chunkCards: renderChunkCards,
+    sourceCards: renderSourceCards,
   };

   const render = componentMap[component] || componentMap.default;

   return <>{render()}</>;

@@ -158,7 +158,7 @@ function SourceDropdown({
         </div>
       )}
       <ConfirmationModal
-        message={t('settings.documents.deleteWarning', {
+        message={t('settings.sources.deleteWarning', {
           name: documentToDelete?.name,
         })}
         modalState={deleteModalState}

@@ -135,7 +135,7 @@ export default function SourcesPopup({
               type="text"
               value={searchTerm}
               onChange={(e) => setSearchTerm(e.target.value)}
-              placeholder={t('settings.documents.searchPlaceholder')}
+              placeholder={t('settings.sources.searchPlaceholder')}
               borderVariant="thin"
               className="mb-4"
               labelBgClassName="bg-lotion dark:bg-charleston-green-2"
@@ -203,11 +203,11 @@ export default function SourcesPopup({

         <div className="shrink-0 px-4 py-4 opacity-75 transition-opacity duration-200 hover:opacity-100 md:px-6">
           <a
-            href="/settings/documents"
+            href="/settings/sources"
             className="text-violets-are-blue inline-flex items-center gap-2 text-base font-medium"
             onClick={onClose}
           >
-            {t('settings.documents.goToDocuments')}
+            {t('settings.sources.goToSources')}
             <img src={RedirectIcon} alt="Redirect" className="h-3 w-3" />
           </a>
         </div>
@@ -217,7 +217,7 @@ export default function SourcesPopup({
             onClick={handleUploadClick}
             className="border-violets-are-blue text-violets-are-blue hover:bg-violets-are-blue w-auto rounded-full border px-4 py-2 text-[14px] font-medium transition-colors duration-200 hover:text-white"
           >
-            {t('settings.documents.uploadNew')}
+            {t('settings.sources.uploadNew')}
           </button>
         </div>
       </div>

frontend/src/components/types/Dropdown.types.ts (new file, 45 lines)
@@ -0,0 +1,45 @@
+export type DropdownOptionBase = {
+  id?: string;
+  type?: string;
+};
+
+export type StringOption = string;
+export type NameIdOption = { name: string; id: string } & DropdownOptionBase;
+export type LabelValueOption = {
+  label: string;
+  value: string;
+} & DropdownOptionBase;
+export type ValueDescriptionOption = {
+  value: number;
+  description: string;
+} & DropdownOptionBase;
+
+export type DropdownOption =
+  | StringOption
+  | NameIdOption
+  | LabelValueOption
+  | ValueDescriptionOption;
+
+export type DropdownSelectedValue = DropdownOption | null;
+
+export type OnSelectHandler<T extends DropdownOption = DropdownOption> = (
+  value: T,
+) => void;
+
+export interface DropdownProps<T extends DropdownOption = DropdownOption> {
+  options: T[];
+  selectedValue: DropdownSelectedValue;
+  onSelect: OnSelectHandler<T>;
+  size?: string;
+  rounded?: 'xl' | '3xl';
+  buttonClassName?: string;
+  optionsClassName?: string;
+  border?: 'border' | 'border-2';
+  showEdit?: boolean;
+  onEdit?: (value: NameIdOption) => void;
+  showDelete?: boolean | ((option: T) => boolean);
+  onDelete?: (id: string) => void;
+  placeholder?: string;
+  placeholderClassName?: string;
+  contentSize?: string;
+}

@@ -92,8 +92,7 @@ const ConversationBubble = forwardRef<
   const [editInputBox, setEditInputBox] = useState<string>('');
   const messageRef = useRef<HTMLDivElement>(null);
   const [shouldShowToggle, setShouldShowToggle] = useState(false);
-  const [isLikeClicked, setIsLikeClicked] = useState(false);
-  const [isDislikeClicked, setIsDislikeClicked] = useState(false);
+
   const [activeTooltip, setActiveTooltip] = useState<number | null>(null);
   const [isSidebarOpen, setIsSidebarOpen] = useState<boolean>(false);
   const editableQueryRef = useRef<HTMLDivElement>(null);
@@ -550,104 +549,71 @@ const ConversationBubble = forwardRef<
           )}
          {message && (
            <div className="my-2 ml-2 flex justify-start">
-              <div
-                className={`relative mr-2 block items-center justify-center lg:invisible ${type !== 'ERROR' ? 'lg:group-hover:visible' : 'hidden'}`}
-              >
-                <div>
-                  <CopyButton textToCopy={message} />
-                </div>
-              </div>
-              <div
-                className={`relative mr-2 block items-center justify-center lg:invisible ${type !== 'ERROR' ? 'lg:group-hover:visible' : 'hidden'}`}
-              >
-                <div>
-                  <SpeakButton text={message} />
-                </div>
-              </div>
-              {type === 'ERROR' && (
+              {type === 'ERROR' ? (
                 <div className="relative mr-2 block items-center justify-center">
                   <div>{retryBtn}</div>
                 </div>
-              )}
-              {handleFeedback && (
-                <>
-                  <div
-                    className={`relative mr-2 flex items-center justify-center ${
-                      feedback === 'LIKE' || isLikeClicked
-                        ? 'visible'
-                        : 'lg:invisible'
-                    } ${type !== 'ERROR' ? 'lg:group-hover:visible' : ''} ${feedback === 'DISLIKE' && type !== 'ERROR' ? 'hidden' : ''}`}
-                  >
-                    <div>
-                      <div
-                        className={`flex items-center justify-center rounded-full p-2 ${
-                          isLikeHovered
-                            ? 'dark:bg-purple-taupe bg-[#EEEEEE]'
-                            : 'bg-white-3000 dark:bg-transparent'
-                        }`}
-                      >
-                        <Like
-                          className={`cursor-pointer ${
-                            isLikeClicked || feedback === 'LIKE'
-                              ? 'fill-white-3000 stroke-purple-30 dark:fill-transparent'
-                              : 'stroke-gray-4000 fill-none'
-                          }`}
-                          onClick={() => {
-                            if (feedback === undefined || feedback === null) {
-                              handleFeedback?.('LIKE');
-                              setIsLikeClicked(true);
-                              setIsDislikeClicked(false);
-                            } else if (feedback === 'LIKE') {
-                              handleFeedback?.(null);
-                              setIsLikeClicked(false);
-                              setIsDislikeClicked(false);
-                            }
-                          }}
-                          onMouseEnter={() => setIsLikeHovered(true)}
-                          onMouseLeave={() => setIsLikeHovered(false)}
-                        ></Like>
-                      </div>
-                    </div>
-                  </div>
-                  <div
-                    className={`relative mr-2 flex items-center justify-center ${
-                      feedback === 'DISLIKE' || isLikeClicked
-                        ? 'visible'
-                        : 'lg:invisible'
-                    } ${type !== 'ERROR' ? 'lg:group-hover:visible' : ''} ${feedback === 'LIKE' && type !== 'ERROR' ? 'hidden' : ''}`}
-                  >
-                    <div>
-                      <div
-                        className={`flex items-center justify-center rounded-full p-2 ${
-                          isDislikeHovered
-                            ? 'dark:bg-purple-taupe bg-[#EEEEEE]'
-                            : 'bg-white-3000 dark:bg-transparent'
-                        }`}
-                      >
-                        <Dislike
-                          className={`cursor-pointer ${
-                            isDislikeClicked || feedback === 'DISLIKE'
-                              ? 'fill-white-3000 stroke-red-2000 dark:fill-transparent'
-                              : 'stroke-gray-4000 fill-none'
-                          }`}
-                          onClick={() => {
-                            if (feedback === undefined || feedback === null) {
-                              handleFeedback?.('DISLIKE');
-                              setIsDislikeClicked(true);
-                              setIsLikeClicked(false);
-                            } else if (feedback === 'DISLIKE') {
-                              handleFeedback?.(null);
-                              setIsLikeClicked(false);
-                              setIsDislikeClicked(false);
-                            }
-                          }}
-                          onMouseEnter={() => setIsDislikeHovered(true)}
-                          onMouseLeave={() => setIsDislikeHovered(false)}
-                        ></Dislike>
-                      </div>
-                    </div>
-                  </div>
-                </>
-              )}
+              ) : (
+                <>
+                  <div className="relative mr-2 block items-center justify-center">
+                    <CopyButton textToCopy={message} />
+                  </div>
+                  <div className="relative mr-2 block items-center justify-center">
+                    <SpeakButton text={message} />
+                  </div>
+                  {handleFeedback && (
+                    <>
+                      <div className="relative mr-2 flex items-center justify-center">
+                        <div>
+                          <div
+                            className={`flex items-center justify-center rounded-full p-2 ${
+                              isLikeHovered
+                                ? 'dark:bg-purple-taupe bg-[#EEEEEE]'
+                                : 'bg-white-3000 dark:bg-transparent'
+                            }`}
+                          >
+                            <Like
+                              className={`${feedback === 'LIKE' ? 'fill-white-3000 stroke-purple-30 dark:fill-transparent' : 'stroke-gray-4000 fill-none'} cursor-pointer`}
+                              onClick={() => {
+                                if (feedback === 'LIKE') {
+                                  handleFeedback?.(null);
+                                } else {
+                                  handleFeedback?.('LIKE');
+                                }
+                              }}
+                              onMouseEnter={() => setIsLikeHovered(true)}
+                              onMouseLeave={() => setIsLikeHovered(false)}
+                            ></Like>
+                          </div>
+                        </div>
+                      </div>
+                      <div className="relative mr-2 flex items-center justify-center">
+                        <div>
+                          <div
+                            className={`flex items-center justify-center rounded-full p-2 ${
+                              isDislikeHovered
+                                ? 'dark:bg-purple-taupe bg-[#EEEEEE]'
+                                : 'bg-white-3000 dark:bg-transparent'
+                            }`}
+                          >
+                            <Dislike
+                              className={`${feedback === 'DISLIKE' ? 'fill-white-3000 stroke-red-2000 dark:fill-transparent' : 'stroke-gray-4000 fill-none'} cursor-pointer`}
+                              onClick={() => {
+                                if (feedback === 'DISLIKE') {
+                                  handleFeedback?.(null);
+                                } else {
+                                  handleFeedback?.('DISLIKE');
+                                }
+                              }}
+                              onMouseEnter={() => setIsDislikeHovered(true)}
+                              onMouseLeave={() => setIsDislikeHovered(false)}
+                            ></Dislike>
+                          </div>
+                        </div>
+                      </div>
+                    </>
+                  )}
+                </>
+              )}
             </div>
           )}

@@ -23,7 +23,7 @@ import {
   updateQuery,
 } from './sharedConversationSlice';
 import { selectCompletedAttachments } from '../upload/uploadSlice';
-import { DocumentHead } from '../components/DocumentHead';
+import { Head as DocumentHead } from '../components/Head';

 export const SharedConversation = () => {
   const navigate = useNavigate();

@@ -8,7 +8,6 @@ export function handleFetchAnswer(
   signal: AbortSignal,
   token: string | null,
-  selectedDocs: Doc | null,
   history: Array<any> = [],
   conversationId: string | null,
   promptId: string | null,
   chunks: string,
@@ -37,16 +36,8 @@ export function handleFetchAnswer(
     title: any;
   }
 > {
-  history = history.map((item) => {
-    return {
-      prompt: item.prompt,
-      response: item.response,
-      tool_calls: item.tool_calls,
-    };
-  });
   const payload: RetrievalPayload = {
     question: question,
     history: JSON.stringify(history),
     conversation_id: conversationId,
     prompt_id: promptId,
     chunks: chunks,
@@ -94,7 +85,6 @@ export function handleFetchAnswerSteaming(
   signal: AbortSignal,
   token: string | null,
-  selectedDocs: Doc | null,
   history: Array<any> = [],
   conversationId: string | null,
   promptId: string | null,
   chunks: string,
@@ -105,17 +95,8 @@ export function handleFetchAnswerSteaming(
   attachments?: string[],
   save_conversation = true,
 ): Promise<Answer> {
-  history = history.map((item) => {
-    return {
-      prompt: item.prompt,
-      response: item.response,
-      thought: item.thought,
-      tool_calls: item.tool_calls,
-    };
-  });
   const payload: RetrievalPayload = {
     question: question,
     history: JSON.stringify(history),
     conversation_id: conversationId,
     prompt_id: promptId,
     chunks: chunks,
@@ -192,20 +173,11 @@ export function handleSearch(
   token: string | null,
-  selectedDocs: Doc | null,
   conversation_id: string | null,
   history: Array<any> = [],
   chunks: string,
   token_limit: number,
 ) {
-  history = history.map((item) => {
-    return {
-      prompt: item.prompt,
-      response: item.response,
-      tool_calls: item.tool_calls,
-    };
-  });
   const payload: RetrievalPayload = {
     question: question,
     history: JSON.stringify(history),
     conversation_id: conversation_id,
     chunks: chunks,
     token_limit: token_limit,

|
||||
thought: string;
|
||||
sources: { title: string; text: string; source: string }[];
|
||||
tool_calls: ToolCallsType[];
|
||||
structured?: boolean;
|
||||
schema?: object;
|
||||
}
|
||||
|
||||
export interface Query {
|
||||
@@ -46,13 +48,14 @@ export interface Query {
|
||||
tool_calls?: ToolCallsType[];
|
||||
error?: string;
|
||||
attachments?: { id: string; fileName: string }[];
|
||||
structured?: boolean;
|
||||
schema?: object;
|
||||
}
|
||||
|
||||
export interface RetrievalPayload {
|
||||
question: string;
|
||||
active_docs?: string;
|
||||
retriever?: string;
|
||||
history: string;
|
||||
conversation_id: string | null;
|
||||
prompt_id?: string | null;
|
||||
chunks: string;
|
||||
|
||||
@@ -57,7 +57,6 @@ export const fetchAnswer = createAsyncThunk<
       signal,
       state.preference.token,
-      state.preference.selectedDocs!,
       state.conversation.queries,
       currentConversationId,
       state.preference.prompt.id,
       state.preference.chunks,
@@ -131,6 +130,18 @@ export const fetchAnswer = createAsyncThunk<
                 message: data.error,
               }),
             );
+          } else if (data.type === 'structured_answer') {
+            dispatch(
+              updateStreamingQuery({
+                conversationId: currentConversationId,
+                index: targetIndex,
+                query: {
+                  response: data.answer,
+                  structured: data.structured,
+                  schema: data.schema,
+                },
+              }),
+            );
           } else {
             dispatch(
               updateStreamingQuery({
@@ -153,7 +164,6 @@ export const fetchAnswer = createAsyncThunk<
       signal,
       state.preference.token,
-      state.preference.selectedDocs!,
      state.conversation.queries,
      state.conversation.conversationId,
      state.preference.prompt.id,
      state.preference.chunks,
@@ -252,6 +262,14 @@ export const conversationSlice = createSlice({
         state.queries[index].response =
           (state.queries[index].response || '') + query.response;
       }
+
+      if (query.structured !== undefined) {
+        state.queries[index].structured = query.structured;
+      }
+
+      if (query.schema !== undefined) {
+        state.queries[index].schema = query.schema;
+      }
     },
     updateConversationId(
       state,

@@ -133,10 +133,10 @@ layer(base);
 }

 @utility table-default {
-  @apply block w-full table-auto justify-center overflow-auto rounded-xl border border-silver text-center dark:border-silver/40 dark:text-bright-gray;
+  @apply border-silver dark:border-silver/40 dark:text-bright-gray block w-full table-auto justify-center overflow-auto rounded-xl border text-center;

   & th {
-    @apply text-nowrap p-4 font-normal text-gray-400;
+    @apply p-4 font-normal text-nowrap text-gray-400;
   }

   & th {
@@ -148,7 +148,7 @@ layer(base);
   }

   & td {
-    @apply w-full border-t border-silver px-4 py-2 dark:border-silver/40;
+    @apply border-silver dark:border-silver/40 w-full border-t px-4 py-2;
   }

   & td:last-child {

@@ -208,483 +208,482 @@ layer(base);
 }
 }

@layer base {
  /*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */

  /* Document
     ========================================================================== */

  /**
   * 1. Correct the line height in all browsers.
   * 2. Prevent adjustments of font size after orientation changes in iOS.
   */

  html {
    line-height: 1.15; /* 1 */
    -webkit-text-size-adjust: 100%; /* 2 */
    min-height: 100vh;
    overflow-x: hidden;
  }

  /* Sections
     ========================================================================== */

  /**
   * Remove the margin in all browsers.
   */

  body {
    margin: 0;
    min-height: var(--viewport-height);
    overflow-x: hidden;
    font-family: 'Inter', sans-serif;
  }

  /*
    Avoid over-scrolling in mobile browsers
  */
  @media only screen and (max-width: 500px) {
    body,
    html {
      min-height: var(--viewport-height);
      position: fixed;
      width: 100%;
    }
  }

  /**
   * Render the `main` element consistently in IE.
   */

  main {
    display: block;
  }

  /**
   * Correct the font size and margin on `h1` elements within `section` and
   * `article` contexts in Chrome, Firefox, and Safari.
   */

  h1 {
    font-size: 2em;
    margin: 0.67em 0;
  }

  /* Grouping content
     ========================================================================== */

  /**
   * 1. Add the correct box sizing in Firefox.
   * 2. Show the overflow in Edge and IE.
   */

  hr {
    box-sizing: content-box; /* 1 */
    height: 0; /* 1 */
    overflow: visible; /* 2 */
  }

  /**
   * 1. Correct the inheritance and scaling of font size in all browsers.
   * 2. Correct the odd `em` font sizing in all browsers.
   */

  pre {
    font-family: monospace, monospace; /* 1 */
    font-size: 1em; /* 2 */
  }

  /* Text-level semantics
     ========================================================================== */

  /**
   * Remove the gray background on active links in IE 10.
   */

  a {
    background-color: transparent;
  }

  /**
   * 1. Remove the bottom border in Chrome 57-
   * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.
   */

  abbr[title] {
    border-bottom: none; /* 1 */
    text-decoration: underline; /* 2 */
    text-decoration: underline dotted; /* 2 */
  }

  /**
   * Add the correct font weight in Chrome, Edge, and Safari.
   */

  b,
  strong {
    font-weight: bolder;
  }

  /**
   * 1. Correct the inheritance and scaling of font size in all browsers.
   * 2. Correct the odd `em` font sizing in all browsers.
   */

  code,
  kbd,
  samp {
    font-family: monospace, monospace; /* 1 */
    font-size: 1em; /* 2 */
  }

  /**
   * Add the correct font size in all browsers.
   */

  small {
    font-size: 80%;
  }

  /**
   * Prevent `sub` and `sup` elements from affecting the line height in
   * all browsers.
   */

  sub,
  sup {
    font-size: 75%;
    line-height: 0;
    position: relative;
    vertical-align: baseline;
  }

  sub {
    bottom: -0.25em;
  }

  sup {
    top: -0.5em;
  }

  /* Embedded content
     ========================================================================== */

  /**
   * Remove the border on images inside links in IE 10.
   */

  img {
    border-style: none;
  }

  /* Forms
     ========================================================================== */

  /**
   * 1. Change the font styles in all browsers.
   * 2. Remove the margin in Firefox and Safari.
   */

  button,
  input,
  optgroup,
  select,
  textarea {
    font-family: inherit; /* 1 */
    font-size: 100%; /* 1 */
    line-height: 1.15; /* 1 */
    margin: 0; /* 2 */
  }

  /**
   * Show the overflow in IE.
   * 1. Show the overflow in Edge.
   */

  button,
  input {
    /* 1 */
    overflow: visible;
  }

  /**
   * Remove the inheritance of text transform in Edge, Firefox, and IE.
   * 1. Remove the inheritance of text transform in Firefox.
   */

  button,
  select {
    /* 1 */
    text-transform: none;
  }

  /**
   * Correct the inability to style clickable types in iOS and Safari.
   */

  button,
  [type='button'],
  [type='reset'],
  [type='submit'] {
    -webkit-appearance: button;
    cursor: pointer;
  }

  /**
   * Remove the inner border and padding in Firefox.
   */

  button::-moz-focus-inner,
  [type='button']::-moz-focus-inner,
  [type='reset']::-moz-focus-inner,
  [type='submit']::-moz-focus-inner {
    border-style: none;
    padding: 0;
  }

  /**
   * Restore the focus styles unset by the previous rule.
   */

  button:-moz-focusring,
  [type='button']:-moz-focusring,
  [type='reset']:-moz-focusring,
  [type='submit']:-moz-focusring {
    outline: 1px dotted ButtonText;
  }

  /**
   * Correct the padding in Firefox.
   */

  fieldset {
    padding: 0.35em 0.75em 0.625em;
  }

  /**
   * 1. Correct the text wrapping in Edge and IE.
   * 2. Correct the color inheritance from `fieldset` elements in IE.
   * 3. Remove the padding so developers are not caught out when they zero out
   *    `fieldset` elements in all browsers.
   */

  legend {
    box-sizing: border-box; /* 1 */
    color: inherit; /* 2 */
    display: table; /* 1 */
    max-width: 100%; /* 1 */
    padding: 0; /* 3 */
    white-space: normal; /* 1 */
  }

  /**
   * Add the correct vertical alignment in Chrome, Firefox, and Opera.
   */

  progress {
    vertical-align: baseline;
  }

  /**
   * Remove the default vertical scrollbar in IE 10+.
   */

  textarea {
    overflow: auto;
  }

  /**
   * 1. Add the correct box sizing in IE 10.
   * 2. Remove the padding in IE 10.
   */

  [type='checkbox'],
  [type='radio'] {
    box-sizing: border-box; /* 1 */
    padding: 0; /* 2 */
  }

  /**
   * Correct the cursor style of increment and decrement buttons in Chrome.
   */

  [type='number']::-webkit-inner-spin-button,
  [type='number']::-webkit-outer-spin-button {
    height: auto;
  }

  /**
   * 1. Correct the odd appearance in Chrome and Safari.
   * 2. Correct the outline style in Safari.
   */

  [type='search'] {
    -webkit-appearance: textfield; /* 1 */
    outline-offset: -2px; /* 2 */
  }

  /**
   * Remove the inner padding in Chrome and Safari on macOS.
   */

  [type='search']::-webkit-search-decoration {
    -webkit-appearance: none;
  }

  /**
   * 1. Correct the inability to style clickable types in iOS and Safari.
   * 2. Change font properties to `inherit` in Safari.
   */

  ::-webkit-file-upload-button {
    -webkit-appearance: button; /* 1 */
    font: inherit; /* 2 */
  }

  /* Interactive
     ========================================================================== */

  /*
   * Add the correct display in Edge, IE 10+, and Firefox.
   */

  details {
    display: block;
  }

  /*
   * Add the correct display in all browsers.
   */

  summary {
    display: list-item;
  }

  /* Misc
     ========================================================================== */

  /**
   * Add the correct display in IE 10+.
   */

  template {
    display: none;
  }

  /**
   * Add the correct display in IE 10.
   */

  [hidden] {
    display: none;
  }

  [contentEditable]:empty:before {
    content: attr(placeholder);
    color: #9ca3af;
    opacity: 1;
  }

  /* third container laylout for Firefox */
  @-moz-document url-prefix() {
    .firefox {
      padding: 32px;
    }
  }

  /* For screens with a maximum width of 768px (mobile) */
  @media (max-width: 768px) {
    .firefox {
      padding: 16px;
    }
  }

  @font-face {
    font-family: 'Inter';
    font-weight: 100 200 300 400 500 600 700 800 900;
    src: url('/fonts/Inter-Variable.ttf');
  }

  @font-face {
    font-family: 'IBMPlexMono-Medium';
    font-weight: 500;
    src: url('/fonts/IBMPlexMono-Medium.ttf');
  }

  ::-webkit-scrollbar {
    width: 10;
  }

  /* Light mode specific autofill styles */
  input:-webkit-autofill,
  input:-webkit-autofill:hover,
  input:-webkit-autofill:focus,
  input:-webkit-autofill:active {
    -webkit-text-fill-color: #343541 !important;
    -webkit-box-shadow: 0 0 0 30px transparent inset !important;
    transition: background-color 5000s ease-in-out 0s;
    caret-color: #343541;
  }

  /* Dark mode specific autofill styles */
  .dark input:-webkit-autofill,
  .dark input:-webkit-autofill:hover,
  .dark input:-webkit-autofill:focus,
  .dark input:-webkit-autofill:active {
    -webkit-text-fill-color: #e5e7eb !important;
    -webkit-box-shadow: 0 0 0 30px transparent inset !important;
    background-color: transparent !important;
    caret-color: #e5e7eb;
  }

  /* Additional autocomplete dropdown styles for dark mode */
  .dark input:-webkit-autofill::first-line {
    color: #e5e7eb;
  }

  .inputbox-style {
    resize: none;
    padding-left: 36px;
    padding-right: 36px;
  }

  .bottom-safe {
    bottom: env(safe-area-inset-bottom, 0);
  }

  .ellipsis-text {
    overflow: hidden;
    display: -webkit-box;
    -webkit-line-clamp: 3;
    -webkit-box-orient: vertical;
    text-overflow: ellipsis;
  }

  .logs-table {
    font-family: 'IBMPlexMono-Medium', system-ui;
  }

  .fade-in {
    animation: fadeIn 0.5s ease-in-out;
  }

  @keyframes fadeIn {
    0% {
      opacity: 0;
    }
    100% {
      opacity: 1;
    }
  }

  .fade-in-bubble {
    opacity: 0;
    transform: translateY(10px);
    animation: fadeInUp 0.5s forwards;
  }

  @keyframes fadeInUp {
    to {
      opacity: 1;
      transform: translateY(0);
    }
  }
}

@@ -52,19 +52,22 @@
     "default": "Default",
     "add": "Add"
   },
-  "documents": {
-    "title": "This table contains all the documents that are available to you and those you have uploaded",
-    "label": "Documents",
-    "name": "Document Name",
+  "sources": {
+    "title": "Here you can manage all of the source file that are available to you and those you have uploaded.",
+    "label": "Sources",
+    "name": "Source Name",
     "date": "Vector Date",
     "type": "Type",
     "tokenUsage": "Token Usage",
-    "noData": "No existing Documents",
+    "noData": "No existing Sources",
     "searchPlaceholder": "Search...",
     "addNew": "Add New",
+    "addSource": "Add Source",
+    "addChunk": "Add Chunk",
     "preLoaded": "Pre-loaded",
     "private": "Private",
     "sync": "Sync",
+    "syncing": "Syncing...",
     "syncFrequency": {
       "never": "Never",
       "daily": "Daily",
@@ -74,12 +77,32 @@
     "actions": "Actions",
     "view": "View",
     "deleteWarning": "Are you sure you want to delete \"{{name}}\"?",
-    "backToAll": "Back to all documents",
+    "confirmDelete": "Are you sure you want to delete this file? This action cannot be undone.",
+    "backToAll": "Back to all sources",
     "chunks": "Chunks",
     "noChunks": "No chunks found",
     "noChunksAlt": "No chunks found",
-    "goToDocuments": "Go to Documents",
-    "uploadNew": "Upload new"
+    "goToSources": "Go to Sources",
+    "uploadNew": "Upload new",
+    "searchFiles": "Search files...",
+    "noResults": "No results found",
+    "fileName": "Name",
+    "tokens": "Tokens",
+    "size": "Size",
+    "fileAlt": "File",
+    "folderAlt": "Folder",
+    "parentFolderAlt": "Parent folder",
+    "menuAlt": "Menu",
+    "tokensUnit": "tokens",
+    "editAlt": "Edit",
+    "uploading": "Uploading…",
+    "deleting": "Deleting…",
+    "queued": "Queued: {{count}}",
+    "addFile": "Add file",
+    "uploadingFilesTitle": "Uploading files...",
+    "deletingTitle": "Deleting...",
+    "deleteDirectoryWarning": "Are you sure you want to delete the directory \"{{name}}\" and all its contents? This action cannot be undone.",
+    "searchAlt": "Search"
   },
   "apiKeys": {
     "label": "Chatbots",
@@ -250,13 +273,14 @@
   },
   "chunk": {
     "add": "Add Chunk",
-    "edit": "Edit Chunk",
+    "edit": "Edit",
     "title": "Title",
     "enterTitle": "Enter title",
     "bodyText": "Body text",
+    "promptText": "Prompt Text",
     "update": "Update",
     "save": "Save",
     "close": "Close",
     "cancel": "Cancel",
     "delete": "Delete",
     "deleteConfirmation": "Are you sure you want to delete this chunk?"
   }

@@ -52,19 +52,22 @@
     "default": "Predeterminado",
     "addNew": "Añadir Nuevo"
   },
-  "documents": {
-    "title": "Esta tabla contiene todos los documentos que están disponibles para ti y los que has subido",
-    "label": "Documentos",
-    "name": "Nombre del Documento",
+  "sources": {
+    "title": "Aquí puedes gestionar todos los archivos fuente que están disponibles para ti y los que has subido.",
+    "label": "Fuentes",
+    "name": "Nombre de la Fuente",
     "date": "Fecha de Vector",
     "type": "Tipo",
     "tokenUsage": "Uso de Tokens",
-    "noData": "No hay documentos existentes",
+    "noData": "No hay fuentes existentes",
     "searchPlaceholder": "Buscar...",
     "addNew": "Agregar Nuevo",
+    "addSource": "Agregar Fuente",
+    "addChunk": "Agregar Fragmento",
     "preLoaded": "Precargado",
     "private": "Privado",
     "sync": "Sincronizar",
+    "syncing": "Sincronizando...",
     "syncFrequency": {
       "never": "Nunca",
       "daily": "Diario",
@@ -74,12 +77,32 @@
     "actions": "Acciones",
     "view": "Ver",
     "deleteWarning": "¿Estás seguro de que deseas eliminar \"{{name}}\"?",
-    "backToAll": "Volver a todos los documentos",
+    "confirmDelete": "¿Estás seguro de que deseas eliminar este archivo? Esta acción no se puede deshacer.",
+    "backToAll": "Volver a todas las fuentes",
     "chunks": "Fragmentos",
     "noChunks": "No se encontraron fragmentos",
     "noChunksAlt": "No se encontraron fragmentos",
-    "goToDocuments": "Ir a Documentos",
-    "uploadNew": "Subir nuevo"
+    "goToSources": "Ir a Fuentes",
+    "uploadNew": "Subir nuevo",
+    "searchFiles": "Buscar archivos...",
+    "noResults": "No se encontraron resultados",
+    "fileName": "Nombre",
+    "tokens": "Tokens",
+    "size": "Tamaño",
+    "fileAlt": "Archivo",
+    "folderAlt": "Carpeta",
+    "parentFolderAlt": "Carpeta padre",
+    "menuAlt": "Menú",
+    "tokensUnit": "tokens",
+    "editAlt": "Editar",
+    "uploading": "Subiendo…",
+    "deleting": "Eliminando…",
+    "queued": "En cola: {{count}}",
+    "addFile": "Añadir archivo",
+    "uploadingFilesTitle": "Subiendo archivos...",
+    "deletingTitle": "Eliminando...",
+    "deleteDirectoryWarning": "¿Está seguro de que desea eliminar el directorio \"{{name}}\" y todo su contenido? Esta acción no se puede deshacer.",
+    "searchAlt": "Buscar"
   },
   "apiKeys": {
     "label": "Chatbots",
@@ -250,13 +273,14 @@
   },
   "chunk": {
     "add": "Agregar Fragmento",
-    "edit": "Editar Fragmento",
+    "edit": "Editar",
     "title": "Título",
     "enterTitle": "Ingresar título",
     "bodyText": "Texto del cuerpo",
+    "promptText": "Texto del prompt",
     "update": "Actualizar",
     "save": "Guardar",
     "close": "Cerrar",
     "cancel": "Cancelar",
     "delete": "Eliminar",
     "deleteConfirmation": "¿Estás seguro de que deseas eliminar este fragmento?"
   }

|
||||
"default": "デフォルト",
|
||||
"add": "追加"
|
||||
},
|
||||
"documents": {
|
||||
"title": "この表には、利用可能なすべてのドキュメントとアップロードしたドキュメントが含まれています",
|
||||
"label": "ドキュメント",
|
||||
"name": "ドキュメント名",
|
||||
"sources": {
|
||||
"title": "ここでは、利用可能なすべてのソースファイルとアップロードしたファイルを管理できます。",
|
||||
"label": "ソース",
|
||||
"name": "ソース名",
|
||||
"date": "ベクトル日付",
|
||||
"type": "タイプ",
|
||||
"tokenUsage": "トークン使用量",
|
||||
"noData": "既存のドキュメントがありません",
|
||||
"noData": "既存のソースがありません",
|
||||
"searchPlaceholder": "検索...",
|
||||
"addNew": "新規追加",
|
||||
"addSource": "ソースを追加",
|
||||
"addChunk": "チャンクを追加",
|
||||
"preLoaded": "プリロード済み",
|
||||
"private": "プライベート",
|
||||
"sync": "同期",
|
||||
"syncing": "同期中...",
|
||||
"syncFrequency": {
|
||||
"never": "なし",
|
||||
"daily": "毎日",
|
||||
@@ -74,12 +77,32 @@
|
||||
"actions": "アクション",
|
||||
"view": "表示",
|
||||
"deleteWarning": "\"{{name}}\"を削除してもよろしいですか?",
|
||||
"backToAll": "すべてのドキュメントに戻る",
|
||||
"confirmDelete": "このファイルを削除してもよろしいですか?この操作は元に戻せません。",
|
||||
"backToAll": "すべてのソースに戻る",
|
||||
"chunks": "チャンク",
|
||||
"noChunks": "チャンクが見つかりません",
|
||||
"noChunksAlt": "チャンクが見つかりません",
|
||||
"goToDocuments": "ドキュメントへ移動",
|
||||
"uploadNew": "新規アップロード"
|
||||
"goToSources": "ソースへ移動",
|
||||
"uploadNew": "新規アップロード",
|
||||
"searchFiles": "ファイルを検索...",
|
||||
"noResults": "結果が見つかりません",
|
||||
"fileName": "名前",
|
||||
"tokens": "トークン",
|
||||
"size": "サイズ",
|
||||
"fileAlt": "ファイル",
|
||||
"folderAlt": "フォルダ",
|
||||
"parentFolderAlt": "親フォルダ",
|
||||
"menuAlt": "メニュー",
|
||||
"tokensUnit": "トークン",
|
||||
"editAlt": "編集",
|
||||
"uploading": "アップロード中…",
|
||||
"deleting": "削除中…",
|
||||
"queued": "キュー: {{count}}",
|
||||
"addFile": "ファイルを追加",
|
||||
"uploadingFilesTitle": "ファイルをアップロード中...",
|
||||
"deletingTitle": "削除中...",
|
||||
"deleteDirectoryWarning": "ディレクトリ \"{{name}}\" とその内容をすべて削除してもよろしいですか?この操作は元に戻せません。",
|
||||
"searchAlt": "検索"
|
||||
},
|
||||
"apiKeys": {
|
||||
"label": "チャットボット",
|
||||
@@ -250,13 +273,14 @@
|
||||
},
|
||||
"chunk": {
|
||||
"add": "チャンクを追加",
|
||||
"edit": "チャンクを編集",
|
||||
"edit": "編集",
|
||||
"title": "タイトル",
|
||||
"enterTitle": "タイトルを入力",
|
||||
"bodyText": "本文",
|
||||
"promptText": "プロンプトテキスト",
|
||||
"update": "更新",
|
||||
"save": "保存",
|
||||
"close": "閉じる",
|
||||
"cancel": "キャンセル",
|
||||
"delete": "削除",
|
||||
"deleteConfirmation": "このチャンクを削除してもよろしいですか?"
|
||||
}
|
||||
|
||||
@@ -52,19 +52,22 @@
     "default": "По умолчанию",
     "add": "Добавить"
   },
-  "documents": {
-    "title": "Эта таблица содержит все документы, которые доступны вам и те, которые вы загрузили",
-    "label": "Документы",
-    "name": "Название документа",
+  "sources": {
+    "title": "Здесь вы можете управлять всеми исходными файлами, которые доступны вам и которые вы загрузили.",
+    "label": "Источники",
+    "name": "Название источника",
     "date": "Дата вектора",
     "type": "Тип",
     "tokenUsage": "Использование токена",
-    "noData": "Нет существующих документов",
+    "noData": "Нет существующих источников",
     "searchPlaceholder": "Поиск...",
     "addNew": "добавить новый",
+    "addSource": "Добавить источник",
+    "addChunk": "Добавить фрагмент",
     "preLoaded": "Предзагруженный",
     "private": "Частный",
     "sync": "Синхронизация",
+    "syncing": "Синхронизация...",
     "syncFrequency": {
       "never": "Никогда",
       "daily": "Ежедневно",
@@ -74,12 +77,32 @@
     "actions": "Действия",
     "view": "Просмотр",
     "deleteWarning": "Вы уверены, что хотите удалить \"{{name}}\"?",
-    "backToAll": "Вернуться ко всем документам",
+    "confirmDelete": "Вы уверены, что хотите удалить этот файл? Это действие нельзя отменить.",
+    "backToAll": "Вернуться ко всем источникам",
     "chunks": "Фрагменты",
     "noChunks": "Фрагменты не найдены",
     "noChunksAlt": "Фрагменты не найдены",
-    "goToDocuments": "Перейти к документам",
-    "uploadNew": "Загрузить новый"
+    "goToSources": "Перейти к источникам",
+    "uploadNew": "Загрузить новый",
+    "searchFiles": "Поиск файлов...",
+    "noResults": "Результаты не найдены",
+    "fileName": "Имя",
+    "tokens": "Токены",
+    "size": "Размер",
+    "fileAlt": "Файл",
+    "folderAlt": "Папка",
+    "parentFolderAlt": "Родительская папка",
+    "menuAlt": "Меню",
+    "tokensUnit": "токенов",
+    "editAlt": "Редактировать",
+    "uploading": "Загрузка…",
+    "deleting": "Удаление…",
+    "queued": "В очереди: {{count}}",
+    "addFile": "Добавить файл",
+    "uploadingFilesTitle": "Загрузка файлов...",
+    "deletingTitle": "Удаление...",
+    "deleteDirectoryWarning": "Вы уверены, что хотите удалить каталог \"{{name}}\" и все его содержимое? Это действие нельзя отменить.",
+    "searchAlt": "Поиск"
   },
   "apiKeys": {
     "label": "API ключи",
@@ -250,13 +273,14 @@
   },
   "chunk": {
     "add": "Добавить фрагмент",
-    "edit": "Редактировать фрагмент",
+    "edit": "Редактировать",
     "title": "Заголовок",
     "enterTitle": "Введите заголовок",
     "bodyText": "Текст",
+    "promptText": "Текст подсказки",
     "update": "Обновить",
     "save": "Сохранить",
     "close": "Закрыть",
     "cancel": "Отмена",
     "delete": "Удалить",
     "deleteConfirmation": "Вы уверены, что хотите удалить этот фрагмент?"
   }

@@ -52,19 +52,22 @@
     "default": "預設",
     "add": "添加"
   },
-  "documents": {
-    "title": "此表格包含所有可供您使用的文件以及您上傳的文件",
-    "label": "文件",
-    "name": "文件名稱",
+  "sources": {
+    "title": "在這裡您可以管理所有可用的來源檔案以及您上傳的檔案。",
+    "label": "來源",
+    "name": "來源名稱",
     "date": "向量日期",
     "type": "類型",
     "tokenUsage": "Token 使用量",
-    "noData": "沒有現有的文件",
+    "noData": "沒有現有的來源",
     "searchPlaceholder": "搜尋...",
     "addNew": "新增文件",
+    "addSource": "新增來源",
+    "addChunk": "新增區塊",
     "preLoaded": "預載入",
     "private": "私人",
     "sync": "同步",
+    "syncing": "同步中...",
     "syncFrequency": {
       "never": "從不",
       "daily": "每天",
@@ -74,12 +77,32 @@
     "actions": "操作",
     "view": "查看",
     "deleteWarning": "您確定要刪除 \"{{name}}\" 嗎?",
-    "backToAll": "返回所有文件",
+    "confirmDelete": "您確定要刪除此檔案嗎?此操作無法復原。",
+    "backToAll": "返回所有來源",
     "chunks": "文本塊",
     "noChunks": "未找到文本塊",
     "noChunksAlt": "未找到文本塊",
-    "goToDocuments": "前往文件",
-    "uploadNew": "上傳新文件"
+    "goToSources": "前往來源",
+    "uploadNew": "上傳新文件",
+    "searchFiles": "搜尋檔案...",
+    "noResults": "未找到結果",
+    "fileName": "名稱",
+    "tokens": "Token",
+    "size": "大小",
+    "fileAlt": "檔案",
+    "folderAlt": "資料夾",
+    "parentFolderAlt": "上層資料夾",
+    "menuAlt": "選單",
+    "tokensUnit": "Token",
+    "editAlt": "編輯",
+    "uploading": "正在上傳…",
+    "deleting": "正在刪除…",
+    "queued": "已排隊:{{count}}",
+    "addFile": "新增檔案",
+    "uploadingFilesTitle": "正在上傳檔案...",
+    "deletingTitle": "正在刪除...",
+    "deleteDirectoryWarning": "您確定要刪除目錄 \"{{name}}\" 及其所有內容嗎?此操作無法復原。",
+    "searchAlt": "搜尋"
   },
   "apiKeys": {
     "label": "聊天機器人",
@@ -250,13 +273,14 @@
   },
   "chunk": {
     "add": "新增區塊",
-    "edit": "編輯區塊",
+    "edit": "編輯",
     "title": "標題",
     "enterTitle": "輸入標題",
     "bodyText": "內文",
+    "promptText": "提示文字",
     "update": "更新",
     "save": "儲存",
     "close": "關閉",
     "cancel": "取消",
     "delete": "刪除",
     "deleteConfirmation": "您確定要刪除此區塊嗎?"
   }

@@ -52,19 +52,22 @@
     "default": "默认",
     "add": "添加"
   },
-  "documents": {
-    "title": "此表格包含所有可供您使用的文档以及您上传的文档",
-    "label": "文档",
-    "name": "文件名称",
+  "sources": {
+    "title": "在这里您可以管理所有可用的源文件以及您上传的文件。",
+    "label": "来源",
+    "name": "来源名称",
     "date": "向量日期",
     "type": "类型",
     "tokenUsage": "令牌使用",
-    "noData": "没有现有的文档",
+    "noData": "没有现有的来源",
     "searchPlaceholder": "搜索...",
     "addNew": "添加新文档",
+    "addSource": "添加来源",
+    "addChunk": "添加块",
     "preLoaded": "预加载",
     "private": "私有",
     "sync": "同步",
+    "syncing": "同步中...",
     "syncFrequency": {
       "never": "从不",
       "daily": "每天",
@@ -74,12 +77,32 @@
     "actions": "操作",
     "view": "查看",
     "deleteWarning": "您确定要删除 \"{{name}}\" 吗?",
-    "backToAll": "返回所有文档",
+    "confirmDelete": "您确定要删除此文件吗?此操作无法撤销。",
+    "backToAll": "返回所有来源",
     "chunks": "文本块",
     "noChunks": "未找到文本块",
     "noChunksAlt": "未找到文本块",
-    "goToDocuments": "前往文档",
-    "uploadNew": "上传新文档"
+    "goToSources": "前往来源",
+    "uploadNew": "上传新文档",
+    "searchFiles": "搜索文件...",
+    "noResults": "未找到结果",
+    "fileName": "名称",
+    "tokens": "令牌",
+    "size": "大小",
+    "fileAlt": "文件",
+    "folderAlt": "文件夹",
+    "parentFolderAlt": "父文件夹",
+    "menuAlt": "菜单",
+    "tokensUnit": "令牌",
+    "editAlt": "编辑",
+    "uploading": "正在上传…",
+    "deleting": "正在删除…",
+    "queued": "已排队:{{count}}",
+    "addFile": "添加文件",
+    "uploadingFilesTitle": "正在上传文件...",
+    "deletingTitle": "正在删除...",
+    "deleteDirectoryWarning": "确定要删除目录 \"{{name}}\" 及其所有内容吗?此操作无法撤销。",
+    "searchAlt": "搜索"
   },
   "apiKeys": {
     "label": "聊天机器人",
@@ -250,13 +273,14 @@
   },
   "chunk": {
     "add": "添加块",
-    "edit": "编辑块",
+    "edit": "编辑",
     "title": "标题",
     "enterTitle": "输入标题",
     "bodyText": "正文",
+    "promptText": "提示文本",
     "update": "更新",
     "save": "保存",
     "close": "关闭",
     "cancel": "取消",
     "delete": "删除",
     "deleteConfirmation": "您确定要删除此块吗?"
   }

@@ -12,6 +12,7 @@ export type Doc = {
   type?: string;
   retriever?: string;
   syncFrequency?: string;
+  isNested?: boolean;
 };

 export type GetDocsResponse = {

@@ -1,788 +0,0 @@
import React, { useCallback, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';

import userService from '../api/services/userService';
import ArrowLeft from '../assets/arrow-left.svg';
import caretSort from '../assets/caret-sort.svg';
import Edit from '../assets/edit.svg';
import EyeView from '../assets/eye-view.svg';
import NoFilesDarkIcon from '../assets/no-files-dark.svg';
import NoFilesIcon from '../assets/no-files.svg';
import Trash from '../assets/red-trash.svg';
import SyncIcon from '../assets/sync.svg';
import ThreeDots from '../assets/three-dots.svg';
import ContextMenu, { MenuOption } from '../components/ContextMenu';
import Pagination from '../components/DocumentPagination';
import DropdownMenu from '../components/DropdownMenu';
import Input from '../components/Input';
import SkeletonLoader from '../components/SkeletonLoader';
import Spinner from '../components/Spinner';
import { useDarkTheme, useLoaderState } from '../hooks';
import ChunkModal from '../modals/ChunkModal';
import ConfirmationModal from '../modals/ConfirmationModal';
import { ActiveState, Doc, DocumentsProps } from '../models/misc';
import { getDocs, getDocsWithPagination } from '../preferences/preferenceApi';
import {
  selectToken,
  setPaginatedDocuments,
  setSourceDocs,
} from '../preferences/preferenceSlice';
import Upload from '../upload/Upload';
import { formatDate } from '../utils/dateTimeUtils';
import { ChunkType } from './types';

const formatTokens = (tokens: number): string => {
  const roundToTwoDecimals = (num: number): string => {
    return (Math.round((num + Number.EPSILON) * 100) / 100).toString();
  };

  if (tokens >= 1_000_000_000) {
    return roundToTwoDecimals(tokens / 1_000_000_000) + 'b';
  } else if (tokens >= 1_000_000) {
    return roundToTwoDecimals(tokens / 1_000_000) + 'm';
  } else if (tokens >= 1_000) {
    return roundToTwoDecimals(tokens / 1_000) + 'k';
  } else {
    return tokens.toString();
  }
};
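// Illustrative outputs: formatTokens(950) -> '950', formatTokens(1_500) -> '1.5k',
// formatTokens(2_345_000) -> '2.35m', formatTokens(7_000_000_000) -> '7b'.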

export default function Documents({
  paginatedDocuments,
  handleDeleteDocument,
}: DocumentsProps) {
  const { t } = useTranslation();
  const dispatch = useDispatch();
  const token = useSelector(selectToken);

  const [searchTerm, setSearchTerm] = useState<string>('');
  const [modalState, setModalState] = useState<ActiveState>('INACTIVE');
  const [isOnboarding, setIsOnboarding] = useState<boolean>(false);
  const [loading, setLoading] = useLoaderState(false);
  const [sortField, setSortField] = useState<'date' | 'tokens'>('date');
  const [sortOrder, setSortOrder] = useState<'asc' | 'desc'>('desc');
  // Pagination
  const [currentPage, setCurrentPage] = useState<number>(1);
  const [rowsPerPage, setRowsPerPage] = useState<number>(10);
  const [totalPages, setTotalPages] = useState<number>(1);

  const [activeMenuId, setActiveMenuId] = useState<string | null>(null);
  const menuRefs = useRef<{
    [key: string]: React.RefObject<HTMLDivElement | null>;
  }>({});

  // Create or get a ref for each document wrapper div (not the td)
  const getMenuRef = (docId: string) => {
    if (!menuRefs.current[docId]) {
      menuRefs.current[docId] = React.createRef<HTMLDivElement>();
    }
    return menuRefs.current[docId];
  };

  const handleMenuClick = (e: React.MouseEvent, docId: string) => {
    e.preventDefault();
    e.stopPropagation();

    const isAnyMenuOpen =
      (syncMenuState.isOpen && syncMenuState.docId === docId) ||
      activeMenuId === docId;

    if (isAnyMenuOpen) {
      setSyncMenuState((prev) => ({ ...prev, isOpen: false, docId: null }));
      setActiveMenuId(null);
      return;
    }
    setActiveMenuId(docId);
  };

  useEffect(() => {
    const handleClickOutside = (event: MouseEvent) => {
      if (activeMenuId) {
        const activeRef = menuRefs.current[activeMenuId];
        if (
          activeRef?.current &&
          !activeRef.current.contains(event.target as Node)
        ) {
          setActiveMenuId(null);
        }
      }
    };

    document.addEventListener('mousedown', handleClickOutside);
    return () => document.removeEventListener('mousedown', handleClickOutside);
  }, [activeMenuId]);

  const currentDocuments = paginatedDocuments ?? [];
  const syncOptions = [
    { label: t('settings.documents.syncFrequency.never'), value: 'never' },
    { label: t('settings.documents.syncFrequency.daily'), value: 'daily' },
    { label: t('settings.documents.syncFrequency.weekly'), value: 'weekly' },
    { label: t('settings.documents.syncFrequency.monthly'), value: 'monthly' },
  ];
  const [showDocumentChunks, setShowDocumentChunks] = useState<Doc>();
  const [isMenuOpen, setIsMenuOpen] = useState(false);
  const [syncMenuState, setSyncMenuState] = useState<{
    isOpen: boolean;
    docId: string | null;
    document: Doc | null;
  }>({
    isOpen: false,
    docId: null,
    document: null,
  });

  const refreshDocs = useCallback(
    (
      field: 'date' | 'tokens' | undefined,
      pageNumber?: number,
      rows?: number,
    ) => {
      const page = pageNumber ?? currentPage;
      const rowsPerPg = rows ?? rowsPerPage;

      // If field is undefined (pagination or search), keep the current sortField
      const newSortField = field ?? sortField;

      // If field matches the current sortField, toggle the order; otherwise keep it
      const newSortOrder =
        field === sortField
          ? sortOrder === 'asc'
            ? 'desc'
            : 'asc'
          : sortOrder;

      // If field is defined, persist the new sortField and sortOrder
      if (field) {
        setSortField(newSortField);
        setSortOrder(newSortOrder);
      }
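      // Example: with the current sort at date/desc, refreshDocs('date') flips
      // to date/asc, while refreshDocs('tokens') switches the field and keeps 'desc'.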

      setLoading(true);
      getDocsWithPagination(
        newSortField,
        newSortOrder,
        page,
        rowsPerPg,
        searchTerm,
        token,
      )
        .then((data) => {
          dispatch(setPaginatedDocuments(data ? data.docs : []));
          setTotalPages(data ? data.totalPages : 0);
        })
        .catch((error) => console.error(error))
        .finally(() => {
          setLoading(false);
        });
    },
    [currentPage, rowsPerPage, sortField, sortOrder, searchTerm],
  );

  const handleManageSync = (doc: Doc, sync_frequency: string) => {
    setLoading(true);
    userService
      .manageSync({ source_id: doc.id, sync_frequency }, token)
      .then(() => {
        return getDocs(token);
      })
      .then((data) => {
        dispatch(setSourceDocs(data));
        return getDocsWithPagination(
          sortField,
          sortOrder,
          currentPage,
          rowsPerPage,
          searchTerm,
          token,
        );
      })
      .then((paginatedData) => {
        dispatch(
          setPaginatedDocuments(paginatedData ? paginatedData.docs : []),
        );
        setTotalPages(paginatedData ? paginatedData.totalPages : 0);
      })
      .catch((error) => console.error('Error in handleManageSync:', error))
      .finally(() => {
        setLoading(false);
      });
  };
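  // Note: manageSync is followed by two sequential fetches (getDocs for the
  // full source list, then getDocsWithPagination for the current page) so both
  // the Redux store and the paginated view reflect the new sync frequency.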

  const [documentToDelete, setDocumentToDelete] = useState<{
    index: number;
    document: Doc;
  } | null>(null);
  const [deleteModalState, setDeleteModalState] =
    useState<ActiveState>('INACTIVE');

  const handleDeleteConfirmation = (index: number, document: Doc) => {
    setDocumentToDelete({ index, document });
    setDeleteModalState('ACTIVE');
  };

  const handleConfirmedDelete = () => {
    if (documentToDelete) {
      handleDeleteDocument(documentToDelete.index, documentToDelete.document);
      setDeleteModalState('INACTIVE');
      setDocumentToDelete(null);
    }
  };

  const getActionOptions = (index: number, document: Doc): MenuOption[] => {
    const actions: MenuOption[] = [
      {
        icon: EyeView,
        label: t('settings.documents.view'),
        onClick: () => {
          setShowDocumentChunks(document);
        },
        iconWidth: 18,
        iconHeight: 18,
        variant: 'primary',
      },
    ];

    if (document.syncFrequency) {
      actions.push({
        icon: SyncIcon,
        label: t('settings.documents.sync'),
        onClick: () => {
          setSyncMenuState({
            isOpen: true,
            docId: document.id ?? null,
            document: document,
          });
        },
        iconWidth: 14,
        iconHeight: 14,
        variant: 'primary',
      });
    }

    actions.push({
      icon: Trash,
      label: t('convTile.delete'),
      onClick: () => {
        handleDeleteConfirmation(index, document);
      },
      iconWidth: 18,
      iconHeight: 18,
      variant: 'danger',
    });

    return actions;
  };
  useEffect(() => {
    refreshDocs(undefined, 1, rowsPerPage);
  }, [searchTerm]);

  return showDocumentChunks ? (
    <DocumentChunks
      document={showDocumentChunks}
      handleGoBack={() => {
        setShowDocumentChunks(undefined);
      }}
    />
  ) : (
    <div className="mt-8 flex w-full max-w-full flex-col overflow-hidden">
      <div className="relative flex grow flex-col">
        <div className="mb-6">
          <h2 className="text-sonic-silver text-base font-medium">
            {t('settings.documents.title')}
          </h2>
        </div>
        <div className="mb-6 flex flex-col items-start justify-between gap-3 sm:flex-row sm:items-center">
          <div className="w-full sm:w-auto">
            <label htmlFor="document-search-input" className="sr-only">
              {t('settings.documents.searchPlaceholder')}
            </label>
            <Input
              maxLength={256}
              placeholder={t('settings.documents.searchPlaceholder')}
              name="Document-search-input"
              type="text"
              id="document-search-input"
              value={searchTerm}
              onChange={(e) => {
                setSearchTerm(e.target.value);
                setCurrentPage(1);
              }}
              borderVariant="thin"
            />
          </div>
          <button
            className="bg-purple-30 hover:bg-violets-are-blue flex h-[32px] min-w-[108px] items-center justify-center rounded-full px-4 text-sm whitespace-normal text-white"
            title={t('settings.documents.addNew')}
            onClick={() => {
              setIsOnboarding(false);
              setModalState('ACTIVE');
            }}
          >
            {t('settings.documents.addNew')}
          </button>
        </div>
        <div className="relative w-full">
          <div className="dark:border-silver/40 overflow-hidden rounded-md border border-gray-300">
            <div className="table-scroll overflow-x-auto">
              <table className="w-full table-auto">
                <thead>
                  <tr className="dark:border-silver/40 border-b border-gray-300">
                    <th className="text-sonic-silver w-[45%] px-4 py-3 text-left text-xs font-medium">
                      {t('settings.documents.name')}
                    </th>
                    <th className="text-sonic-silver w-[30%] px-4 py-3 text-left text-xs font-medium">
                      <div className="flex items-center justify-start">
                        {t('settings.documents.date')}
                        <img
                          className="ml-2 cursor-pointer"
                          onClick={() => refreshDocs('date')}
                          src={caretSort}
                          alt="sort"
                        />
                      </div>
                    </th>
                    <th className="text-sonic-silver w-[15%] px-4 py-3 text-left text-xs font-medium">
                      <div className="flex items-center justify-start">
                        <span className="hidden sm:inline">
                          {t('settings.documents.tokenUsage')}
                        </span>
                        <span className="sm:hidden">
                          {t('settings.documents.tokenUsage')}
                        </span>
                        <img
                          className="ml-2 cursor-pointer"
                          onClick={() => refreshDocs('tokens')}
                          src={caretSort}
                          alt="sort"
                        />
                      </div>
                    </th>
                    <th className="sr-only w-[10%] px-4 py-3">
                      {t('settings.documents.actions')}
                    </th>
                  </tr>
                </thead>
                <tbody className="dark:divide-silver/40 divide-y divide-gray-300">
                  {loading ? (
                    <SkeletonLoader component="table" />
                  ) : !currentDocuments?.length ? (
                    <tr>
                      <td
                        colSpan={4}
                        className="bg-transparent py-4 text-center text-gray-700 dark:text-neutral-200"
                      >
                        {t('settings.documents.noData')}
                      </td>
                    </tr>
                  ) : (
                    currentDocuments.map((document, index) => {
                      const docId = document.id ? document.id.toString() : '';

                      return (
                        <tr key={docId} className="group transition-colors">
                          <td
                            className="max-w-0 min-w-48 truncate px-4 py-4 text-sm font-semibold text-gray-700 group-hover:bg-gray-50 dark:text-[#E0E0E0] dark:group-hover:bg-gray-800/50"
                            title={document.name}
                          >
                            {document.name}
                          </td>
                          <td className="px-4 py-4 text-sm whitespace-nowrap text-gray-700 group-hover:bg-gray-50 dark:text-[#E0E0E0] dark:group-hover:bg-gray-800/50">
                            {document.date ? formatDate(document.date) : ''}
                          </td>
                          <td className="px-4 py-4 text-sm whitespace-nowrap text-gray-700 group-hover:bg-gray-50 dark:text-[#E0E0E0] dark:group-hover:bg-gray-800/50">
                            {document.tokens
                              ? formatTokens(+document.tokens)
                              : ''}
                          </td>
                          <td
                            className="px-4 py-4 text-right group-hover:bg-gray-50 dark:group-hover:bg-gray-800/50"
                            onClick={(e) => e.stopPropagation()}
                          >
                            <div
                              ref={getMenuRef(docId)}
                              className="relative flex items-center justify-end gap-3"
                            >
                              {document.syncFrequency && (
                                <DropdownMenu
                                  name={t('settings.documents.sync')}
                                  options={syncOptions}
                                  onSelect={(value: string) => {
                                    handleManageSync(document, value);
                                  }}
                                  defaultValue={document.syncFrequency}
                                  icon={SyncIcon}
                                  isOpen={
                                    syncMenuState.docId === docId &&
                                    syncMenuState.isOpen
                                  }
                                  onOpenChange={(isOpen) => {
                                    setSyncMenuState((prev) => ({
                                      ...prev,
                                      isOpen,
                                      docId: isOpen ? docId : null,
                                      document: isOpen ? document : null,
                                    }));
                                  }}
                                  anchorRef={getMenuRef(docId)}
                                  position="bottom-left"
                                  offset={{ x: 24, y: -24 }}
                                  className="min-w-[120px]"
                                />
                              )}
                              <button
                                onClick={(e) => handleMenuClick(e, docId)}
                                className="inline-flex h-8 w-8 shrink-0 items-center justify-center rounded-full transition-colors hover:bg-gray-100 dark:hover:bg-gray-700"
                                aria-label="Open menu"
                                data-testid={`menu-button-${docId}`}
                              >
                                <img
                                  src={ThreeDots}
                                  alt={t('convTile.menu')}
                                  className="h-4 w-4 opacity-60 hover:opacity-100"
                                />
                              </button>
                              <ContextMenu
                                isOpen={activeMenuId === docId}
                                setIsOpen={(isOpen) => {
                                  setActiveMenuId(isOpen ? docId : null);
                                }}
                                options={getActionOptions(index, document)}
                                anchorRef={getMenuRef(docId)}
                                position="bottom-left"
                                offset={{ x: 48, y: 0 }}
                                className="z-50"
                              />
                            </div>
                          </td>
                        </tr>
                      );
                    })
                  )}
                </tbody>
              </table>
            </div>
          </div>
        </div>
      </div>

      <div className="mt-auto pt-4">
        <Pagination
          currentPage={currentPage}
          totalPages={totalPages}
          rowsPerPage={rowsPerPage}
          onPageChange={(page) => {
            setCurrentPage(page);
            refreshDocs(undefined, page, rowsPerPage);
          }}
          onRowsPerPageChange={(rows) => {
            setRowsPerPage(rows);
            setCurrentPage(1);
            refreshDocs(undefined, 1, rows);
          }}
        />
      </div>

      {modalState === 'ACTIVE' && (
        <Upload
          receivedFile={[]}
          setModalState={setModalState}
          isOnboarding={isOnboarding}
          renderTab={null}
          close={() => setModalState('INACTIVE')}
          onSuccessfulUpload={() =>
            refreshDocs(undefined, currentPage, rowsPerPage)
          }
        />
      )}

      {deleteModalState === 'ACTIVE' && documentToDelete && (
        <ConfirmationModal
          message={t('settings.documents.deleteWarning', {
            name: documentToDelete.document.name,
          })}
          modalState={deleteModalState}
          setModalState={setDeleteModalState}
          handleSubmit={handleConfirmedDelete}
          handleCancel={() => {
            setDeleteModalState('INACTIVE');
            setDocumentToDelete(null);
          }}
          submitLabel={t('convTile.delete')}
          variant="danger"
        />
      )}
    </div>
  );
}

function DocumentChunks({
  document,
  handleGoBack,
}: {
  document: Doc;
  handleGoBack: () => void;
}) {
  const { t } = useTranslation();
  const token = useSelector(selectToken);
  const [isDarkTheme] = useDarkTheme();
  const [paginatedChunks, setPaginatedChunks] = useState<ChunkType[]>([]);
  const [page, setPage] = useState(1);
  const [perPage, setPerPage] = useState(5);
  const [totalChunks, setTotalChunks] = useState(0);
  const [loading, setLoading] = useLoaderState(true);
  const [searchTerm, setSearchTerm] = useState<string>('');
  const [addModal, setAddModal] = useState<ActiveState>('INACTIVE');
  const [editModal, setEditModal] = useState<{
    state: ActiveState;
    chunk: ChunkType | null;
  }>({ state: 'INACTIVE', chunk: null });

  const fetchChunks = () => {
    setLoading(true);
    try {
      userService
        .getDocumentChunks(document.id ?? '', page, perPage, token)
        .then((response) => {
          if (!response.ok) {
            setLoading(false);
            setPaginatedChunks([]);
            throw new Error('Failed to fetch chunks data');
          }
          return response.json();
        })
        .then((data) => {
          setPage(data.page);
          setPerPage(data.per_page);
          setTotalChunks(data.total);
          setPaginatedChunks(data.chunks);
          setLoading(false);
        });
    } catch (e) {
      console.log(e);
      setLoading(false);
    }
  };
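  // Caveat: a synchronous try/catch does not catch rejections from the promise
  // chain above; a .catch() on the chain itself would be needed for those.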

  const handleAddChunk = (title: string, text: string) => {
    try {
      userService
        .addChunk(
          {
            id: document.id ?? '',
            text: text,
            metadata: {
              title: title,
            },
          },
          token,
        )
        .then((response) => {
          if (!response.ok) {
            throw new Error('Failed to add chunk');
          }
          fetchChunks();
        });
    } catch (e) {
      console.log(e);
    }
  };

  const handleUpdateChunk = (title: string, text: string, chunk: ChunkType) => {
    try {
      userService
        .updateChunk(
          {
            id: document.id ?? '',
            chunk_id: chunk.doc_id,
            text: text,
            metadata: {
              title: title,
            },
          },
          token,
        )
        .then((response) => {
          if (!response.ok) {
            throw new Error('Failed to update chunk');
          }
          fetchChunks();
        });
    } catch (e) {
      console.log(e);
    }
  };

  const handleDeleteChunk = (chunk: ChunkType) => {
    try {
      userService
        .deleteChunk(document.id ?? '', chunk.doc_id, token)
        .then((response) => {
          if (!response.ok) {
            throw new Error('Failed to delete chunk');
          }
          setEditModal({ state: 'INACTIVE', chunk: null });
          fetchChunks();
        });
    } catch (e) {
      console.log(e);
    }
  };

  React.useEffect(() => {
    fetchChunks();
  }, [page, perPage]);
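  // Note: the title search below filters only the chunks already fetched for
  // the current page; paging itself is driven server-side by page/perPage.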
  return (
    <div className="mt-8 flex flex-col">
      <div className="text-eerie-black dark:text-bright-gray mb-3 flex items-center gap-3 text-sm">
        <button
          className="rounded-full border p-3 text-sm text-gray-400 dark:border-0 dark:bg-[#28292D] dark:text-gray-500 dark:hover:bg-[#2E2F34]"
          onClick={handleGoBack}
        >
          <img src={ArrowLeft} alt="left-arrow" className="h-3 w-3" />
        </button>
        <p className="mt-px">{t('settings.documents.backToAll')}</p>
      </div>
      <div className="my-3 flex items-center justify-between gap-1">
        <div className="text-eerie-black dark:text-bright-gray flex w-full items-center gap-2 sm:w-auto">
          <p className="hidden text-2xl font-semibold sm:flex">{`${totalChunks} ${t('settings.documents.chunks')}`}</p>
          <label htmlFor="chunk-search-input" className="sr-only">
            {t('settings.documents.searchPlaceholder')}
          </label>
          <Input
            maxLength={256}
            placeholder={t('settings.documents.searchPlaceholder')}
            name="chunk-search-input"
            type="text"
            id="chunk-search-input"
            value={searchTerm}
            onChange={(e) => {
              setSearchTerm(e.target.value);
            }}
            borderVariant="thin"
          />
        </div>
        <button
          className="bg-purple-30 hover:bg-violets-are-blue flex h-[32px] min-w-[108px] items-center justify-center rounded-full px-4 text-sm whitespace-normal text-white"
          title={t('settings.documents.addNew')}
          onClick={() => setAddModal('ACTIVE')}
        >
          {t('settings.documents.addNew')}
        </button>
      </div>
      {loading ? (
        <div className="grid grid-cols-1 gap-6 sm:grid-cols-2 lg:grid-cols-3">
          <div className="col-span-2 mt-24 flex h-32 items-center justify-center lg:col-span-3">
            <Spinner />
          </div>
        </div>
      ) : (
        <div className="grid grid-cols-1 gap-6 sm:grid-cols-2 lg:grid-cols-3">
          {paginatedChunks.filter((chunk) => {
            if (!chunk.metadata?.title) return true;
            return chunk.metadata.title
              .toLowerCase()
              .includes(searchTerm.toLowerCase());
          }).length === 0 ? (
            <div className="col-span-2 mt-24 text-center text-gray-500 lg:col-span-3 dark:text-gray-400">
              <img
                src={isDarkTheme ? NoFilesDarkIcon : NoFilesIcon}
                alt={t('settings.documents.noChunksAlt')}
                className="mx-auto mb-2 h-24 w-24"
              />
              {t('settings.documents.noChunks')}
            </div>
          ) : (
            paginatedChunks
              .filter((chunk) => {
                if (!chunk.metadata?.title) return true;
                return chunk.metadata.title
                  .toLowerCase()
                  .includes(searchTerm.toLowerCase());
              })
              .map((chunk, index) => (
                <div
                  key={index}
                  className="border-silver dark:border-silver/40 relative flex h-56 w-full flex-col justify-between rounded-2xl border p-6"
                >
                  <div className="w-full">
                    <div className="flex w-full items-center justify-between">
                      <button
                        aria-label={'edit'}
                        onClick={() => {
                          setEditModal({
                            state: 'ACTIVE',
                            chunk: chunk,
                          });
                        }}
                        className="absolute top-3 right-3 h-4 w-4 cursor-pointer"
                      >
                        <img
                          alt={'edit'}
                          src={Edit}
                          className="opacity-60 hover:opacity-100"
                        />
                      </button>
                    </div>
                    <div className="mt-[9px]">
                      <p className="ellipsis-text text-eerie-black h-12 text-sm leading-relaxed font-semibold break-words dark:text-[#EEEEEE]">
                        {chunk.metadata?.title ?? 'Untitled'}
                      </p>
                      <p className="mt-1 h-[110px] overflow-y-auto pr-1 text-[13px] leading-relaxed break-words text-gray-600 dark:text-gray-400">
                        {chunk.text}
                      </p>
                    </div>
                  </div>
                </div>
              ))
          )}
        </div>
      )}
      {!loading &&
        paginatedChunks.filter((chunk) => {
          if (!chunk.metadata?.title) return true;
          return chunk.metadata.title
            .toLowerCase()
            .includes(searchTerm.toLowerCase());
        }).length !== 0 && (
          <div className="mt-10 flex w-full items-center justify-center">
            <Pagination
              currentPage={page}
              totalPages={Math.ceil(totalChunks / perPage)}
              rowsPerPage={perPage}
              onPageChange={(page) => {
                setPage(page);
              }}
              onRowsPerPageChange={(rows) => {
                setPerPage(rows);
                setPage(1);
              }}
            />
          </div>
        )}
      <ChunkModal
        type="ADD"
        modalState={addModal}
        setModalState={setAddModal}
        handleSubmit={handleAddChunk}
      />
      {editModal.chunk && (
        <ChunkModal
          type="EDIT"
          modalState={editModal.state}
          setModalState={(state) =>
            setEditModal((prev) => ({ ...prev, state }))
          }
          handleSubmit={(title, text) => {
            handleUpdateChunk(title, text, editModal.chunk as ChunkType);
          }}
          originalText={editModal.chunk?.text ?? ''}
          originalTitle={editModal.chunk?.metadata?.title ?? ''}
          handleDelete={() => {
            handleDeleteChunk(editModal.chunk as ChunkType);
          }}
        />
      )}
    </div>
  );
}
@@ -88,7 +88,7 @@ export default function General() {
        {' '}
        <div className="flex flex-col gap-4">
          {' '}
          <label className="text-base font-medium text-jet dark:text-bright-gray">
          <label className="text-jet dark:text-bright-gray text-base font-medium">
            {t('settings.general.selectTheme')}
          </label>
          <Dropdown
@@ -106,7 +106,7 @@ export default function General() {
          />
        </div>
        <div className="flex flex-col gap-4">
          <label className="text-base font-medium text-jet dark:text-bright-gray">
          <label className="text-jet dark:text-bright-gray text-base font-medium">
            {t('settings.general.selectLanguage')}
          </label>
          <Dropdown
@@ -124,7 +124,7 @@ export default function General() {
          />
        </div>
        <div className="flex flex-col gap-4">
          <label className="text-base font-medium text-jet dark:text-bright-gray">
          <label className="text-jet dark:text-bright-gray text-base font-medium">
            {t('settings.general.chunks')}
          </label>
          <Dropdown
@@ -137,7 +137,7 @@ export default function General() {
          />
        </div>
        <div className="flex flex-col gap-4">
          <label className="text-base font-medium text-jet dark:text-bright-gray">
          <label className="text-jet dark:text-bright-gray text-base font-medium">
            {t('settings.general.convHistory')}
          </label>
          <Dropdown
@@ -169,13 +169,14 @@ export default function General() {
            dispatch(setPrompt({ name: name, id: id, type: type }))
          }
          setPrompts={setPrompts}
          dropdownProps={{ size: 'w-56', rounded: '3xl', border: 'border' }}
        />
      </div>
      <hr className="my-4 w-[calc(min(665px,100%))] border-t border-silver dark:border-silver/40" />
      <hr className="border-silver dark:border-silver/40 my-4 w-[calc(min(665px,100%))] border-t" />
      <div className="flex flex-col gap-2">
        <button
          title={t('settings.general.deleteAllLabel')}
          className="flex w-fit cursor-pointer items-center justify-between rounded-3xl border border-solid border-rosso-corsa bg-transparent px-5 py-3 text-sm font-medium tracking-[0.015em] text-rosso-corsa transition-colors hover:bg-rosso-corsa hover:font-bold hover:tracking-normal hover:text-white"
          className="border-rosso-corsa text-rosso-corsa hover:bg-rosso-corsa flex w-fit cursor-pointer items-center justify-between rounded-3xl border border-solid bg-transparent px-5 py-3 text-sm font-medium tracking-[0.015em] transition-colors hover:font-bold hover:tracking-normal hover:text-white"
          onClick={() => dispatch(setModalStateDeleteConv('ACTIVE'))}
        >
          {t('settings.general.deleteAllBtn')}

@@ -4,17 +4,29 @@ import { useSelector } from 'react-redux';

import userService from '../api/services/userService';
import Dropdown from '../components/Dropdown';
import { DropdownProps } from '../components/types/Dropdown.types';
import ConfirmationModal from '../modals/ConfirmationModal';
import { ActiveState, PromptProps } from '../models/misc';
import { selectToken } from '../preferences/preferenceSlice';
import PromptsModal from '../preferences/PromptsModal';
import ConfirmationModal from '../modals/ConfirmationModal';

type ExtendedPromptProps = PromptProps & {
  title?: string;
  titleClassName?: string;
  dropdownProps?: Partial<DropdownProps>;
  showAddButton?: boolean;
};

export default function Prompts({
  prompts,
  selectedPrompt,
  onSelectPrompt,
  setPrompts,
}: PromptProps) {
  title,
  titleClassName = 'dark:text-bright-gray font-medium',
  dropdownProps = {},
  showAddButton = true,
}: ExtendedPromptProps) {
  const handleSelectPrompt = ({
    name,
    id,
@@ -27,6 +39,7 @@ export default function Prompts({
    setEditPromptName(name);
    onSelectPrompt(name, id, type);
  };

  const token = useSelector(selectToken);
  const [newPromptName, setNewPromptName] = React.useState('');
  const [newPromptContent, setNewPromptContent] = React.useState('');
@@ -164,18 +177,19 @@ export default function Prompts({
  return (
    <>
      <div>
        <div className="flex flex-col gap-4">
          <p className="font-medium dark:text-bright-gray">
            {t('settings.general.prompt')}
        <div className="flex flex-col gap-3">
          <p className={titleClassName}>
            {title ? title : t('settings.general.prompt')}
          </p>
          <div className="flex flex-row items-baseline justify-start gap-6">
          <div className="flex flex-row flex-wrap items-baseline justify-start gap-6">
            <Dropdown
              options={prompts}
              selectedValue={selectedPrompt.name}
              options={prompts.map((prompt: any) =>
                typeof prompt === 'string'
                  ? { name: prompt, id: prompt, type: '' }
                  : prompt,
              )}
              selectedValue={selectedPrompt ? selectedPrompt.name : ''}
              onSelect={handleSelectPrompt}
              size="w-56"
              rounded="3xl"
              border="border"
              showEdit
              showDelete={(prompt) => prompt.type !== 'public'}
              onEdit={({
@@ -185,26 +199,29 @@ export default function Prompts({
              }: {
                id: string;
                name: string;
                type: string;
                type?: string;
              }) => {
                setModalType('EDIT');
                setEditPromptName(name);
                handleFetchPromptContent(id);
                setCurrentPromptEdit({ id: id, name: name, type: type });
                setCurrentPromptEdit({ id: id, name: name, type: type ?? '' });
                setModalState('ACTIVE');
              }}
              onDelete={handleDeletePrompt}
              placeholder={'Select a prompt'}
              {...dropdownProps}
            />

            <button
              className="h-10 w-20 rounded-3xl border border-solid border-violets-are-blue text-sm text-violets-are-blue transition-colors hover:bg-violets-are-blue hover:text-white"
              onClick={() => {
                setModalType('ADD');
                setModalState('ACTIVE');
              }}
            >
              {t('settings.general.add')}
            </button>
            {showAddButton && (
              <button
                className="border-violets-are-blue text-violets-are-blue hover:bg-violets-are-blue h-10 w-20 rounded-3xl border border-solid text-sm transition-colors hover:text-white"
                onClick={() => {
                  setModalType('ADD');
                  setModalState('ACTIVE');
                }}
              >
                {t('settings.general.add')}
              </button>
            )}
          </div>
        </div>
      </div>

frontend/src/settings/Sources.tsx (new file, 519 lines)
@@ -0,0 +1,519 @@

import React, { useCallback, useEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';

import userService from '../api/services/userService';

import EyeView from '../assets/eye-view.svg';
import NoFilesIcon from '../assets/no-files.svg';
import NoFilesDarkIcon from '../assets/no-files-dark.svg';
import Trash from '../assets/red-trash.svg';
import SyncIcon from '../assets/sync.svg';
import ThreeDots from '../assets/three-dots.svg';
import CalendarIcon from '../assets/calendar.svg';
import DiscIcon from '../assets/disc.svg';
import ContextMenu, { MenuOption } from '../components/ContextMenu';
import Pagination from '../components/DocumentPagination';
import DropdownMenu from '../components/DropdownMenu';
import SkeletonLoader from '../components/SkeletonLoader';
import { useDarkTheme, useLoaderState } from '../hooks';
import ConfirmationModal from '../modals/ConfirmationModal';
import { ActiveState, Doc, DocumentsProps } from '../models/misc';
import { getDocs, getDocsWithPagination } from '../preferences/preferenceApi';
import {
  selectToken,
  setPaginatedDocuments,
  setSourceDocs,
} from '../preferences/preferenceSlice';
import Upload from '../upload/Upload';
import { formatDate } from '../utils/dateTimeUtils';
import FileTreeComponent from '../components/FileTreeComponent';
import ConnectorTreeComponent from '../components/ConnectorTreeComponent';
import Chunks from '../components/Chunks';

const formatTokens = (tokens: number): string => {
  const roundToTwoDecimals = (num: number): string => {
    return (Math.round((num + Number.EPSILON) * 100) / 100).toString();
  };

  if (tokens >= 1_000_000_000) {
    return roundToTwoDecimals(tokens / 1_000_000_000) + 'b';
  } else if (tokens >= 1_000_000) {
    return roundToTwoDecimals(tokens / 1_000_000) + 'm';
  } else if (tokens >= 1_000) {
    return roundToTwoDecimals(tokens / 1_000) + 'k';
  } else {
    return tokens.toString();
  }
};
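// Same helper as in Documents.tsx; if both components stay around, hoisting it
// into a shared util would avoid the duplication.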

export default function Sources({
  paginatedDocuments,
  handleDeleteDocument,
}: DocumentsProps) {
  const { t } = useTranslation();
  const [isDarkTheme] = useDarkTheme();
  const dispatch = useDispatch();
  const token = useSelector(selectToken);

  const [searchTerm, setSearchTerm] = useState<string>('');
  const [debouncedSearchTerm, setDebouncedSearchTerm] = useState<string>('');
  const [modalState, setModalState] = useState<ActiveState>('INACTIVE');
  const [isOnboarding, setIsOnboarding] = useState<boolean>(false);
  const [loading, setLoading] = useLoaderState(false);
  const [sortField, setSortField] = useState<'date' | 'tokens'>('date');
  const [sortOrder, setSortOrder] = useState<'asc' | 'desc'>('desc');
  // Pagination
  const [currentPage, setCurrentPage] = useState<number>(1);
  const [rowsPerPage, setRowsPerPage] = useState<number>(10);
  const [totalPages, setTotalPages] = useState<number>(1);

  const [activeMenuId, setActiveMenuId] = useState<string | null>(null);
  const menuRefs = useRef<{
    [key: string]: React.RefObject<HTMLDivElement | null>;
  }>({});

  // Create or get a ref for each document wrapper div (not the td)
  const getMenuRef = (docId: string) => {
    if (!menuRefs.current[docId]) {
      menuRefs.current[docId] = React.createRef<HTMLDivElement>();
    }
    return menuRefs.current[docId];
  };

  const handleMenuClick = (e: React.MouseEvent, docId: string) => {
    e.preventDefault();
    e.stopPropagation();

    const isAnyMenuOpen =
      (syncMenuState.isOpen && syncMenuState.docId === docId) ||
      activeMenuId === docId;

    if (isAnyMenuOpen) {
      setSyncMenuState((prev) => ({ ...prev, isOpen: false, docId: null }));
      setActiveMenuId(null);
      return;
    }
    setActiveMenuId(docId);
  };

  const currentDocuments = paginatedDocuments ?? [];
  const syncOptions = [
    { label: t('settings.sources.syncFrequency.never'), value: 'never' },
    { label: t('settings.sources.syncFrequency.daily'), value: 'daily' },
    { label: t('settings.sources.syncFrequency.weekly'), value: 'weekly' },
    { label: t('settings.sources.syncFrequency.monthly'), value: 'monthly' },
  ];
  const [documentToView, setDocumentToView] = useState<Doc>();
  const [isMenuOpen, setIsMenuOpen] = useState(false);
  const [syncMenuState, setSyncMenuState] = useState<{
    isOpen: boolean;
    docId: string | null;
    document: Doc | null;
  }>({
    isOpen: false,
    docId: null,
    document: null,
  });

  useEffect(() => {
    const timer = setTimeout(() => {
      setDebouncedSearchTerm(searchTerm);
    }, 500);

    return () => clearTimeout(timer);
  }, [searchTerm]);
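  // Debounce: the queries below key off debouncedSearchTerm, so a refetch fires
  // only after 500 ms without keystrokes; the cleanup cancels the pending timer.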

  const refreshDocs = useCallback(
    (
      field: 'date' | 'tokens' | undefined,
      pageNumber?: number,
      rows?: number,
    ) => {
      const page = pageNumber ?? currentPage;
      const rowsPerPg = rows ?? rowsPerPage;

      // If field is undefined (pagination or search), keep the current sortField
      const newSortField = field ?? sortField;

      // If field matches the current sortField, toggle the order; otherwise keep it
      const newSortOrder =
        field === sortField
          ? sortOrder === 'asc'
            ? 'desc'
            : 'asc'
          : sortOrder;

      // If field is defined, persist the new sortField and sortOrder
      if (field) {
        setSortField(newSortField);
        setSortOrder(newSortOrder);
      }

      setLoading(true);
      getDocsWithPagination(
        newSortField,
        newSortOrder,
        page,
        rowsPerPg,
        debouncedSearchTerm,
        token,
      )
        .then((data) => {
          dispatch(setPaginatedDocuments(data ? data.docs : []));
          setTotalPages(data ? data.totalPages : 0);
        })
        .catch((error) => console.error(error))
        .finally(() => {
          setLoading(false);
        });
    },
    [currentPage, rowsPerPage, sortField, sortOrder, debouncedSearchTerm],
  );

  const handleManageSync = (doc: Doc, sync_frequency: string) => {
    setLoading(true);
    userService
      .manageSync({ source_id: doc.id, sync_frequency }, token)
      .then(() => {
        return getDocs(token);
      })
      .then((data) => {
        dispatch(setSourceDocs(data));
        return getDocsWithPagination(
          sortField,
          sortOrder,
          currentPage,
          rowsPerPage,
          searchTerm,
          token,
        );
      })
      .then((paginatedData) => {
        dispatch(
          setPaginatedDocuments(paginatedData ? paginatedData.docs : []),
        );
        setTotalPages(paginatedData ? paginatedData.totalPages : 0);
      })
      .catch((error) => console.error('Error in handleManageSync:', error))
      .finally(() => {
        setLoading(false);
      });
  };

  const [documentToDelete, setDocumentToDelete] = useState<{
    index: number;
    document: Doc;
  } | null>(null);
  const [deleteModalState, setDeleteModalState] =
    useState<ActiveState>('INACTIVE');

  const handleDeleteConfirmation = (index: number, document: Doc) => {
    setDocumentToDelete({ index, document });
    setDeleteModalState('ACTIVE');
  };

  const handleConfirmedDelete = () => {
    if (documentToDelete) {
      handleDeleteDocument(documentToDelete.index, documentToDelete.document);
      setDeleteModalState('INACTIVE');
      setDocumentToDelete(null);
    }
  };

  const getActionOptions = (index: number, document: Doc): MenuOption[] => {
    const actions: MenuOption[] = [
      {
        icon: EyeView,
        label: t('settings.sources.view'),
        onClick: () => {
          setDocumentToView(document);
        },
        iconWidth: 18,
        iconHeight: 18,
        variant: 'primary',
      },
    ];

    if (document.syncFrequency) {
      actions.push({
        icon: SyncIcon,
        label: t('settings.sources.sync'),
        onClick: () => {
          setSyncMenuState({
            isOpen: true,
            docId: document.id ?? null,
            document: document,
          });
        },
        iconWidth: 14,
        iconHeight: 14,
        variant: 'primary',
      });
    }

    actions.push({
      icon: Trash,
      label: t('convTile.delete'),
      onClick: () => {
        handleDeleteConfirmation(index, document);
      },
      iconWidth: 18,
      iconHeight: 18,
      variant: 'danger',
    });

    return actions;
  };
  useEffect(() => {
    refreshDocs(undefined, 1, rowsPerPage);
  }, [debouncedSearchTerm]);
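  // Runs on mount and whenever the debounced term settles, always resetting to
  // page 1 so results start from the first page of the filtered set.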

  return documentToView ? (
    <div className="mt-8 flex flex-col">
      {documentToView.isNested ? (
        documentToView.type === 'connector' ? (
          <ConnectorTreeComponent
            docId={documentToView.id || ''}
            sourceName={documentToView.name}
            onBackToDocuments={() => setDocumentToView(undefined)}
          />
        ) : (
          <FileTreeComponent
            docId={documentToView.id || ''}
            sourceName={documentToView.name}
            onBackToDocuments={() => setDocumentToView(undefined)}
          />
        )
      ) : (
        <Chunks
          documentId={documentToView.id || ''}
          documentName={documentToView.name}
          handleGoBack={() => setDocumentToView(undefined)}
        />
      )}
    </div>
  ) : (
    <div className="mt-8 flex w-full max-w-full flex-col overflow-hidden">
      <div className="relative flex grow flex-col">
        <div className="mb-6">
          <h2 className="text-sonic-silver text-base font-medium">
            {t('settings.sources.title')}
          </h2>
        </div>
        <div className="mb-6 flex flex-col items-start justify-between gap-3 sm:flex-row sm:items-center">
          <div className="w-full sm:w-auto">
            <label htmlFor="document-search-input" className="sr-only">
              {t('settings.sources.searchPlaceholder')}
            </label>
            <div className="relative w-[280px]">
              <input
                maxLength={256}
                placeholder={t('settings.sources.searchPlaceholder')}
                name="Document-search-input"
                type="text"
                id="document-search-input"
                value={searchTerm}
                onChange={(e) => {
                  setSearchTerm(e.target.value);
                  setCurrentPage(1);
                }}
                className="w-full h-[32px] rounded-full border border-silver dark:border-silver/40 bg-transparent px-3 text-sm text-jet dark:text-bright-gray placeholder:text-gray-400 dark:placeholder:text-gray-500 outline-none focus:border-silver dark:focus:border-silver/60"
              />
            </div>
          </div>
          <button
            className="bg-purple-30 hover:bg-violets-are-blue flex h-[38px] min-w-[108px] items-center justify-center rounded-full px-4 text-[14px] whitespace-normal text-white"
            title={t('settings.sources.addSource')}
            onClick={() => {
              setIsOnboarding(false);
              setModalState('ACTIVE');
            }}
          >
            {t('settings.sources.addSource')}
          </button>
        </div>
        <div className="relative w-full">
          {loading ? (
            <div className="w-full grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 gap-6 px-2 py-4">
              <SkeletonLoader component="sourceCards" count={rowsPerPage} />
            </div>
          ) : !currentDocuments?.length ? (
            <div className="flex flex-col items-center justify-center py-12">
              <img
                src={isDarkTheme ? NoFilesDarkIcon : NoFilesIcon}
                alt={t('settings.sources.noData')}
                className="mx-auto mb-6 h-32 w-32"
              />
              <p className="text-center text-lg text-gray-500 dark:text-gray-400">
                {t('settings.sources.noData')}
              </p>
            </div>
          ) : (
            <div className="w-full grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 gap-6 px-2 py-4">
              {currentDocuments.map((document, index) => {
                const docId = document.id ? document.id.toString() : '';

                return (
                  <div key={docId} className="relative">
                    <div
                      className={`flex h-[130px] w-full flex-col rounded-2xl bg-[#F9F9F9] p-3 transition-all duration-200 dark:bg-[#383838] ${
                        activeMenuId === docId || syncMenuState.docId === docId
                          ? 'scale-[1.05]'
                          : 'hover:scale-[1.05]'
                      }`}
                    >
                      <div className="w-full flex-1">
                        <div className="flex w-full items-center justify-between gap-2">
                          <h3
                            className="font-inter dark:text-bright-gray line-clamp-3 text-[13px] leading-[18px] font-semibold break-words text-[#18181B]"
                            title={document.name}
                          >
                            {document.name}
                          </h3>
                          <div
                            ref={getMenuRef(docId)}
                            className="relative flex items-center justify-end"
                          >
                            {document.syncFrequency && (
                              <DropdownMenu
                                name={t('settings.sources.sync')}
                                options={syncOptions}
                                onSelect={(value: string) => {
                                  handleManageSync(document, value);
                                }}
                                defaultValue={document.syncFrequency}
                                icon={SyncIcon}
                                isOpen={
                                  syncMenuState.docId === docId &&
                                  syncMenuState.isOpen
                                }
                                onOpenChange={(isOpen) => {
                                  setSyncMenuState((prev) => ({
                                    ...prev,
                                    isOpen,
                                    docId: isOpen ? docId : null,
                                    document: isOpen ? document : null,
                                  }));
                                }}
                                anchorRef={getMenuRef(docId)}
                                position="bottom-left"
                                offset={{ x: -8, y: 8 }}
                                className="min-w-[120px]"
                              />
                            )}
                            <button
                              onClick={(e) => {
                                e.stopPropagation();
                                handleMenuClick(e, docId);
                              }}
                              className="inline-flex h-[35px] w-[24px] shrink-0 items-center justify-center rounded-md transition-colors hover:bg-[#EBEBEB] dark:hover:bg-[#26272E]"
                              aria-label={t('settings.sources.menuAlt')}
                              data-testid={`menu-button-${docId}`}
                            >
                              <img
                                src={ThreeDots}
                                alt={t('settings.sources.menuAlt')}
                                className="opacity-60 hover:opacity-100"
                              />
                            </button>
                          </div>
                        </div>
                      </div>

                      <div className="flex flex-col items-start justify-start gap-1">
                        <div className="flex items-center gap-2">
                          <img
                            src={CalendarIcon}
                            alt=""
                            className="w-[14px] h-[14px]"
                          />
                          <span className="font-inter text-[12px] leading-[18px] font-[500] text-[#848484] dark:text-[#848484]">
                            {document.date ? formatDate(document.date) : ''}
                          </span>
                        </div>
                        <div className="flex items-center gap-2">
                          <img
                            src={DiscIcon}
                            alt=""
                            className="w-[14px] h-[14px]"
                          />
                          <span className="font-inter text-[12px] leading-[18px] font-[500] text-[#848484] dark:text-[#848484]">
                            {document.tokens
                              ? formatTokens(+document.tokens)
                              : ''}
                          </span>
                        </div>
                      </div>
                    </div>
                    <ContextMenu
                      isOpen={activeMenuId === docId}
                      setIsOpen={(isOpen) => {
                        setActiveMenuId(isOpen ? docId : null);
                      }}
                      options={getActionOptions(index, document)}
                      anchorRef={getMenuRef(docId)}
                      position="bottom-left"
                      offset={{ x: -8, y: 8 }}
                      className="z-50"
                    />
                  </div>
                );
              })}
            </div>
          )}
        </div>
      </div>

      {currentDocuments.length > 0 && totalPages > 1 && (
        <div className="mt-auto pt-4">
          <Pagination
            currentPage={currentPage}
            totalPages={totalPages}
            rowsPerPage={rowsPerPage}
            onPageChange={(page) => {
              setCurrentPage(page);
              refreshDocs(undefined, page, rowsPerPage);
            }}
            onRowsPerPageChange={(rows) => {
              setRowsPerPage(rows);
              setCurrentPage(1);
              refreshDocs(undefined, 1, rows);
            }}
          />
        </div>
      )}

      {modalState === 'ACTIVE' && (
        <Upload
          receivedFile={[]}
          setModalState={setModalState}
          isOnboarding={isOnboarding}
          renderTab={null}
          close={() => setModalState('INACTIVE')}
          onSuccessfulUpload={() =>
            refreshDocs(undefined, currentPage, rowsPerPage)
          }
        />
      )}

      {deleteModalState === 'ACTIVE' && documentToDelete && (
        <ConfirmationModal
          message={t('settings.sources.deleteWarning', {
            name: documentToDelete.document.name,
          })}
          modalState={deleteModalState}
          setModalState={setDeleteModalState}
          handleSubmit={handleConfirmedDelete}
          handleCancel={() => {
            setDeleteModalState('INACTIVE');
            setDocumentToDelete(null);
          }}
          submitLabel={t('convTile.delete')}
          variant="danger"
        />
      )}
    </div>
  );
}
@@ -21,7 +21,7 @@ import {
  setSourceDocs,
} from '../preferences/preferenceSlice';
import Analytics from './Analytics';
import Documents from './Documents';
import Sources from './Sources';
import General from './General';
import Logs from './Logs';
import Tools from './Tools';
@@ -38,8 +38,8 @@ export default function Settings() {

  const getActiveTabFromPath = () => {
    const path = location.pathname;
    if (path.includes('/settings/documents'))
      return t('settings.documents.label');
    if (path.includes('/settings/sources'))
      return t('settings.sources.label');
    if (path.includes('/settings/analytics'))
      return t('settings.analytics.label');
    if (path.includes('/settings/logs')) return t('settings.logs.label');
@@ -53,8 +53,8 @@ export default function Settings() {
  const handleTabChange = (tab: string) => {
    setActiveTab(tab);
    if (tab === t('settings.general.label')) navigate('/settings');
    else if (tab === t('settings.documents.label'))
      navigate('/settings/documents');
    else if (tab === t('settings.sources.label'))
      navigate('/settings/sources');
    else if (tab === t('settings.analytics.label'))
      navigate('/settings/analytics');
    else if (tab === t('settings.logs.label')) navigate('/settings/logs');
@@ -113,9 +113,9 @@ export default function Settings() {
    <Routes>
      <Route index element={<General />} />
      <Route
        path="documents"
        path="sources"
        element={
          <Documents
          <Sources
            paginatedDocuments={paginatedDocuments}
            handleDeleteDocument={handleDeleteClick}
          />

@@ -4,6 +4,9 @@ import { useTranslation } from 'react-i18next';
import { useDispatch, useSelector } from 'react-redux';

import userService from '../api/services/userService';
import { getSessionToken, setSessionToken, removeSessionToken } from '../utils/providerUtils';
import { formatDate } from '../utils/dateTimeUtils';
import { formatBytes } from '../utils/stringUtils';
import FileUpload from '../assets/file_upload.svg';
import WebsiteCollect from '../assets/website_collect.svg';
import Dropdown from '../components/Dropdown';
@@ -25,6 +28,9 @@ import {
  IngestorFormSchemas,
  IngestorType,
} from './types/ingestor';
import FileIcon from '../assets/file.svg';
import FolderIcon from '../assets/folder.svg';
import ConnectorAuth from '../components/ConnectorAuth';

function Upload({
  receivedFile = [],
@@ -48,6 +54,21 @@
  const [activeTab, setActiveTab] = useState<string | null>(renderTab);
  const [showAdvancedOptions, setShowAdvancedOptions] = useState(false);

  // Google Drive state
  const [isGoogleDriveConnected, setIsGoogleDriveConnected] = useState(false);
  const [googleDriveFiles, setGoogleDriveFiles] = useState<any[]>([]);
  const [selectedFiles, setSelectedFiles] = useState<string[]>([]);
  const [isLoadingFiles, setIsLoadingFiles] = useState(false);
  const [isAuthenticating, setIsAuthenticating] = useState(false);
  const [userEmail, setUserEmail] = useState<string>('');
  const [authError, setAuthError] = useState<string>('');
  const [currentFolderId, setCurrentFolderId] = useState<string | null>(null);
  const [folderPath, setFolderPath] = useState<Array<{id: string | null, name: string}>>([{id: null, name: 'My Drive'}]);

  const [nextPageToken, setNextPageToken] = useState<string | null>(null);
  const [hasMoreFiles, setHasMoreFiles] = useState<boolean>(false);
  const scrollContainerRef = useRef<HTMLDivElement | null>(null);

  const renderFormFields = () => {
    const schema = IngestorFormSchemas[ingestor.type];
    if (!schema) return null;
@@ -204,6 +225,7 @@
    { label: 'Link', value: 'url' },
    { label: 'GitHub', value: 'github' },
    { label: 'Reddit', value: 'reddit' },
    { label: 'Google Drive', value: 'google_drive' },
  ];

  const sourceDocs = useSelector(selectSourceDocs);
@@ -315,8 +337,7 @@
              data?.find(
                (d: Doc) => d.type?.toLowerCase() === 'local',
              ),
            ),
          );
        ));
      });
      setProgress(
        (progress) =>
@@ -428,29 +449,32 @@
    formData.append('user', 'local');
    formData.append('source', ingestor.type);

    const defaultConfig = IngestorDefaultConfigs[ingestor.type].config;
    let configData;

    const mergedConfig = { ...defaultConfig, ...ingestor.config };
    const filteredConfig = Object.entries(mergedConfig).reduce(
      (acc, [key, value]) => {
        const field = IngestorFormSchemas[ingestor.type].find(
          (f) => f.name === key,
        );
        // Include the field if:
        // 1. It's required, or
        // 2. It's optional and has a non-empty value
        if (
          field?.required ||
          (value !== undefined && value !== null && value !== '')
        ) {
          acc[key] = value;
        }
        return acc;
      },
      {} as Record<string, any>,
    );
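    // Example (hypothetical values): for a 'url' ingestor configured as
    // { url: 'https://example.com', name: '' }, the empty optional 'name' is
    // dropped and only required or non-empty fields survive the reduce.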
    if (ingestor.type === 'google_drive') {
      const sessionToken = getSessionToken(ingestor.type);

    formData.append('data', JSON.stringify(filteredConfig));
      const selectedItems = googleDriveFiles.filter(file => selectedFiles.includes(file.id));
      const selectedFolderIds = selectedItems
        .filter(item => item.type === 'application/vnd.google-apps.folder' || item.isFolder)
        .map(folder => folder.id);

      const selectedFileIds = selectedItems
        .filter(item => item.type !== 'application/vnd.google-apps.folder' && !item.isFolder)
        .map(file => file.id);

      configData = {
        file_ids: selectedFileIds,
        folder_ids: selectedFolderIds,
        recursive: ingestor.config.recursive,
        session_token: sessionToken || null
      };
    } else {

      configData = { ...ingestor.config };
    }

    formData.append('data', JSON.stringify(configData));

    const apiHost: string = import.meta.env.VITE_API_HOST;
    const xhr = new XMLHttpRequest();
@@ -477,6 +501,171 @@
    xhr.setRequestHeader('Authorization', `Bearer ${token}`);
    xhr.send(formData);
  };
||||
|
||||
useEffect(() => {
|
||||
if (ingestor.type === 'google_drive') {
|
||||
const sessionToken = getSessionToken(ingestor.type);
|
||||
|
||||
if (sessionToken) {
|
||||
// Auto-authenticate if session token exists
|
||||
setIsGoogleDriveConnected(true);
|
||||
setAuthError('');
|
||||
|
||||
// Fetch user email and files using the existing session token
|
||||
|
||||
fetchUserEmailAndLoadFiles(sessionToken);
|
||||
}
|
||||
}
|
||||
}, [ingestor.type]);
|
||||
|
||||
const fetchUserEmailAndLoadFiles = async (sessionToken: string) => {
|
||||
try {
|
||||
const apiHost = import.meta.env.VITE_API_HOST;
|
||||
|
||||
const validateResponse = await fetch(`${apiHost}/api/connectors/validate-session`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${token}`
|
||||
},
|
||||
body: JSON.stringify({ provider: 'google_drive', session_token: sessionToken })
|
||||
});
|
||||
|
||||
if (!validateResponse.ok) {
|
||||
removeSessionToken(ingestor.type);
|
||||
setIsGoogleDriveConnected(false);
|
||||
setAuthError('Session expired. Please reconnect to Google Drive.');
|
||||
return;
|
||||
}
|
||||
|
||||
const validateData = await validateResponse.json();
|
||||
|
||||
if (validateData.success) {
|
||||
setUserEmail(validateData.user_email || 'Connected User');
|
||||
// reset pagination state and files
|
||||
setGoogleDriveFiles([]);
|
||||
|
||||
|
||||
|
||||
setNextPageToken(null);
|
||||
setHasMoreFiles(false);
|
||||
loadGoogleDriveFiles(sessionToken, null, null, false);
|
||||
} else {
|
||||
removeSessionToken(ingestor.type);
|
||||
setIsGoogleDriveConnected(false);
|
||||
setAuthError(validateData.error || 'Session expired. Please reconnect your Google Drive account and make sure to grant offline access.');
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error validating Google Drive session:', error);
|
||||
setAuthError('Failed to validate session. Please reconnect.');
|
||||
setIsGoogleDriveConnected(false);
|
||||
}
|
||||
};
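  // Sketch of the response shape this handler assumes from
  // POST /api/connectors/validate-session (field names inferred from the reads above):
  //   success:    boolean  -- false clears the stored token and surfaces authError
  //   user_email: string   -- shown as the connected account, else 'Connected User'
  //   error:      string   -- optional message used when success is false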
  const loadGoogleDriveFiles = async (
    sessionToken: string,
    folderId?: string | null,
    pageToken?: string | null,
    append: boolean = false,
  ) => {
    setIsLoadingFiles(true);

    try {
      const apiHost = import.meta.env.VITE_API_HOST;
      const requestBody: any = {
        session_token: sessionToken,
        limit: 10,
      };
      if (folderId) {
        requestBody.folder_id = folderId;
      }
      if (pageToken) {
        requestBody.page_token = pageToken;
      }

      const filesResponse = await fetch(`${apiHost}/api/connectors/files`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${token}`
        },
        body: JSON.stringify({ ...requestBody, provider: 'google_drive' })
      });

      if (!filesResponse.ok) {
        throw new Error(`Failed to load files: ${filesResponse.status}`);
      }

      const filesData = await filesResponse.json();

      if (filesData.success && Array.isArray(filesData.files)) {
        setGoogleDriveFiles(prev => append ? [...prev, ...filesData.files] : filesData.files);
        setNextPageToken(filesData.next_page_token || null);
        setHasMoreFiles(Boolean(filesData.has_more));
      } else {
        throw new Error(filesData.error || 'Failed to load files');
      }
    } catch (error) {
      console.error('Error loading Google Drive files:', error);
      setAuthError(error instanceof Error ? error.message : 'Failed to load files. Please make sure your Google Drive account is properly connected and that you granted offline access during authorization.');
    } finally {
      setIsLoadingFiles(false);
    }
  };
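  // Sketch of the paginated contract this function assumes for
  // POST /api/connectors/files (inferred from the request/response handling above):
  //   request:  { provider: 'google_drive', session_token, limit: 10,
  //               folder_id?: string, page_token?: string }
  //   response: { success: boolean, files: DriveFile[],
  //               next_page_token?: string, has_more?: boolean }
  // Passing append = true concatenates the new page onto the files already shown.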
  // Toggle a file in or out of the current selection
  const handleFileSelect = (fileId: string) => {
    setSelectedFiles(prev => {
      if (prev.includes(fileId)) {
        return prev.filter(id => id !== fileId);
      } else {
        return [...prev, fileId];
      }
    });
  };

  const handleFolderClick = (folderId: string, folderName: string) => {
    const sessionToken = getSessionToken(ingestor.type);
    if (sessionToken) {
      setCurrentFolderId(folderId);
      setFolderPath(prev => [...prev, {id: folderId, name: folderName}]);

      setGoogleDriveFiles([]);
      setNextPageToken(null);
      setHasMoreFiles(false);
      setSelectedFiles([]);
      loadGoogleDriveFiles(sessionToken, folderId, null, false);
    }
  };

  const navigateBack = (index: number) => {
    const sessionToken = getSessionToken(ingestor.type);
    if (sessionToken) {
      const newPath = folderPath.slice(0, index + 1);
      const targetFolderId = newPath[newPath.length - 1]?.id;

      setCurrentFolderId(targetFolderId as string | null);
      setFolderPath(newPath);

      setGoogleDriveFiles([]);
      setNextPageToken(null);
      setHasMoreFiles(false);
      setSelectedFiles([]);
      loadGoogleDriveFiles(sessionToken, targetFolderId ?? null, null, false);
    }
  };
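  // Worked example (hypothetical path): with folderPath =
  //   [My Drive, Projects, Docs]
  // clicking the crumb at index 1 slices the path to [My Drive, Projects], so
  // targetFolderId becomes the Projects folder id and that folder is reloaded;
  // index 0 yields id null, which loadGoogleDriveFiles treats as the Drive root.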
  const handleSelectAll = () => {
    if (selectedFiles.length === googleDriveFiles.length) {
      setSelectedFiles([]);
    } else {
      setSelectedFiles(googleDriveFiles.map(file => file.id));
    }
  };

  const { getRootProps, getInputProps, isDragActive } = useDropzone({
    onDrop,
    multiple: true,
@@ -515,6 +704,10 @@ function Upload({
    if (!remoteName?.trim()) {
      return true;
    }
    if (ingestor.type === 'google_drive') {
      return !isGoogleDriveConnected || selectedFiles.length === 0;
    }

    const formFields: FormField[] = IngestorFormSchemas[ingestor.type];
    for (const field of formFields) {
      if (field.required) {
@@ -636,7 +829,7 @@ function Upload({
        {files.map((file) => (
          <p
            key={file.name}
            className="text-gray-6000 truncate overflow-hidden text-ellipsis"
            className="text-gray-6000 dark:text-[#ececf1] truncate overflow-hidden text-ellipsis"
            title={file.name}
          >
            {file.name}
@@ -679,6 +872,202 @@ function Upload({
            required={true}
            labelBgClassName="bg-white dark:bg-charleston-green-2"
          />
          {ingestor.type === 'google_drive' && (
            <div className="space-y-4">
              {authError && (
                <div className="rounded-lg border border-red-200 bg-red-50 p-3 dark:border-red-600 dark:bg-red-900/20">
                  <p className="text-sm text-red-600 dark:text-red-400">
                    ⚠️ {authError}
                  </p>
                </div>
              )}

              {!isGoogleDriveConnected ? (
                <ConnectorAuth
                  provider="google_drive"
                  onSuccess={(data) => {
                    setUserEmail(data.user_email);
                    setIsGoogleDriveConnected(true);
                    setIsAuthenticating(false);
                    setAuthError('');

                    if (data.session_token) {
                      setSessionToken(ingestor.type, data.session_token);
                      loadGoogleDriveFiles(data.session_token, null);
                    }
                  }}
                  onError={(error) => {
                    setAuthError(error);
                    setIsAuthenticating(false);
                    setIsGoogleDriveConnected(false);
                  }}
                />
              ) : (
                <div className="space-y-4">
                  {/* Connection Status */}
                  <div className="w-full flex items-center justify-between rounded-lg bg-green-500 px-4 py-2 text-white text-sm">
                    <div className="flex items-center gap-2">
                      <svg className="h-4 w-4" viewBox="0 0 24 24">
                        <path fill="currentColor" d="M9 16.17L4.83 12l-1.42 1.41L9 19 21 7l-1.41-1.41z"/>
                      </svg>
                      <span>Connected as {userEmail}</span>
                    </div>
                    <button
                      onClick={() => {
                        removeSessionToken(ingestor.type);

                        setIsGoogleDriveConnected(false);
                        setGoogleDriveFiles([]);
                        setSelectedFiles([]);
                        setUserEmail('');
                        setAuthError('');

                        const apiHost = import.meta.env.VITE_API_HOST;
                        fetch(`${apiHost}/api/connectors/disconnect`, {
                          method: 'POST',
                          headers: {
                            'Content-Type': 'application/json',
                            'Authorization': `Bearer ${token}`
                          },
                          body: JSON.stringify({ provider: ingestor.type, session_token: getSessionToken(ingestor.type) })
                        }).catch(err => console.error('Error disconnecting from Google Drive:', err));
                      }}
                      className="text-white hover:text-gray-200 text-xs underline"
                    >
                      Disconnect
                    </button>
                  </div>

                  {/* File Browser */}
                  <div className="border border-gray-200 rounded-lg dark:border-gray-600">
                    <div className="p-3 border-b border-gray-200 dark:border-gray-600 bg-gray-50 dark:bg-gray-800 rounded-t-lg">
                      {/* Breadcrumb navigation */}
                      <div className="flex items-center gap-1 mb-2">
                        {folderPath.map((path, index) => (
                          <div key={path.id || 'root'} className="flex items-center gap-1">
                            {index > 0 && <span className="text-gray-400">/</span>}
                            <button
                              onClick={() => navigateBack(index)}
                              className="text-sm text-blue-600 hover:text-blue-800 dark:text-blue-400 hover:underline"
                              disabled={index === folderPath.length - 1}
                            >
                              {path.name}
                            </button>
                          </div>
                        ))}
                      </div>

                      <div className="flex items-center justify-between">
                        <h4 className="text-sm font-medium text-gray-700 dark:text-gray-300">
                          Select Files from Google Drive
                        </h4>
                        {googleDriveFiles.length > 0 && (
                          <button
                            onClick={handleSelectAll}
                            className="text-xs text-blue-600 hover:text-blue-800 dark:text-blue-400"
                          >
                            {selectedFiles.length === googleDriveFiles.length ? 'Deselect All' : 'Select All'}
                          </button>
                        )}
                      </div>
                      {selectedFiles.length > 0 && (
                        <p className="text-xs text-gray-500 mt-1">
                          {selectedFiles.length} file{selectedFiles.length !== 1 ? 's' : ''} selected
                        </p>
                      )}
                    </div>

                    <div className="max-h-72 overflow-y-auto" ref={scrollContainerRef}>
                      {isLoadingFiles && googleDriveFiles.length === 0 ? (
                        <div className="p-4 text-center">
                          <div className="inline-flex items-center gap-2 text-sm text-gray-600 dark:text-gray-400">
                            <div className="h-4 w-4 animate-spin rounded-full border-2 border-blue-500 border-t-transparent"></div>
                            Loading files...
                          </div>
                        </div>
                      ) : googleDriveFiles.length === 0 ? (
                        <div className="p-4 text-center text-sm text-gray-500 dark:text-gray-400">
                          No files found in your Google Drive
                        </div>
                      ) : (
                        <>
                          <div className="divide-y divide-gray-200 dark:divide-gray-600">
                            {googleDriveFiles.map((file) => (
                              <div
                                key={file.id}
                                className={`p-3 transition-colors ${
                                  selectedFiles.includes(file.id) ? 'bg-blue-50 dark:bg-blue-900/20' : ''
                                }`}
                              >
                                <div className="flex items-center gap-3">
                                  <div className="flex-shrink-0">
                                    <input
                                      type="checkbox"
                                      checked={selectedFiles.includes(file.id)}
                                      onChange={() => handleFileSelect(file.id)}
                                      className="h-4 w-4 text-blue-600 rounded border-gray-300 focus:ring-blue-500"
                                    />
                                  </div>
                                  {file.type === 'application/vnd.google-apps.folder' || file.isFolder ? (
                                    <div
                                      className="text-lg cursor-pointer hover:text-blue-600"
                                      onClick={() => handleFolderClick(file.id, file.name)}
                                    >
                                      <img src={FolderIcon} alt="Folder" className="h-6 w-6" />
                                    </div>
                                  ) : (
                                    <div className="text-lg">
                                      <img src={FileIcon} alt="File" className="h-6 w-6" />
                                    </div>
                                  )}
                                  <div className="flex-1 min-w-0">
                                    <p
                                      className={`text-sm font-medium truncate dark:text-[#ececf1] ${
                                        file.type === 'application/vnd.google-apps.folder' || file.isFolder
                                          ? 'cursor-pointer hover:text-blue-600'
                                          : ''
                                      }`}
                                      onClick={() => {
                                        if (file.type === 'application/vnd.google-apps.folder' || file.isFolder) {
                                          handleFolderClick(file.id, file.name);
                                        }
                                      }}
                                    >
                                      {file.name}
                                    </p>
                                    <p className="text-xs text-gray-500 dark:text-gray-400">
                                      {file.size && `${formatBytes(file.size)} • `}Modified {formatDate(file.modifiedTime)}
                                    </p>
                                  </div>
                                </div>
                              </div>
                            ))}
                          </div>

                          <div className="p-4 flex items-center justify-center border-t border-gray-100 dark:border-gray-800">
                            {isLoadingFiles && (
                              <div className="inline-flex items-center gap-2 text-sm text-gray-600 dark:text-gray-400">
                                <div className="h-4 w-4 animate-spin rounded-full border-2 border-blue-500 border-t-transparent"></div>
                                Loading more files...
                              </div>
                            )}
                            {!hasMoreFiles && !isLoadingFiles && (
                              <span className="text-sm text-gray-500 dark:text-gray-400">All files loaded</span>
                            )}
                          </div>
                        </>
                      )}
                    </div>

                    <div className="hidden" aria-hidden="true">
                    </div>

                  </div>
                </div>
              )}
            </div>
          )}
          {renderFormFields()}
          {IngestorFormSchemas[ingestor.type].some(
            (field) => field.advanced,
@@ -719,7 +1108,10 @@ function Upload({
                : 'bg-purple-30 hover:bg-violets-are-blue cursor-pointer text-white'
            }`}
          >
            {t('modals.uploadDoc.train')}
            {ingestor.type === 'google_drive' && selectedFiles.length > 0
              ? `Train with ${selectedFiles.length} file${selectedFiles.length !== 1 ? 's' : ''}`
              : t('modals.uploadDoc.train')
            }
          </button>
        )}
      </div>
@@ -727,6 +1119,30 @@ function Upload({
    );
  }

  useEffect(() => {
    const scrollContainer = scrollContainerRef.current;

    const handleScroll = () => {
      if (!scrollContainer) return;

      const { scrollTop, scrollHeight, clientHeight } = scrollContainer;
      const isNearBottom = scrollHeight - scrollTop - clientHeight < 50;

      if (isNearBottom && hasMoreFiles && !isLoadingFiles && nextPageToken) {
        const sessionToken = getSessionToken(ingestor.type);
        if (sessionToken) {
          loadGoogleDriveFiles(sessionToken, currentFolderId, nextPageToken, true);
        }
      }
    };

    scrollContainer?.addEventListener('scroll', handleScroll);

    return () => {
      scrollContainer?.removeEventListener('scroll', handleScroll);
    };
  }, [hasMoreFiles, isLoadingFiles, nextPageToken, currentFolderId, ingestor.type]);
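  // Worked example of the near-bottom test above: with scrollHeight = 1000,
  // clientHeight = 300 and scrollTop = 660, the remaining distance is
  // 1000 - 660 - 300 = 40 < 50, so the next page is requested just before the
  // user actually reaches the end of the list.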

  return (
    <WrapperModal
      isPerformingTask={progress !== undefined && progress.percentage < 100}

@@ -22,7 +22,14 @@ export interface UrlIngestorConfig extends BaseIngestorConfig {
  url: string;
}

export type IngestorType = 'crawler' | 'github' | 'reddit' | 'url';
export interface GoogleDriveIngestorConfig extends BaseIngestorConfig {
  folder_id?: string;
  file_ids?: string;
  recursive?: boolean;
  token_info?: any;
}

export type IngestorType = 'crawler' | 'github' | 'reddit' | 'url' | 'google_drive';

export interface IngestorConfig {
  type: IngestorType;
@@ -31,7 +38,8 @@ export interface IngestorConfig {
    | RedditIngestorConfig
    | GithubIngestorConfig
    | CrawlerIngestorConfig
    | UrlIngestorConfig;
    | UrlIngestorConfig
    | GoogleDriveIngestorConfig;
}

export type IngestorFormData = {
@@ -109,6 +117,14 @@ export const IngestorFormSchemas: Record<IngestorType, FormField[]> = {
      required: true,
    },
  ],
  google_drive: [
    {
      name: 'recursive',
      label: 'Include subfolders',
      type: 'boolean',
      required: false,
    },
  ],
};

export const IngestorDefaultConfigs: Record<
@@ -143,4 +159,12 @@ export const IngestorDefaultConfigs: Record<
      repo_url: '',
    } as GithubIngestorConfig,
  },
  google_drive: {
    name: '',
    config: {
      folder_id: '',
      file_ids: '',
      recursive: true,
    } as GoogleDriveIngestorConfig,
  },
};
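// Illustrative only (the id and name below are hypothetical, and the entry's
// shape follows the defaults above): a google_drive IngestorConfig could look like
//   const ingestor: IngestorConfig = {
//     type: 'google_drive',
//     name: 'Team Drive docs',
//     config: { folder_id: '0HiJkLmN', file_ids: '', recursive: true },
//   };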

@@ -34,6 +34,20 @@ export function formatDate(dateString: string): string {
      day: 'numeric',
      year: 'numeric',
    });
  } else if (
    /^[A-Za-z]{3}, \d{2} [A-Za-z]{3} \d{4} \d{2}:\d{2}:\d{2} GMT$/.test(
      dateString,
    )
  ) {
    // Format: "Fri, 08 Jul 2025 06:00:00 GMT"
    const dateTime = new Date(dateString);
    return dateTime.toLocaleDateString('en-US', {
      month: 'short',
      day: 'numeric',
      year: 'numeric',
      hour: '2-digit',
      minute: '2-digit',
    });
  } else {
    return dateString;
  }
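// Example for the RFC-1123 branch above (exact output depends on the runtime's
// locale data and time zone): formatDate('Fri, 08 Jul 2025 06:00:00 GMT')
// renders roughly as 'Jul 8, 2025, 06:00 AM', while strings matching neither
// pattern are returned unchanged.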
frontend/src/utils/providerUtils.ts (new file, 17 lines)
@@ -0,0 +1,17 @@
/**
 * Utility functions for managing session tokens for different cloud service providers.
 * Follows the convention: {provider}_session_token
 */

export const getSessionToken = (provider: string): string | null => {
  return localStorage.getItem(`${provider}_session_token`);
};

export const setSessionToken = (provider: string, token: string): void => {
  localStorage.setItem(`${provider}_session_token`, token);
};

export const removeSessionToken = (provider: string): void => {
  localStorage.removeItem(`${provider}_session_token`);
};
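// Usage sketch (the provider string is whatever the ingestor type is):
//   setSessionToken('google_drive', token);    // stores 'google_drive_session_token'
//   const t = getSessionToken('google_drive'); // null when no session is stored
//   removeSessionToken('google_drive');        // called on disconnect or expiry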