diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5d620820..2ea8961f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,10 +1,8 @@
name: Build and push DocsGPT Docker image
on:
- workflow_dispatch:
- push:
- branches:
- - main
+ release:
+ types: [published]
jobs:
deploy:
@@ -43,5 +41,7 @@ jobs:
context: ./application
push: true
tags: |
- ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
- ghcr.io/${{ github.repository_owner }}/docsgpt:latest
+ ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
+ ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt:latest
+ cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
+ cache-to: type=inline
diff --git a/.github/workflows/cife.yml b/.github/workflows/cife.yml
index 67aadfbb..73a97755 100644
--- a/.github/workflows/cife.yml
+++ b/.github/workflows/cife.yml
@@ -1,10 +1,8 @@
name: Build and push DocsGPT-FE Docker image
on:
- workflow_dispatch:
- push:
- branches:
- - main
+ release:
+ types: [published]
jobs:
deploy:
@@ -44,5 +42,7 @@ jobs:
context: ./frontend
push: true
tags: |
- ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
- ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
+ ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
+ ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
+ cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
+ cache-to: type=inline
diff --git a/.github/workflows/docker-develop-build.yml b/.github/workflows/docker-develop-build.yml
new file mode 100644
index 00000000..5edc69d7
--- /dev/null
+++ b/.github/workflows/docker-develop-build.yml
@@ -0,0 +1,49 @@
+name: Build and push DocsGPT Docker image for development
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+
+jobs:
+ deploy:
+ if: github.repository == 'arc53/DocsGPT'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Login to DockerHub
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Login to ghcr.io
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build and push Docker images to docker.io and ghcr.io
+ uses: docker/build-push-action@v4
+ with:
+ file: './application/Dockerfile'
+ platforms: linux/amd64
+ context: ./application
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
+ ghcr.io/${{ github.repository_owner }}/docsgpt:develop
+ cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
+ cache-to: type=inline
diff --git a/.github/workflows/docker-develop-fe-build.yml b/.github/workflows/docker-develop-fe-build.yml
new file mode 100644
index 00000000..29ad4524
--- /dev/null
+++ b/.github/workflows/docker-develop-fe-build.yml
@@ -0,0 +1,49 @@
+name: Build and push DocsGPT FE Docker image for development
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+
+jobs:
+ deploy:
+ if: github.repository == 'arc53/DocsGPT'
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Login to DockerHub
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Login to ghcr.io
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build and push Docker images to docker.io and ghcr.io
+ uses: docker/build-push-action@v4
+ with:
+ file: './frontend/Dockerfile'
+ platforms: linux/amd64
+ context: ./frontend
+ push: true
+ tags: |
+ ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
+ ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop
+ cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
+ cache-to: type=inline
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5200794b..1b0567e4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,7 +6,7 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful!
📣 **Discussions** - Engage in conversations, start new topics, or help answer questions.
-🐞 **Issues** - This is where we keep track of tasks. It could be bugs,fixes or suggestions for new features.
+🐞 **Issues** - This is where we keep track of tasks. It could be bugs, fixes or suggestions for new features.
🛠️ **Pull requests** - Suggest changes to our repository, either by working on existing issues or adding new features.
@@ -21,8 +21,9 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful!
- If you're interested in contributing code, here are some important things to know:
- We have a frontend built on React (Vite) and a backend in Python.
-=======
-Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version that you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).
+
+
+Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart).
### 👨💻 If you're interested in contributing code, here are some important things to know:
@@ -43,7 +44,7 @@ Please try to follow the guidelines.
### 🖥 If you are looking to contribute to Backend (🐍 Python):
-- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; they will be deprecated soon).
+- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; these will be deprecated soon).
- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder.
- Before submitting your Pull Request, ensure it can be queried after ingesting some test data.
@@ -125,4 +126,4 @@ Thank you for considering contributing to DocsGPT! 🙏
## Questions/collaboration
Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU). We're very friendly and welcoming to new contributors, so don't hesitate to reach out.
-# Thank you so much for considering to contribute DocsGPT!🙏
+# Thank you so much for considering contributing to DocsGPT!🙏
diff --git a/HACKTOBERFEST.md b/HACKTOBERFEST.md
index 47679960..8656bd84 100644
--- a/HACKTOBERFEST.md
+++ b/HACKTOBERFEST.md
@@ -4,7 +4,7 @@ Welcome, contributors! We're excited to announce that DocsGPT is participating i
All contributors with accepted PRs will receive a cool Holopin! 🤩 (Watch out for a reply in your PR to collect it).
-### 🏆 Top 50 contributors will recieve a special T-shirt
+### 🏆 Top 50 contributors will receive a special T-shirt
### 🏆 [LLM Document analysis by LexEU competition](https://github.com/arc53/DocsGPT/blob/main/lexeu-competition.md):
A separate competition is available for those who submit new retrieval / workflow method that will analyze a Document using EU laws.
@@ -16,14 +16,14 @@ You can find more information [here](https://github.com/arc53/DocsGPT/blob/main/
🛠️ Code: This is the golden ticket! Make meaningful contributions through PRs.
🧩 API extension: Build an app utilising DocsGPT API. We prefer submissions that showcase original ideas and turn the API into an AI agent.
-They can be a completely separate repo.
+They can be completely separate repos.
For example:
https://github.com/arc53/tg-bot-docsgpt-extenstion or
https://github.com/arc53/DocsGPT-cli
Non-Code Contributions:
-📚 Wiki: Improve our documentation, Create a guide or change existing documentation.
+📚 Wiki: Improve our documentation, create a guide or change existing documentation.
🖥️ Design: Improve the UI/UX or design a new feature.
@@ -37,5 +37,5 @@ Non-Code Contributions:
- Refer to the [Documentation](https://docs.docsgpt.cloud/).
- Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU) server. We're here to help newcomers, so don't hesitate to jump in! Join us [here](https://discord.gg/n5BX8dh8rU).
-Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typo) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀
+Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typos) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀
diff --git a/README.md b/README.md
index f1942dc1..ee9a1af6 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,8 @@ We're eager to provide personalized assistance when deploying your DocsGPT to a
[Send Email :email:](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions)
-
+
+
## Roadmap
diff --git a/application/api/answer/routes.py b/application/api/answer/routes.py
index 9a22db84..17eb5cc3 100644
--- a/application/api/answer/routes.py
+++ b/application/api/answer/routes.py
@@ -292,6 +292,7 @@ class Stream(Resource):
def post(self):
data = request.get_json()
required_fields = ["question"]
+
missing_fields = check_required_fields(data, required_fields)
if missing_fields:
return missing_fields
@@ -422,7 +423,7 @@ class Answer(Resource):
@api.doc(description="Provide an answer based on the question and retriever")
def post(self):
data = request.get_json()
- required_fields = ["question"]
+ required_fields = ["question"]
missing_fields = check_required_fields(data, required_fields)
if missing_fields:
return missing_fields
diff --git a/application/cache.py b/application/cache.py
new file mode 100644
index 00000000..33022e45
--- /dev/null
+++ b/application/cache.py
@@ -0,0 +1,93 @@
+import redis
+import time
+import json
+import logging
+from threading import Lock
+from application.core.settings import settings
+from application.utils import get_hash
+
+logger = logging.getLogger(__name__)
+
+_redis_instance = None
+_instance_lock = Lock()
+
+def get_redis_instance():
+ global _redis_instance
+ if _redis_instance is None:
+ with _instance_lock:
+ if _redis_instance is None:
+ try:
+ _redis_instance = redis.Redis.from_url(settings.CACHE_REDIS_URL, socket_connect_timeout=2)
+ except redis.ConnectionError as e:
+ logger.error(f"Redis connection error: {e}")
+ _redis_instance = None
+ return _redis_instance
+
+def gen_cache_key(*messages, model="docgpt"):
+ if not all(isinstance(msg, dict) for msg in messages):
+ raise ValueError("All messages must be dictionaries.")
+ messages_str = json.dumps(list(messages), sort_keys=True)
+ combined = f"{model}_{messages_str}"
+ cache_key = get_hash(combined)
+ return cache_key
+
+def gen_cache(func):
+ def wrapper(self, model, messages, *args, **kwargs):
+ try:
+ cache_key = gen_cache_key(*messages)
+ redis_client = get_redis_instance()
+ if redis_client:
+ try:
+ cached_response = redis_client.get(cache_key)
+ if cached_response:
+ return cached_response.decode('utf-8')
+ except redis.ConnectionError as e:
+ logger.error(f"Redis connection error: {e}")
+
+ result = func(self, model, messages, *args, **kwargs)
+ if redis_client:
+ try:
+ redis_client.set(cache_key, result, ex=1800)
+ except redis.ConnectionError as e:
+ logger.error(f"Redis connection error: {e}")
+
+ return result
+ except ValueError as e:
+ logger.error(e)
+ return "Error: No user message found in the conversation to generate a cache key."
+ return wrapper
+
+def stream_cache(func):
+ def wrapper(self, model, messages, stream, *args, **kwargs):
+ cache_key = gen_cache_key(*messages)
+ logger.info(f"Stream cache key: {cache_key}")
+
+ redis_client = get_redis_instance()
+ if redis_client:
+ try:
+ cached_response = redis_client.get(cache_key)
+ if cached_response:
+ logger.info(f"Cache hit for stream key: {cache_key}")
+ cached_response = json.loads(cached_response.decode('utf-8'))
+ for chunk in cached_response:
+ yield chunk
+ time.sleep(0.03)
+ return
+ except redis.ConnectionError as e:
+ logger.error(f"Redis connection error: {e}")
+
+ result = func(self, model, messages, stream, *args, **kwargs)
+ stream_cache_data = []
+
+ for chunk in result:
+ stream_cache_data.append(chunk)
+ yield chunk
+
+ if redis_client:
+ try:
+ redis_client.set(cache_key, json.dumps(stream_cache_data), ex=1800)
+ logger.info(f"Stream cache saved for key: {cache_key}")
+ except redis.ConnectionError as e:
+ logger.error(f"Redis connection error: {e}")
+
+ return wrapper
\ No newline at end of file
diff --git a/application/core/settings.py b/application/core/settings.py
index e6173be4..7346da08 100644
--- a/application/core/settings.py
+++ b/application/core/settings.py
@@ -21,6 +21,9 @@ class Settings(BaseSettings):
VECTOR_STORE: str = "faiss" # "faiss" or "elasticsearch" or "qdrant" or "milvus"
RETRIEVERS_ENABLED: list = ["classic_rag", "duckduck_search"] # also brave_search
+ # LLM Cache
+ CACHE_REDIS_URL: str = "redis://localhost:6379/2"
+
API_URL: str = "http://localhost:7091" # backend url for celery worker
API_KEY: Optional[str] = None # LLM api key
diff --git a/application/llm/base.py b/application/llm/base.py
index 475b7937..1caab5d3 100644
--- a/application/llm/base.py
+++ b/application/llm/base.py
@@ -1,28 +1,29 @@
from abc import ABC, abstractmethod
from application.usage import gen_token_usage, stream_token_usage
+from application.cache import stream_cache, gen_cache
class BaseLLM(ABC):
def __init__(self):
self.token_usage = {"prompt_tokens": 0, "generated_tokens": 0}
- def _apply_decorator(self, method, decorator, *args, **kwargs):
- return decorator(method, *args, **kwargs)
+ def _apply_decorator(self, method, decorators, *args, **kwargs):
+ for decorator in decorators:
+ method = decorator(method)
+ return method(self, *args, **kwargs)
@abstractmethod
def _raw_gen(self, model, messages, stream, *args, **kwargs):
pass
def gen(self, model, messages, stream=False, *args, **kwargs):
- return self._apply_decorator(self._raw_gen, gen_token_usage)(
- self, model=model, messages=messages, stream=stream, *args, **kwargs
- )
+ decorators = [gen_token_usage, gen_cache]
+ return self._apply_decorator(self._raw_gen, decorators=decorators, model=model, messages=messages, stream=stream, *args, **kwargs)
@abstractmethod
def _raw_gen_stream(self, model, messages, stream, *args, **kwargs):
pass
def gen_stream(self, model, messages, stream=True, *args, **kwargs):
- return self._apply_decorator(self._raw_gen_stream, stream_token_usage)(
- self, model=model, messages=messages, stream=stream, *args, **kwargs
- )
+ decorators = [stream_cache, stream_token_usage]
+ return self._apply_decorator(self._raw_gen_stream, decorators=decorators, model=model, messages=messages, stream=stream, *args, **kwargs)
\ No newline at end of file
diff --git a/application/requirements.txt b/application/requirements.txt
index 6a57dd12..6ea1d1ba 100644
--- a/application/requirements.txt
+++ b/application/requirements.txt
@@ -4,7 +4,7 @@ beautifulsoup4==4.12.3
celery==5.3.6
dataclasses-json==0.6.7
docx2txt==0.8
-duckduckgo-search==6.2.6
+duckduckgo-search==6.3.0
ebooklib==0.18
elastic-transport==8.15.0
elasticsearch==8.15.1
@@ -54,7 +54,7 @@ pathable==0.4.3
pillow==10.4.0
portalocker==2.10.1
prance==23.6.21.0
-primp==0.6.2
+primp==0.6.3
prompt-toolkit==3.0.47
protobuf==5.28.2
py==1.11.0
diff --git a/application/utils.py b/application/utils.py
index f0802c39..1fc9e329 100644
--- a/application/utils.py
+++ b/application/utils.py
@@ -1,6 +1,8 @@
import tiktoken
+import hashlib
from flask import jsonify, make_response
+
_encoding = None
@@ -39,3 +41,8 @@ def check_required_fields(data, required_fields):
400,
)
return None
+
+
+def get_hash(data):
+ return hashlib.md5(data.encode()).hexdigest()
+
diff --git a/docker-compose.yaml b/docker-compose.yaml
index f3b8a363..d3f3421a 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -20,6 +20,7 @@ services:
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
+ - CACHE_REDIS_URL=redis://redis:6379/2
ports:
- "7091:7091"
volumes:
@@ -41,6 +42,7 @@ services:
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
- API_URL=http://backend:7091
+ - CACHE_REDIS_URL=redis://redis:6379/2
depends_on:
- redis
- mongo
diff --git a/docs/README.md b/docs/README.md
index 4b90b598..12ebbf08 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -46,6 +46,6 @@ yarn install
yarn dev
```
-- Now, you should be able to view the docs on your local environment by visiting `http://localhost:5000`. You can explore the different markdown files and make changes as you see fit.
+- Now, you should be able to view the docs on your local environment by visiting `http://localhost:3000`. You can explore the different markdown files and make changes as you see fit.
- **Footnotes:** This guide assumes you have Node.js and npm installed. The guide involves running a local server using yarn, and viewing the documentation offline. If you encounter any issues, it may be worth verifying your Node.js and npm installations and whether you have installed yarn correctly.
diff --git a/docs/pages/Guides/How-to-train-on-other-documentation.mdx b/docs/pages/Guides/How-to-train-on-other-documentation.mdx
index e5429a04..f0149618 100644
--- a/docs/pages/Guides/How-to-train-on-other-documentation.mdx
+++ b/docs/pages/Guides/How-to-train-on-other-documentation.mdx
@@ -28,15 +28,15 @@ Navigate to the sidebar where you will find `Source Docs` option,here you will f
### Step 2
-Click on the `Upload icon` just beside the source docs options,now borwse and upload the document which you want to train on or select the `remote` option if you have to insert the link of the documentation.
+Click on the `Upload icon` just beside the source docs options, now browse and upload the document which you want to train on or select the `remote` option if you have to insert the link of the documentation.
### Step 3
-Now you will be able to see the name of the file uploaded under the Uploaded Files ,now click on `Train`,once you click on train it might take some time to train on the document. You will be able to see the `Training progress` and once the training is completed you can click the `finish` button and there you go your docuemnt is uploaded.
+Now you will be able to see the name of the file uploaded under the Uploaded Files, now click on `Train`; once you click on train it might take some time to train on the document. You will be able to see the `Training progress` and once the training is completed you can click the `finish` button and there you go, your document is uploaded.
### Step 4
-Go to `New chat` and from the side bar select the document you uploaded under the `Source Docs` and go ahead with your chat, now you can ask qestions regarding the document you uploaded and you will get the effective answer based on it.
+Go to `New chat` and from the side bar select the document you uploaded under the `Source Docs` and go ahead with your chat, now you can ask questions regarding the document you uploaded and you will get the effective answer based on it.
diff --git a/docs/pages/Guides/How-to-use-different-LLM.mdx b/docs/pages/Guides/How-to-use-different-LLM.mdx
index 7df77742..c867fdcc 100644
--- a/docs/pages/Guides/How-to-use-different-LLM.mdx
+++ b/docs/pages/Guides/How-to-use-different-LLM.mdx
@@ -33,7 +33,7 @@ For open source you have to edit .env file with LLM_NAME with their desired LLM
All the supported LLM providers are here application/llm and you can check what env variable are needed for each
List of latest supported LLMs are https://github.com/arc53/DocsGPT/blob/main/application/llm/llm_creator.py
### Step 3
-Visit application/llm and select the file of your selected llm and there you will find the speicifc requirements needed to be filled in order to use it,i.e API key of that llm.
+Visit application/llm and select the file of your selected llm and there you will find the specific requirements needed to be filled in order to use it, i.e. the API key of that llm.
### For OpenAI-Compatible Endpoints:
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 4087e4f5..75f4ea8e 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -1675,7 +1675,7 @@
"version": "18.3.0",
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz",
"integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==",
- "devOptional": true,
+ "dev": true,
"dependencies": {
"@types/react": "*"
}
diff --git a/frontend/signal-desktop-keyring.gpg b/frontend/signal-desktop-keyring.gpg
new file mode 100644
index 00000000..b5e68a04
Binary files /dev/null and b/frontend/signal-desktop-keyring.gpg differ
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index e1157141..176ae518 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -32,7 +32,10 @@ function MainLayout() {
}
export default function App() {
- useDarkTheme();
+ const [, , componentMounted] = useDarkTheme();
+ if (!componentMounted) {
+ return
;
+ }
return (
diff --git a/frontend/src/Navigation.tsx b/frontend/src/Navigation.tsx
index 3a55525a..3d1dc614 100644
--- a/frontend/src/Navigation.tsx
+++ b/frontend/src/Navigation.tsx
@@ -11,7 +11,6 @@ import Discord from './assets/discord.svg';
import Expand from './assets/expand.svg';
import Github from './assets/github.svg';
import Hamburger from './assets/hamburger.svg';
-import Info from './assets/info.svg';
import SettingGear from './assets/settingGear.svg';
import Twitter from './assets/TwitterX.svg';
import UploadIcon from './assets/upload.svg';
@@ -41,6 +40,7 @@ import {
setSourceDocs,
} from './preferences/preferenceSlice';
import Upload from './upload/Upload';
+import Help from './components/Help';
interface NavigationProps {
navOpen: boolean;
@@ -275,7 +275,10 @@ export default function Navigation({ navOpen, setNavOpen }: NavigationProps) {
{t('newChat')}
-