diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5d620820..2ea8961f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,10 +1,8 @@ name: Build and push DocsGPT Docker image on: - workflow_dispatch: - push: - branches: - - main + release: + types: [published] jobs: deploy: @@ -43,5 +41,7 @@ jobs: context: ./application push: true tags: | - ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest - ghcr.io/${{ github.repository_owner }}/docsgpt:latest + ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt:latest + ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt:latest + cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:latest + cache-to: type=inline diff --git a/.github/workflows/cife.yml b/.github/workflows/cife.yml index 67aadfbb..73a97755 100644 --- a/.github/workflows/cife.yml +++ b/.github/workflows/cife.yml @@ -1,10 +1,8 @@ name: Build and push DocsGPT-FE Docker image on: - workflow_dispatch: - push: - branches: - - main + release: + types: [published] jobs: deploy: @@ -44,5 +42,7 @@ jobs: context: ./frontend push: true tags: | - ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest - ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest + ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest + ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest + cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest + cache-to: type=inline diff --git a/.github/workflows/docker-develop-build.yml b/.github/workflows/docker-develop-build.yml new file mode 100644 index 00000000..5edc69d7 --- /dev/null +++ b/.github/workflows/docker-develop-build.yml @@ -0,0 +1,49 @@ +name: Build and push DocsGPT 
Docker image for development + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + deploy: + if: github.repository == 'arc53/DocsGPT' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Login to ghcr.io + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push Docker images to docker.io and ghcr.io + uses: docker/build-push-action@v4 + with: + file: './application/Dockerfile' + platforms: linux/amd64 + context: ./application + push: true + tags: | + ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop + ghcr.io/${{ github.repository_owner }}/docsgpt:develop + cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:develop + cache-to: type=inline diff --git a/.github/workflows/docker-develop-fe-build.yml b/.github/workflows/docker-develop-fe-build.yml new file mode 100644 index 00000000..29ad4524 --- /dev/null +++ b/.github/workflows/docker-develop-fe-build.yml @@ -0,0 +1,49 @@ +name: Build and push DocsGPT FE Docker image for development + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + deploy: + if: github.repository == 'arc53/DocsGPT' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Login to ghcr.io + 
uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build and push Docker images to docker.io and ghcr.io + uses: docker/build-push-action@v4 + with: + file: './frontend/Dockerfile' + platforms: linux/amd64 + context: ./frontend + push: true + tags: | + ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop + ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop + cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop + cache-to: type=inline diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5200794b..1b0567e4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,7 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful! 📣 **Discussions** - Engage in conversations, start new topics, or help answer questions. -🐞 **Issues** - This is where we keep track of tasks. It could be bugs,fixes or suggestions for new features. +🐞 **Issues** - This is where we keep track of tasks. It could be bugs, fixes or suggestions for new features. 🛠️ **Pull requests** - Suggest changes to our repository, either by working on existing issues or adding new features. @@ -21,8 +21,9 @@ Thank you for choosing to contribute to DocsGPT! We are all very grateful! - If you're interested in contributing code, here are some important things to know: - We have a frontend built on React (Vite) and a backend in Python. -======= -Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart) the version on our live demo is slightly modified with login. Your issues should relate to the version that you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart). 
+ + +Before creating issues, please check out how the latest version of our app looks and works by launching it via [Quickstart](https://github.com/arc53/DocsGPT#quickstart). The version on our live demo is slightly modified with login. Your issues should relate to the version you can launch via [Quickstart](https://github.com/arc53/DocsGPT#quickstart). ### 👨‍💻 If you're interested in contributing code, here are some important things to know: @@ -43,7 +44,7 @@ Please try to follow the guidelines. ### 🖥 If you are looking to contribute to Backend (🐍 Python): -- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; they will be deprecated soon). +- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; these will be deprecated soon). - All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Please find tests under [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder. - Before submitting your Pull Request, ensure it can be queried after ingesting some test data. @@ -125,4 +126,4 @@ Thank you for considering contributing to DocsGPT! 🙏 ## Questions/collaboration Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU). We're very friendly and welcoming to new contributors, so don't hesitate to reach out. 
-# Thank you so much for considering to contribute DocsGPT!🙏 +# Thank you so much for considering contributing to DocsGPT!🙏 diff --git a/HACKTOBERFEST.md b/HACKTOBERFEST.md index 631f73ba..8656bd84 100644 --- a/HACKTOBERFEST.md +++ b/HACKTOBERFEST.md @@ -4,10 +4,10 @@ Welcome, contributors! We're excited to announce that DocsGPT is participating i All contributors with accepted PRs will receive a cool Holopin! 🤩 (Watch out for a reply in your PR to collect it). -### 🏆 Top 50 contributors will recieve a special T-shirt +### 🏆 Top 50 contributors will receive a special T-shirt ### 🏆 [LLM Document analysis by LexEU competition](https://github.com/arc53/DocsGPT/blob/main/lexeu-competition.md): -A separate competition is available for those who sumbit new retrieval / workflow method that will analyze a Document using EU laws. +A separate competition is available for those who submit a new retrieval / workflow method that will analyze a Document using EU laws. With 200$, 100$, 50$ prize for 1st, 2nd and 3rd place respectively. You can find more information [here](https://github.com/arc53/DocsGPT/blob/main/lexeu-competition.md) @@ -16,14 +16,14 @@ You can find more information [here](https://github.com/arc53/DocsGPT/blob/main/ 🛠️ Code: This is the golden ticket! Make meaningful contributions through PRs. 🧩 API extension: Build an app utilising DocsGPT API. We prefer submissions that showcase original ideas and turn the API into an AI agent. -They can be a completely separate repo. +They can be completely separate repos. For example: https://github.com/arc53/tg-bot-docsgpt-extenstion or https://github.com/arc53/DocsGPT-cli Non-Code Contributions: -📚 Wiki: Improve our documentation, Create a guide or change existing documentation. +📚 Wiki: Improve our documentation, create a guide or change existing documentation. 🖥️ Design: Improve the UI/UX or design a new feature. @@ -37,5 +37,5 @@ Non-Code Contributions: - Refer to the [Documentation](https://docs.docsgpt.cloud/). 
- Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU) server. We're here to help newcomers, so don't hesitate to jump in! Join us [here](https://discord.gg/n5BX8dh8rU). -Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typo) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀 +Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typos) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀 diff --git a/README.md b/README.md index a88f2fc5..f1942dc1 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,6 @@ Say goodbye to time-consuming manual searches, and let None: + """Init params.""" + super().__init__(*args, **kwargs) + self._concat_rows = concat_rows + self._col_joiner = col_joiner + self._row_joiner = row_joiner + self._pandas_config = pandas_config + + def _init_parser(self) -> Dict: + """Init parser.""" + return {} + + def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, List[str]]: + """Parse file.""" + try: + import pandas as pd + except ImportError: + raise ValueError("pandas module is required to read Excel files.") + + df = pd.read_excel(file, **self._pandas_config) + + text_list = df.apply( + lambda row: (self._col_joiner).join(row.astype(str).tolist()), axis=1 + ).tolist() + + if self._concat_rows: + return (self._row_joiner).join(text_list) + else: + return text_list \ No newline at end of file diff --git a/application/parser/remote/github_loader.py b/application/parser/remote/github_loader.py index e69de29b..49f0ae9c 100644 --- a/application/parser/remote/github_loader.py +++ b/application/parser/remote/github_loader.py @@ -0,0 +1,53 @@ +import base64 +import requests +from typing import List +from application.parser.remote.base import BaseRemote 
+from langchain_core.documents import Document + +class GitHubLoader(BaseRemote): + def __init__(self): + self.access_token = None + self.headers = { + "Authorization": f"token {self.access_token}" + } if self.access_token else {} + return + + def fetch_file_content(self, repo_url: str, file_path: str) -> str: + url = f"https://api.github.com/repos/{repo_url}/contents/{file_path}" + response = requests.get(url, headers=self.headers) + + if response.status_code == 200: + content = response.json() + if content.get("encoding") == "base64": + try: + decoded_content = base64.b64decode(content["content"]).decode("utf-8") + return f"Filename: {file_path}\n\n{decoded_content}" + except Exception as e: + print(f"Error decoding content for {file_path}: {e}") + raise + else: + return f"Filename: {file_path}\n\n{content['content']}" + else: + response.raise_for_status() + + def fetch_repo_files(self, repo_url: str, path: str = "") -> List[str]: + url = f"https://api.github.com/repos/{repo_url}/contents/{path}" + response = requests.get(url, headers={**self.headers, "Accept": "application/vnd.github.v3.raw"}) + contents = response.json() + files = [] + for item in contents: + if item["type"] == "file": + files.append(item["path"]) + elif item["type"] == "dir": + files.extend(self.fetch_repo_files(repo_url, item["path"])) + return files + + def load_data(self, repo_url: str) -> List[Document]: + repo_name = repo_url.split("github.com/")[-1] + files = self.fetch_repo_files(repo_name) + documents = [] + for file_path in files: + content = self.fetch_file_content(repo_name, file_path) + documents.append(Document(page_content=content, metadata={"title": file_path, + "source": f"https://github.com/{repo_name}/blob/main/{file_path}"})) + return documents diff --git a/application/parser/remote/remote_creator.py b/application/parser/remote/remote_creator.py index d2a58f8d..026abd76 100644 --- a/application/parser/remote/remote_creator.py +++ b/application/parser/remote/remote_creator.py 
@@ -2,6 +2,7 @@ from application.parser.remote.sitemap_loader import SitemapLoader from application.parser.remote.crawler_loader import CrawlerLoader from application.parser.remote.web_loader import WebLoader from application.parser.remote.reddit_loader import RedditPostsLoaderRemote +from application.parser.remote.github_loader import GitHubLoader class RemoteCreator: @@ -10,6 +11,7 @@ class RemoteCreator: "sitemap": SitemapLoader, "crawler": CrawlerLoader, "reddit": RedditPostsLoaderRemote, + "github": GitHubLoader, } @classmethod diff --git a/application/requirements.txt b/application/requirements.txt index d7621cfd..6a57dd12 100644 --- a/application/requirements.txt +++ b/application/requirements.txt @@ -49,6 +49,7 @@ openapi3-parser==1.1.18 orjson==3.10.7 packaging==24.1 pandas==2.2.3 +openpyxl==3.1.5 pathable==0.4.3 pillow==10.4.0 portalocker==2.10.1 diff --git a/application/vectorstore/faiss.py b/application/vectorstore/faiss.py index e6c13bcd..afa55db9 100644 --- a/application/vectorstore/faiss.py +++ b/application/vectorstore/faiss.py @@ -22,7 +22,7 @@ class FaissStore(BaseVectorStore): else: self.docsearch = FAISS.load_local(self.path, embeddings, allow_dangerous_deserialization=True) except Exception: - raise # Just re-raise the exception without assigning to e + raise self.assert_embedding_dimensions(embeddings) diff --git a/application/worker.py b/application/worker.py index 2000523c..f8f38afa 100755 --- a/application/worker.py +++ b/application/worker.py @@ -131,10 +131,10 @@ def ingest_worker( logging.info(f"Ingest file: {full_path}", extra={"user": user, "job": name_job}) file_data = {"name": name_job, "file": filename, "user": user} - download_file(urljoin(settings.API_URL, "/api/download"), file_data, os.path.join(full_path, filename)) if not os.path.exists(full_path): os.makedirs(full_path) + download_file(urljoin(settings.API_URL, "/api/download"), file_data, os.path.join(full_path, filename)) # check if file is .zip and extract it if 
filename.endswith(".zip"): diff --git a/docs/README.md b/docs/README.md index 4b90b598..12ebbf08 100644 --- a/docs/README.md +++ b/docs/README.md @@ -46,6 +46,6 @@ yarn install yarn dev ``` -- Now, you should be able to view the docs on your local environment by visiting `http://localhost:5000`. You can explore the different markdown files and make changes as you see fit. +- Now, you should be able to view the docs on your local environment by visiting `http://localhost:3000`. You can explore the different markdown files and make changes as you see fit. - **Footnotes:** This guide assumes you have Node.js and npm installed. The guide involves running a local server using yarn, and viewing the documentation offline. If you encounter any issues, it may be worth verifying your Node.js and npm installations and whether you have installed yarn correctly. diff --git a/docs/pages/Guides/How-to-train-on-other-documentation.mdx b/docs/pages/Guides/How-to-train-on-other-documentation.mdx index e5429a04..f0149618 100644 --- a/docs/pages/Guides/How-to-train-on-other-documentation.mdx +++ b/docs/pages/Guides/How-to-train-on-other-documentation.mdx @@ -28,15 +28,15 @@ Navigate to the sidebar where you will find `Source Docs` option,here you will f ### Step 2 -Click on the `Upload icon` just beside the source docs options,now borwse and upload the document which you want to train on or select the `remote` option if you have to insert the link of the documentation. +Click on the `Upload icon` just beside the source docs options,now browse and upload the document which you want to train on or select the `remote` option if you have to insert the link of the documentation. ### Step 3 -Now you will be able to see the name of the file uploaded under the Uploaded Files ,now click on `Train`,once you click on train it might take some time to train on the document. 
You will be able to see the `Training progress` and once the training is completed you can click the `finish` button and there you go your docuemnt is uploaded. +Now you will be able to see the name of the file uploaded under the Uploaded Files ,now click on `Train`,once you click on train it might take some time to train on the document. You will be able to see the `Training progress` and once the training is completed you can click the `finish` button and there you go your document is uploaded. ### Step 4 -Go to `New chat` and from the side bar select the document you uploaded under the `Source Docs` and go ahead with your chat, now you can ask qestions regarding the document you uploaded and you will get the effective answer based on it. +Go to `New chat` and from the side bar select the document you uploaded under the `Source Docs` and go ahead with your chat, now you can ask questions regarding the document you uploaded and you will get the effective answer based on it. diff --git a/docs/pages/Guides/How-to-use-different-LLM.mdx b/docs/pages/Guides/How-to-use-different-LLM.mdx index 7df77742..c867fdcc 100644 --- a/docs/pages/Guides/How-to-use-different-LLM.mdx +++ b/docs/pages/Guides/How-to-use-different-LLM.mdx @@ -33,7 +33,7 @@ For open source you have to edit .env file with LLM_NAME with their desired LLM All the supported LLM providers are here application/llm and you can check what env variable are needed for each List of latest supported LLMs are https://github.com/arc53/DocsGPT/blob/main/application/llm/llm_creator.py ### Step 3 -Visit application/llm and select the file of your selected llm and there you will find the speicifc requirements needed to be filled in order to use it,i.e API key of that llm. +Visit application/llm and select the file of your selected llm and there you will find the specific requirements needed to be filled in order to use it,i.e API key of that llm. 
### For OpenAI-Compatible Endpoints: diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 1a6e0ce3..4087e4f5 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -9,7 +9,6 @@ "version": "0.0.0", "dependencies": { "@reduxjs/toolkit": "^2.2.7", - "@vercel/analytics": "^1.3.1", "chart.js": "^4.4.4", "i18next": "^23.15.1", "i18next-browser-languagedetector": "^8.0.0", @@ -2089,26 +2088,6 @@ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" }, - "node_modules/@vercel/analytics": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vercel/analytics/-/analytics-1.3.1.tgz", - "integrity": "sha512-xhSlYgAuJ6Q4WQGkzYTLmXwhYl39sWjoMA3nHxfkvG+WdBT25c563a7QhwwKivEOZtPJXifYHR1m2ihoisbWyA==", - "dependencies": { - "server-only": "^0.0.1" - }, - "peerDependencies": { - "next": ">= 13", - "react": "^18 || ^19" - }, - "peerDependenciesMeta": { - "next": { - "optional": true - }, - "react": { - "optional": true - } - } - }, "node_modules/@vitejs/plugin-react": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.3.1.tgz", @@ -8451,11 +8430,6 @@ "semver": "bin/semver.js" } }, - "node_modules/server-only": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/server-only/-/server-only-0.0.1.tgz", - "integrity": "sha512-qepMx2JxAa5jjfzxG79yPPq+8BuFToHd1hm7kI+Z4zAq1ftQiP7HcxMhDDItrbtwVeLg/cY2JnKnrcFkmiswNA==" - }, "node_modules/set-function-length": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", diff --git a/frontend/package.json b/frontend/package.json index 176c4fd9..83d531d6 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -20,7 +20,6 @@ }, "dependencies": { "@reduxjs/toolkit": "^2.2.7", - "@vercel/analytics": "^1.3.1", "chart.js": 
"^4.4.4", "i18next": "^23.15.1", "i18next-browser-languagedetector": "^8.0.0", diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 0537e695..1455f495 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -3,7 +3,6 @@ import Navigation from './Navigation'; import Conversation from './conversation/Conversation'; import About from './About'; import PageNotFound from './PageNotFound'; -import { inject } from '@vercel/analytics'; import { useMediaQuery } from './hooks'; import { useState } from 'react'; import Setting from './settings'; @@ -11,7 +10,6 @@ import './locale/i18n'; import { Outlet } from 'react-router-dom'; import { SharedConversation } from './conversation/SharedConversation'; import { useDarkTheme } from './hooks'; -inject(); function MainLayout() { const { isMobile } = useMediaQuery(); @@ -34,7 +32,10 @@ function MainLayout() { } export default function App() { - useDarkTheme(); + const [,,componentMounted] = useDarkTheme(); + if(!componentMounted) { + return
+ } return (
diff --git a/frontend/src/Hero.tsx b/frontend/src/Hero.tsx index 04da8769..644848dc 100644 --- a/frontend/src/Hero.tsx +++ b/frontend/src/Hero.tsx @@ -37,7 +37,7 @@ export default function Hero({

{t('sourceDocs')}

{ + if (isMobile) { + setNavOpen(!navOpen); + } + resetConversation(); + }} to="/settings" className={({ isActive }) => `my-auto mx-4 flex h-9 cursor-pointer gap-4 rounded-3xl hover:bg-gray-100 dark:hover:bg-[#28292E] ${ @@ -323,6 +361,12 @@ export default function Navigation({ navOpen, setNavOpen }: NavigationProps) {
{ + if (isMobile) { + setNavOpen(!navOpen); + } + resetConversation(); + }} to="/about" className={({ isActive }) => `my-auto mx-4 flex h-9 cursor-pointer gap-4 rounded-3xl hover:bg-gray-100 dark:hover:bg-[#28292E] ${ @@ -437,6 +481,7 @@ export default function Navigation({ navOpen, setNavOpen }: NavigationProps) { ); diff --git a/frontend/src/assets/file_upload.svg b/frontend/src/assets/file_upload.svg new file mode 100644 index 00000000..f48d8d81 --- /dev/null +++ b/frontend/src/assets/file_upload.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/frontend/src/assets/website_collect.svg b/frontend/src/assets/website_collect.svg new file mode 100644 index 00000000..b7aa60cf --- /dev/null +++ b/frontend/src/assets/website_collect.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/frontend/src/components/RetryIcon.tsx b/frontend/src/components/RetryIcon.tsx index 27ed6028..8cecbd2f 100644 --- a/frontend/src/components/RetryIcon.tsx +++ b/frontend/src/components/RetryIcon.tsx @@ -4,10 +4,11 @@ const RetryIcon = (props: SVGProps) => ( diff --git a/frontend/src/components/SettingsBar.tsx b/frontend/src/components/SettingsBar.tsx new file mode 100644 index 00000000..f617c6e8 --- /dev/null +++ b/frontend/src/components/SettingsBar.tsx @@ -0,0 +1,96 @@ +import React, { useCallback, useRef, useState } from 'react'; +import ArrowLeft from '../assets/arrow-left.svg'; +import ArrowRight from '../assets/arrow-right.svg'; +import { useTranslation } from 'react-i18next'; + +type HiddenGradientType = 'left' | 'right' | undefined; + +const useTabs = () => { + const { t } = useTranslation(); + const tabs = [ + t('settings.general.label'), + t('settings.documents.label'), + t('settings.apiKeys.label'), + t('settings.analytics.label'), + t('settings.logs.label'), + ]; + return tabs; +}; + +interface SettingsBarProps { + setActiveTab: React.Dispatch>; + activeTab: string; +} + +const SettingsBar = ({ setActiveTab, activeTab }: SettingsBarProps) => 
{ + const [hiddenGradient, setHiddenGradient] = + useState('left'); + const containerRef = useRef(null); + const tabs = useTabs(); + const scrollTabs = useCallback( + (direction: number) => { + if (containerRef.current) { + const container = containerRef.current; + container.scrollLeft += direction * 100; // Adjust the scroll amount as needed + if (container.scrollLeft === 0) { + setHiddenGradient('left'); + } else if ( + container.scrollLeft + container.offsetWidth === + container.scrollWidth + ) { + setHiddenGradient('right'); + } else { + setHiddenGradient(undefined); + } + } + }, + [containerRef.current], + ); + return ( +
+
+
+ +
+ +
+
+ {tabs.map((tab, index) => ( + + ))} +
+
+ +
+
+ ); +}; + +export default SettingsBar; diff --git a/frontend/src/components/SourceDropdown.tsx b/frontend/src/components/SourceDropdown.tsx index d5146da5..6a348161 100644 --- a/frontend/src/components/SourceDropdown.tsx +++ b/frontend/src/components/SourceDropdown.tsx @@ -11,6 +11,7 @@ type Props = { isDocsListOpen: boolean; setIsDocsListOpen: React.Dispatch>; handleDeleteClick: any; + handlePostDocumentSelect: any; }; function SourceDropdown({ @@ -20,6 +21,7 @@ function SourceDropdown({ setIsDocsListOpen, isDocsListOpen, handleDeleteClick, + handlePostDocumentSelect, // Callback function fired after a document is selected }: Props) { const dispatch = useDispatch(); const { t } = useTranslation(); @@ -85,6 +87,7 @@ function SourceDropdown({ onClick={() => { dispatch(setSelectedDocs(option)); setIsDocsListOpen(false); + handlePostDocumentSelect(option); }} > - + { + handlePostDocumentSelect(null); + }}> {t('none')}
diff --git a/frontend/src/conversation/Conversation.tsx b/frontend/src/conversation/Conversation.tsx index d5908ca3..fb819922 100644 --- a/frontend/src/conversation/Conversation.tsx +++ b/frontend/src/conversation/Conversation.tsx @@ -10,7 +10,7 @@ import SpinnerDark from '../assets/spinner-dark.svg'; import Spinner from '../assets/spinner.svg'; import RetryIcon from '../components/RetryIcon'; import Hero from '../Hero'; -import { useDarkTheme } from '../hooks'; +import { useDarkTheme, useMediaQuery } from '../hooks'; import { ShareConversationModal } from '../modals/ShareConversationModal'; import { selectConversationId } from '../preferences/preferenceSlice'; import { AppDispatch } from '../store'; @@ -39,6 +39,7 @@ export default function Conversation() { const [lastQueryReturnedErr, setLastQueryReturnedErr] = useState(false); const [isShareModalOpen, setShareModalState] = useState(false); const { t } = useTranslation(); + const { isMobile } = useMediaQuery(); const handleUserInterruption = () => { if (!eventInterrupt && status === 'loading') setEventInterrupt(true); @@ -54,10 +55,6 @@ export default function Conversation() { } }, []); - useEffect(() => { - fetchStream.current && fetchStream.current.abort(); - }, [conversationId]); - useEffect(() => { if (queries.length) { queries[queries.length - 1].error && setLastQueryReturnedErr(true); @@ -143,7 +140,7 @@ export default function Conversation() { } else if (query.error) { const retryBtn = ( ); responseView = ( diff --git a/frontend/src/conversation/ConversationBubble.tsx b/frontend/src/conversation/ConversationBubble.tsx index 543699ed..a3dec8be 100644 --- a/frontend/src/conversation/ConversationBubble.tsx +++ b/frontend/src/conversation/ConversationBubble.tsx @@ -8,7 +8,6 @@ import remarkMath from 'remark-math'; import rehypeKatex from 'rehype-katex'; import 'katex/dist/katex.min.css'; -import Alert from '../assets/alert.svg'; import DocsGPT3 from '../assets/cute_docsgpt3.svg'; import Dislike from 
'../assets/dislike.svg?react'; import Document from '../assets/document.svg'; @@ -59,7 +58,12 @@ const ConversationBubble = forwardRef< className={`flex flex-row-reverse self-end flex-wrap ${className}`} > -
+
{message}
@@ -233,14 +237,6 @@ const ConversationBubble = forwardRef< : 'flex-col rounded-3xl' }`} > - {type === 'ERROR' && ( - <> - alert -
- {retryBtn} -
- - )}
+ {type === 'ERROR' && ( +
+
{retryBtn}
+
+ )} {handleFeedback && ( <>
void; + onCoversationClick: () => void; //Callback to handle click on conversation tile regardless of selected or not onDeleteConversation: (arg1: string) => void; onSave: ({ name, id }: ConversationProps) => void; } @@ -29,6 +28,7 @@ interface ConversationTileProps { export default function ConversationTile({ conversation, selectConversation, + onCoversationClick, onDeleteConversation, onSave, }: ConversationTileProps) { @@ -90,20 +90,17 @@ export default function ConversationTile({ setIsHovered(false); }} onClick={() => { + onCoversationClick(); conversationId !== conversation.id && selectConversation(conversation.id); }} - className={`my-auto mx-4 mt-4 flex h-9 cursor-pointer items-center justify-between gap-4 rounded-3xl hover:bg-gray-100 dark:hover:bg-[#28292E] ${ + className={`my-auto mx-4 mt-4 flex h-9 cursor-pointer items-center justify-between pl-4 gap-4 rounded-3xl hover:bg-gray-100 dark:hover:bg-[#28292E] ${ conversationId === conversation.id || isOpen || isHovered ? 'bg-gray-100 dark:bg-[#28292E]' : '' }`} >
- {isEdit ? ( )} {isOpen && ( -
+