diff --git a/README.md b/README.md index ba042bd8..9d0a1170 100644 --- a/README.md +++ b/README.md @@ -19,10 +19,10 @@ ![X (formerly Twitter) URL](https://img.shields.io/twitter/follow/docsgptai)
- - [☁️ Cloud Version](https://app.docsgpt.cloud/) β€’ [πŸ’¬ Discord](https://discord.gg/n5BX8dh8rU) β€’ [πŸ“– Guides](https://docs.docsgpt.cloud/) + [⚑️ Quickstart](https://docs.docsgpt.cloud/quickstart) β€’ [☁️ Cloud Version](https://app.docsgpt.cloud/) β€’ [πŸ’¬ Discord](https://discord.gg/n5BX8dh8rU) +
+ [πŸ“– Documentation](https://docs.docsgpt.cloud/) β€’ [πŸ‘« Contribute](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) β€’ [πŸ—ž Blog](https://blog.docsgpt.cloud/)
- [πŸ‘« Contribute](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md) β€’ [🏠 Self-host](https://docs.docsgpt.cloud/Guides/How-to-use-different-LLM) β€’ [⚑️ Quickstart](https://github.com/arc53/DocsGPT#quickstart)
@@ -35,6 +35,7 @@
  • πŸ—‚οΈ Wide Format Support: Reads PDF, DOCX, CSV, XLSX, EPUB, MD, RST, HTML, MDX, JSON, PPTX, and images.
  • 🌐 Web & Data Integration: Ingests from URLs, sitemaps, Reddit, GitHub and web crawlers.
  • βœ… Reliable Answers: Get accurate, hallucination-free responses with source citations viewable in a clean UI.
  • +
  • πŸ”‘ Streamlined API Keys: Generate keys linked to your settings, documents, and models, simplifying chatbot and integration setup.
  • πŸ”— Actionable Tooling: Connect to APIs, tools, and other services to enable LLM actions.
  • 🧩 Pre-built Integrations: Use readily available HTML/React chat widgets, search tools, Discord/Telegram bots, and more.
  • πŸ”Œ Flexible Deployment: Works with major LLMs (OpenAI, Google, Anthropic) and local models (Ollama, llama_cpp).
  • @@ -68,45 +69,39 @@ We're eager to provide personalized assistance when deploying your DocsGPT to a > [!Note] > Make sure you have [Docker](https://docs.docker.com/engine/install/) installed +A more detailed [Quickstart](https://docs.docsgpt.cloud/quickstart) is available in our documentation -1. Clone the repository and run the following command: - ```bash - git clone https://github.com/arc53/DocsGPT.git - cd DocsGPT - ``` +1. **Clone the repository:** -On Mac OS or Linux, write: - - -2. Run the following command: - ```bash - ./setup.sh - ``` - -It will install all the dependencies and allow you to download the local model, use OpenAI or use our LLM API. - -Otherwise, refer to this Guide for Windows: - -On windows: - -2. Create a `.env` file in your root directory and set the env variables. - It should look like this inside: - - ``` - LLM_NAME=[docsgpt or openai or others] - API_KEY=[if LLM_NAME is openai] + ```bash + git clone https://github.com/arc53/DocsGPT.git + cd DocsGPT ``` - See optional environment variables in the [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) file. +**For macOS and Linux:** -3. Run the following command: +2. **Run the setup script:** - ```bash - docker compose -f deployment/docker-compose.yaml up --build - ``` -4. Navigate to http://localhost:5173/. + ```bash + ./setup.sh + ``` -To stop, just run `Ctrl + C`. +This interactive script will guide you through setting up DocsGPT. It offers four options: using the public API, running locally, connecting to a local inference engine, or using a cloud API provider. The script will automatically configure your `.env` file and handle necessary downloads and installations based on your chosen option. + +**For Windows:** + +2. 
**Follow the Docker Deployment Guide:** + + Please refer to the [Docker Deployment documentation](https://docs.docsgpt.cloud/Deploying/Docker-Deploying) for detailed step-by-step instructions on setting up DocsGPT using Docker. + +**Navigate to http://localhost:5173/** + +To stop DocsGPT, open a terminal in the `DocsGPT` directory and run: + +```bash +docker compose -f deployment/docker-compose.yaml down +``` +(or use the specific `docker compose down` command shown after running `setup.sh`). > [!Note] > For development environment setup instructions, please refer to the [Development Environment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment). diff --git a/deployment/optional/docker-compose.optional.ollama-cpu.yaml b/deployment/optional/docker-compose.optional.ollama-cpu.yaml new file mode 100644 index 00000000..d7127314 --- /dev/null +++ b/deployment/optional/docker-compose.optional.ollama-cpu.yaml @@ -0,0 +1,11 @@ +version: "3.8" +services: + ollama: + image: ollama/ollama + ports: + - "11434:11434" + volumes: + - ollama_data:/root/.ollama + +volumes: + ollama_data: \ No newline at end of file diff --git a/deployment/optional/docker-compose.optional.ollama-gpu.yaml b/deployment/optional/docker-compose.optional.ollama-gpu.yaml new file mode 100644 index 00000000..17d79100 --- /dev/null +++ b/deployment/optional/docker-compose.optional.ollama-gpu.yaml @@ -0,0 +1,16 @@ +version: "3.8" +services: + ollama: + image: ollama/ollama + ports: + - "11434:11434" + volumes: + - ollama_data:/root/.ollama + deploy: + resources: + reservations: + devices: + - capabilities: [gpu] + +volumes: + ollama_data: \ No newline at end of file diff --git a/docs/components/DeploymentCards.jsx b/docs/components/DeploymentCards.jsx new file mode 100644 index 00000000..1f91c171 --- /dev/null +++ b/docs/components/DeploymentCards.jsx @@ -0,0 +1,120 @@ +import Image from 'next/image'; + +const iconMap = { + 'Amazon Lightsail': '/lightsail.png', + 'Railway': '/railway.png', 
+ 'Civo Compute Cloud': '/civo.png', + 'DigitalOcean Droplet': '/digitalocean.png', + 'Kamatera Cloud': '/kamatera.png', +}; + + +export function DeploymentCards({ items }) { + return ( + <> +
    + {items.map(({ title, link, description }) => { + const isExternal = link.startsWith('https://'); + const iconSrc = iconMap[title] || '/default-icon.png'; // Default icon if not found + + return ( +
    + +
    + {iconSrc &&
    {title}
    } {/* Reduced icon size */} +
    +

    {title}

    + {description &&

    {description}

    } +

    {new URL(link).hostname.replace('www.', '')}

    +
    +
    + ); + })} +
    + + + + ); +} \ No newline at end of file diff --git a/docs/pages/API/API-docs.md b/docs/pages/API/API-docs.md deleted file mode 100644 index a85ed6f8..00000000 --- a/docs/pages/API/API-docs.md +++ /dev/null @@ -1,350 +0,0 @@ -# API Endpoints Documentation - -*Currently, the application provides the following main API endpoints:* - - -### 1. /api/answer -**Description:** - -This endpoint is used to request answers to user-provided questions. - -**Request:** - -**Method**: `POST` - -**Headers**: Content-Type should be set to `application/json; charset=utf-8` - -**Request Body**: JSON object with the following fields: -* `question` β€” The user's question. -* `history` β€” (Optional) Previous conversation history. -* `api_key`β€” Your API key. -* `embeddings_key` β€” Your embeddings key. -* `active_docs` β€” The location of active documentation. - -Here is a JavaScript Fetch Request example: -```js -// answer (POST http://127.0.0.1:5000/api/answer) -fetch("http://127.0.0.1:5000/api/answer", { - "method": "POST", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, - "body": JSON.stringify({"question":"Hi","history":null,"api_key":"OPENAI_API_KEY","embeddings_key":"OPENAI_API_KEY", - "active_docs": "javascript/.project/ES2015/openai_text-embedding-ada-002/"}) -}) -.then((res) => res.text()) -.then(console.log.bind(console)) -``` - -**Response** - -In response, you will get a JSON document containing the `answer`, `query` and `result`: -```json -{ - "answer": "Hi there! How can I help you?\n", - "query": "Hi", - "result": "Hi there! How can I help you?\nSOURCES:" -} -``` - -### 2. /api/docs_check - -**Description:** - -This endpoint will make sure documentation is loaded on the server (just run it every time user is switching between libraries (documentations)). 
- -**Request:** - -**Method**: `POST` - -**Headers**: Content-Type should be set to `application/json; charset=utf-8` - -**Request Body**: JSON object with the field: -* `docs` β€” The location of the documentation: -```js -// docs_check (POST http://127.0.0.1:5000/api/docs_check) -fetch("http://127.0.0.1:5000/api/docs_check", { - "method": "POST", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, - "body": JSON.stringify({"docs":"javascript/.project/ES2015/openai_text-embedding-ada-002/"}) -}) -.then((res) => res.text()) -.then(console.log.bind(console)) -``` - -**Response:** - -In response, you will get a JSON document like this one indicating whether the documentation exists or not: -```json -{ - "status": "exists" -} -``` - - -### 3. /api/combine -**Description:** - -This endpoint provides information about available vectors and their locations with a simple GET request. - -**Request:** - -**Method**: `GET` - -**Response:** - -Response will include: -* `date` -* `description` -* `docLink` -* `fullName` -* `language` -* `location` (local or docshub) -* `model` -* `name` -* `version` - -Example of JSON in Docshub and local: - -image - -### 4. /api/upload -**Description:** - -This endpoint is used to upload a file that needs to be trained, response is JSON with task ID, which can be used to check on task's progress. - -**Request:** - -**Method**: `POST` - -**Request Body**: A multipart/form-data form with file upload and additional fields, including `user` and `name`. - -HTML example: - -```html -
    - - - - - -
    -``` - -**Response:** - -JSON response with a status and a task ID that can be used to check the task's progress. - - -### 5. /api/task_status -**Description:** - -This endpoint is used to get the status of a task (`task_id`) from `/api/upload` - -**Request:** - -**Method**: `GET` - -**Query Parameter**: `task_id` (task ID to check) - -**Sample JavaScript Fetch Request:** -```js -// Task status (Get http://127.0.0.1:5000/api/task_status) -fetch("http://localhost:5001/api/task_status?task_id=YOUR_TASK_ID", { - "method": "GET", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, -}) -.then((res) => res.text()) -.then(console.log.bind(console)) -``` - -**Response:** - -There are two types of responses: - -1. While the task is still running, the 'current' value will show progress from 0 to 100. - ```json - { - "result": { - "current": 1 - }, - "status": "PROGRESS" - } - ``` - -2. When task is completed: - ```json - { - "result": { - "directory": "temp", - "filename": "install.rst", - "formats": [ - ".rst", - ".md", - ".pdf" - ], - "name_job": "somename", - "user": "local" - }, - "status": "SUCCESS" - } - ``` - -### 6. /api/delete_old -**Description:** - -This endpoint is used to delete old Vector Stores. - -**Request:** - -**Method**: `GET` - -**Query Parameter**: `task_id` - -**Sample JavaScript Fetch Request:** -```js -// delete_old (GET http://127.0.0.1:5000/api/delete_old) -fetch("http://localhost:5001/api/delete_old?task_id=YOUR_TASK_ID", { - "method": "GET", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, -}) -.then((res) => res.text()) -.then(console.log.bind(console)) - -``` -**Response:** - -JSON response indicating the status of the operation: - -```json -{ "status": "ok" } -``` - -### 7. /api/get_api_keys -**Description:** - -The endpoint retrieves a list of API keys for the user. 
- -**Request:** - -**Method**: `GET` - -**Sample JavaScript Fetch Request:** -```js -// get_api_keys (GET http://127.0.0.1:5000/api/get_api_keys) -fetch("http://localhost:5001/api/get_api_keys", { - "method": "GET", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, -}) -.then((res) => res.text()) -.then(console.log.bind(console)) - -``` -**Response:** - -JSON response with a list of created API keys: - -```json -[ - { - "id": "string", - "name": "string", - "key": "string", - "source": "string" - }, - ... - ] -``` - -### 8. /api/create_api_key - -**Description:** - -Create a new API key for the user. - -**Request:** - -**Method**: `POST` - -**Headers**: Content-Type should be set to `application/json; charset=utf-8` - -**Request Body**: JSON object with the following fields: -* `name` β€” A name for the API key. -* `source` β€” The source documents that will be used. -* `prompt_id` β€” The prompt ID. -* `chunks` β€” The number of chunks used to process an answer. - -Here is a JavaScript Fetch Request example: -```js -// create_api_key (POST http://127.0.0.1:5000/api/create_api_key) -fetch("http://127.0.0.1:5000/api/create_api_key", { - "method": "POST", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, - "body": JSON.stringify({"name":"Example Key Name", - "source":"Example Source", - "prompt_id":"creative", - "chunks":"2"}) -}) -.then((res) => res.json()) -.then(console.log.bind(console)) -``` - -**Response** - -In response, you will get a JSON document containing the `id` and `key`: -```json -{ - "id": "string", - "key": "string" -} -``` - -### 9. /api/delete_api_key - -**Description:** - -Delete an API key for the user. - -**Request:** - -**Method**: `POST` - -**Headers**: Content-Type should be set to `application/json; charset=utf-8` - -**Request Body**: JSON object with the field: -* `id` β€” The unique identifier of the API key to be deleted. 
- -Here is a JavaScript Fetch Request example: -```js -// delete_api_key (POST http://127.0.0.1:5000/api/delete_api_key) -fetch("http://127.0.0.1:5000/api/delete_api_key", { - "method": "POST", - "headers": { - "Content-Type": "application/json; charset=utf-8" - }, - "body": JSON.stringify({"id":"API_KEY_ID"}) -}) -.then((res) => res.json()) -.then(console.log.bind(console)) -``` - -**Response:** - -In response, you will get a JSON document indicating the status of the operation: -```json -{ - "status": "ok" -} -``` \ No newline at end of file diff --git a/docs/pages/API/_meta.json b/docs/pages/API/_meta.json deleted file mode 100644 index 4873d38c..00000000 --- a/docs/pages/API/_meta.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "API-docs": { - "title": "πŸ—‚οΈοΈ API-docs", - "href": "/API/API-docs" - }, - "api-key-guide": { - "title": "πŸ” API Keys guide", - "href": "/API/api-key-guide" - } -} \ No newline at end of file diff --git a/docs/pages/Deploying/Hosting-the-app.md b/docs/pages/Deploying/Amazon-Lightsail.mdx similarity index 89% rename from docs/pages/Deploying/Hosting-the-app.md rename to docs/pages/Deploying/Amazon-Lightsail.mdx index 5f527aa1..24aef74b 100644 --- a/docs/pages/Deploying/Hosting-the-app.md +++ b/docs/pages/Deploying/Amazon-Lightsail.mdx @@ -1,3 +1,9 @@ +--- +title: Hosting DocsGPT on Amazon Lightsail +description: +display: hidden +--- + # Self-hosting DocsGPT on Amazon Lightsail Here's a step-by-step guide on how to set up an Amazon Lightsail instance to host DocsGPT. @@ -101,10 +107,4 @@ Repeat the process for port `7091`. #### Access your instance -Your instance is now available at your Public IP Address on port 5173. Enjoy using DocsGPT! 
- -## Other Deployment Options - -- [Deploy DocsGPT on Civo Compute Cloud](https://dev.to/rutamhere/deploying-docsgpt-on-civo-compute-c) -- [Deploy DocsGPT on DigitalOcean Droplet](https://dev.to/rutamhere/deploying-docsgpt-on-digitalocean-droplet-50ea) -- [Deploy DocsGPT on Kamatera Performance Cloud](https://dev.to/rutamhere/deploying-docsgpt-on-kamatera-performance-cloud-1bj) +Your instance is now available at your Public IP Address on port 5173. Enjoy using DocsGPT! \ No newline at end of file diff --git a/docs/pages/Deploying/Development-Environment.md b/docs/pages/Deploying/Development-Environment.md deleted file mode 100644 index 6a7b010c..00000000 --- a/docs/pages/Deploying/Development-Environment.md +++ /dev/null @@ -1,78 +0,0 @@ -## Development Environments - -### Spin up Mongo and Redis - -For development, only two containers are used from [docker-compose.yaml](https://github.com/arc53/DocsGPT/blob/main/deployment/docker-compose.yaml) (by deleting all services except for Redis and Mongo). -See file [docker-compose-dev.yaml](https://github.com/arc53/DocsGPT/blob/main/deployment/docker-compose-dev.yaml). - -Run - -``` -docker compose -f deployment/docker-compose-dev.yaml build -docker compose -f deployment/docker-compose-dev.yaml up -d -``` - -### Run the Backend - -> [!Note] -> Make sure you have Python 3.12 installed. - -1. Export required environment variables or prepare a `.env` file in the project folder: - - Copy [.env-template](https://github.com/arc53/DocsGPT/blob/main/application/.env-template) and create `.env`. - -(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.) - -2. (optional) Create a Python virtual environment: - You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments. - -a) On Mac OS and Linux - -```commandline -python -m venv venv -. 
venv/bin/activate -``` - -b) On Windows - -```commandline -python -m venv venv - venv/Scripts/activate -``` - -3. Download embedding model and save it in the `model/` folder: -You can use the script below, or download it manually from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it and save it in the `model/` folder. - -```commandline -wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip -unzip mpnet-base-v2.zip -d model -rm mpnet-base-v2.zip -``` - -4. Install dependencies for the backend: - -```commandline -pip install -r application/requirements.txt -``` - -5. Run the app using `flask --app application/app.py run --host=0.0.0.0 --port=7091`. -6. Start worker with `celery -A application.app.celery worker -l INFO`. - -> [!Note] -> You can also launch the in a debugger mode in vscode by accessing SHIFT + CMD + D or SHIFT + Windows + D on windows and selecting Flask or Celery. - - -### Start Frontend - -> [!Note] -> Make sure you have Node version 16 or higher. - -1. Navigate to the [/frontend](https://github.com/arc53/DocsGPT/tree/main/frontend) folder. -2. Install the required packages `husky` and `vite` (ignore if already installed). - -```commandline -npm install husky -g -npm install vite -g -``` - -3. Install dependencies by running `npm install --include=dev`. -4. Run the app using `npm run dev`. \ No newline at end of file diff --git a/docs/pages/Deploying/Development-Environment.mdx b/docs/pages/Deploying/Development-Environment.mdx new file mode 100644 index 00000000..2852be19 --- /dev/null +++ b/docs/pages/Deploying/Development-Environment.mdx @@ -0,0 +1,163 @@ +--- +title: Setting Up a Development Environment +description: Guide to setting up a development environment for DocsGPT, including backend and frontend setup. +--- + +# Setting Up a Development Environment + +This guide will walk you through setting up a development environment for DocsGPT. 
This setup allows you to modify and test the application's backend and frontend components. + +## 1. Spin Up MongoDB and Redis + +For development purposes, you can quickly start MongoDB and Redis containers, which are the primary database and caching systems used by DocsGPT. We provide a dedicated Docker Compose file, `docker-compose-dev.yaml`, located in the `deployment` directory, that includes only these essential services. + +You can find the `docker-compose-dev.yaml` file [here](https://github.com/arc53/DocsGPT/blob/main/deployment/docker-compose-dev.yaml). + +**Steps to start MongoDB and Redis:** + +1. Navigate to the root directory of your DocsGPT repository in your terminal. + +2. Run the following commands to build and start the containers defined in `docker-compose-dev.yaml`: + + ```bash + docker compose -f deployment/docker-compose-dev.yaml build + docker compose -f deployment/docker-compose-dev.yaml up -d + ``` + + These commands will start MongoDB and Redis in detached mode, running in the background. + +## 2. Run the Backend + +To run the DocsGPT backend locally, you'll need to set up a Python environment and install the necessary dependencies. + +**Prerequisites:** + +* **Python 3.12:** Ensure you have Python 3.12 installed on your system. You can check your Python version by running `python --version` or `python3 --version` in your terminal. + +**Steps to run the backend:** + +1. **Configure Environment Variables:** + + DocsGPT backend settings are configured using environment variables. You can set these either in a `.env` file or directly in the `settings.py` file. For a comprehensive overview of all settings, please refer to the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings). + + * **Option 1: Using a `.env` file (Recommended):** + * If you haven't already, create a file named `.env` in the **root directory** of your DocsGPT project. + * Modify the `.env` file to adjust settings as needed. 
You can find a comprehensive list of configurable options in [`application/core/settings.py`](application/core/settings.py). + + * **Option 2: Exporting Environment Variables:** + * Alternatively, you can export environment variables directly in your terminal. However, using a `.env` file is generally more organized for development. + +2. **Create a Python Virtual Environment (Optional but Recommended):** + + Using a virtual environment isolates project dependencies and avoids conflicts with system-wide Python packages. + + * **macOS and Linux:** + + ```bash + python -m venv venv + . venv/bin/activate + ``` + + * **Windows:** + + ```bash + python -m venv venv + venv/Scripts/activate + ``` + +3. **Download Embedding Model:** + + The backend requires an embedding model. Download the `mpnet-base-v2` model and place it in the `model/` directory within the project root. You can use the following script: + + ```bash + wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip + unzip mpnet-base-v2.zip -d model + rm mpnet-base-v2.zip + ``` + + Alternatively, you can manually download the zip file from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it, and place the extracted folder in `model/`. + +4. **Install Backend Dependencies:** + + Navigate to the root of your DocsGPT repository and install the required Python packages: + + ```bash + pip install -r application/requirements.txt + ``` + +5. **Run the Flask App:** + + Start the Flask backend application: + + ```bash + flask --app application/app.py run --host=0.0.0.0 --port=7091 + ``` + + This command will launch the backend server, making it accessible on `http://localhost:7091`. + +6. **Start the Celery Worker:** + + Open a new terminal window (and activate your virtual environment if you used one). 
Start the Celery worker to handle background tasks: + + ```bash + celery -A application.app.celery worker -l INFO + ``` + + This command will start the Celery worker, which processes tasks such as document parsing and vector embedding. + +**Running in Debugger (VSCode):** + +For easier debugging, you can launch the Flask app and Celery worker directly from VSCode's debugger. + +* Press Shift + Cmd + D (macOS) or Shift + Windows + D (Windows) to open the Run and Debug view. +* You should see configurations named "Flask" and "Celery". Select the desired configuration and click the "Start Debugging" button (green play icon). + +## 3. Start the Frontend + +To run the DocsGPT frontend locally, you'll need Node.js and npm (Node Package Manager). + +**Prerequisites:** + +* **Node.js version 16 or higher:** Ensure you have Node.js version 16 or greater installed. You can check your Node.js version by running `node -v` in your terminal. npm is usually bundled with Node.js. + +**Steps to start the frontend:** + +1. **Navigate to the Frontend Directory:** + + In your terminal, change the current directory to the `frontend` folder within your DocsGPT repository: + + ```bash + cd frontend + ``` + +2. **Install Global Packages (If Needed):** + + If you don't have `husky` and `vite` installed globally, you can install them: + + ```bash + npm install husky -g + npm install vite -g + ``` + You can skip this step if you already have these packages installed or prefer to use local installations (though global installation simplifies running the commands in this guide). + +3. **Install Frontend Dependencies:** + + Install the project's frontend dependencies using npm: + + ```bash + npm install --include=dev + ``` + + This command reads the `package.json` file in the `frontend` directory and installs all listed dependencies, including development dependencies. + +4. 
**Run the Frontend App:** + + Start the frontend development server: + + ```bash + npm run dev + ``` + + This command will start the Vite development server. The frontend application will typically be accessible at [http://localhost:5173/](http://localhost:5173/). The terminal will display the exact URL where the frontend is running. + +With both the backend and frontend running, you should now have a fully functional DocsGPT development environment. You can access the application in your browser at [http://localhost:5173/](http://localhost:5173/) and start developing! \ No newline at end of file diff --git a/docs/pages/Deploying/Docker-Deploying.mdx b/docs/pages/Deploying/Docker-Deploying.mdx new file mode 100644 index 00000000..559fa4e3 --- /dev/null +++ b/docs/pages/Deploying/Docker-Deploying.mdx @@ -0,0 +1,135 @@ +--- +title: Docker Deployment of DocsGPT +description: Deploy DocsGPT using Docker and Docker Compose for easy setup and management. +--- + +# Docker Deployment of DocsGPT + +Docker is the recommended method for deploying DocsGPT, providing a consistent and isolated environment for the application to run. This guide will walk you through deploying DocsGPT using Docker and Docker Compose. + +## Prerequisites + +* **Docker Engine:** You need to have Docker Engine installed on your system. + * **macOS:** [Docker Desktop for Mac](https://docs.docker.com/desktop/install/mac-install/) + * **Linux:** [Docker Engine Installation Guide](https://docs.docker.com/engine/install/) (follow instructions for your specific distribution) + * **Windows:** [Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) (requires WSL 2 backend, see notes below) +* **Docker Compose:** Docker Compose is usually included with Docker Desktop. If you are using Docker Engine separately, ensure you have Docker Compose V2 installed. 
+ +**Important Note for Windows Users:** Docker Desktop on Windows generally requires the WSL 2 backend to function correctly, especially when using features like host networking which are utilized in DocsGPT's Docker Compose setup. Ensure WSL 2 is enabled and configured in Docker Desktop settings. + +## Quickest Setup: Using DocsGPT Public API + +The fastest way to try out DocsGPT is by using the public API endpoint. This requires minimal configuration and no local LLM setup. + +1. **Clone the DocsGPT Repository (if you haven't already):** + + ```bash + git clone https://github.com/arc53/DocsGPT.git + cd DocsGPT + ``` + +2. **Create a `.env` file:** + + In the root directory of your DocsGPT repository, create a file named `.env`. + +3. **Add Public API Configuration to `.env`:** + + Open the `.env` file and add the following lines: + + ``` + LLM_NAME=docsgpt + VITE_API_STREAMING=true + ``` + + This minimal configuration tells DocsGPT to use the public API. For more advanced settings and other LLM options, refer to the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings). + +4. **Launch DocsGPT with Docker Compose:** + + Navigate to the root directory of the DocsGPT repository in your terminal and run: + + ```bash + docker compose -f deployment/docker-compose.yaml up -d + ``` + + The `-d` flag runs Docker Compose in detached mode (in the background). + +5. **Access DocsGPT in your browser:** + + Once the containers are running, open your web browser and go to [http://localhost:5173/](http://localhost:5173/). + +6. **Stopping DocsGPT:** + + To stop the application, navigate to the same directory in your terminal and run: + + ```bash + docker compose -f deployment/docker-compose.yaml down + ``` + +## Optional Ollama Setup (Local Models) + +DocsGPT provides optional Docker Compose files to easily integrate with [Ollama](https://ollama.com/) for running local models. These files add an official Ollama container to your Docker Compose setup. 
+ These files are located in the `deployment/optional/` directory. + +There are two Ollama optional files: + +* **`docker-compose.optional.ollama-cpu.yaml`**: For running Ollama on CPU. +* **`docker-compose.optional.ollama-gpu.yaml`**: For running Ollama on GPU (requires Docker to be configured for GPU usage). + +### Launching with Ollama and Pulling a Model + +1. **Clone the DocsGPT Repository and Create `.env` (as described above).** + +2. **Launch DocsGPT with Ollama Docker Compose:** + + Choose the appropriate Ollama Compose file (CPU or GPU) and launch DocsGPT: + + **CPU:** + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml up -d + ``` + **GPU:** + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml up -d + ``` + +3. **Pull the Ollama Model:** + + **Crucially, after launching with Ollama, you need to pull the desired model into the Ollama container.** Find the `MODEL_NAME` you configured in your `.env` file (e.g., `llama3.2:1b`). Then execute the following command to pull the model *inside* the running Ollama container: + + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml exec -it ollama ollama pull <model_name> + ``` + or (for GPU): + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml exec -it ollama ollama pull <model_name> + ``` + Replace `<model_name>` with the actual model name from your `.env` file. + +4. **Access DocsGPT in your browser:** + + Once the model is pulled and containers are running, open your web browser and go to [http://localhost:5173/](http://localhost:5173/). + +5. 
**Stopping Ollama Setup:** + + To stop a DocsGPT setup launched with Ollama optional files, use `docker compose down` and include all the compose files used during the `up` command: + + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml down + ``` + or + + ```bash + docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml down + ``` + +**Important for GPU Usage:** + +* **NVIDIA Container Toolkit (for NVIDIA GPUs):** If you are using NVIDIA GPUs, you need to have the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) installed and configured on your system for Docker to access your GPU. +* **Docker GPU Configuration:** Ensure Docker is configured to utilize your GPU. Refer to the [Ollama Docker Hub page](https://hub.docker.com/r/ollama/ollama) and Docker documentation for GPU setup instructions specific to your GPU type (NVIDIA, AMD, Intel). + +## Restarting After Configuration Changes + +Whenever you modify the `.env` file or any Docker Compose files, you need to restart the Docker containers for the changes to be applied. Use the same `docker compose down` and `docker compose up -d` commands you used to launch DocsGPT, ensuring you include all relevant `-f` flags for optional files if you are using them. + +## Further Configuration + +This guide covers the basic Docker deployment of DocsGPT. For detailed information on configuring various aspects of DocsGPT, such as LLM providers, models, vector stores, and more, please refer to the comprehensive [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings). 
\ No newline at end of file diff --git a/docs/pages/Deploying/DocsGPT-Settings.mdx b/docs/pages/Deploying/DocsGPT-Settings.mdx new file mode 100644 index 00000000..ce1e46ba --- /dev/null +++ b/docs/pages/Deploying/DocsGPT-Settings.mdx @@ -0,0 +1,107 @@ +--- +title: DocsGPT Settings +description: Configure your DocsGPT application by understanding the basic settings. +--- + +# DocsGPT Settings + +DocsGPT is highly configurable, allowing you to tailor it to your specific needs and preferences. You can control various aspects of the application, from choosing the Large Language Model (LLM) provider to selecting embedding models and vector stores. + +This document will guide you through the basic settings you can configure in DocsGPT. These settings determine how DocsGPT interacts with LLMs and processes your data. + +## Configuration Methods + +There are two primary ways to configure DocsGPT settings: + +### 1. Configuration via `.env` file (Recommended) + +The easiest and recommended way to configure basic settings is by using a `.env` file. This file should be located in the **root directory** of your DocsGPT project (the same directory where `setup.sh` is located). + +**Example `.env` file structure:** + +``` +LLM_NAME=openai +API_KEY=YOUR_OPENAI_API_KEY +MODEL_NAME=gpt-4o +``` + +### 2. Configuration via `settings.py` file (Advanced) + +For more advanced configurations or if you prefer to manage settings directly in code, you can modify the `settings.py` file. This file is located in the `application/core` directory of your DocsGPT project. + +While modifying `settings.py` offers more flexibility, it's generally recommended to use the `.env` file for basic settings and reserve `settings.py` for more complex adjustments or when you need to configure settings programmatically. 
+ +**Location of `settings.py`:** `application/core/settings.py` + +## Basic Settings Explained + +Here are some of the most fundamental settings you'll likely want to configure: + +- **`LLM_NAME`**: This setting determines which Large Language Model (LLM) provider DocsGPT will use. It tells DocsGPT which API to interact with. + + - **Common values:** + - `docsgpt`: Use the DocsGPT Public API Endpoint (simple and free, as offered in `setup.sh` option 1). + - `openai`: Use OpenAI's API (requires an API key). + - `google`: Use Google's Vertex AI or Gemini models. + - `anthropic`: Use Anthropic's Claude models. + - `groq`: Use Groq's models. + - `huggingface`: Use HuggingFace Inference API. + - `azure_openai`: Use Azure OpenAI Service. + - `openai` (when using local inference engines like Ollama, Llama.cpp, TGI, etc.): This signals DocsGPT to use an OpenAI-compatible API format, even if the actual LLM is running locally. + +- **`MODEL_NAME`**: Specifies the specific model to use from the chosen LLM provider. The available models depend on the `LLM_NAME` you've selected. + + - **Examples:** + - For `LLM_NAME=openai`: `gpt-4o` + - For `LLM_NAME=google`: `gemini-2.0-flash` + - For local models (e.g., Ollama): `llama3.2:1b` (or any model name available in your setup). + +- **`EMBEDDINGS_NAME`**: This setting defines which embedding model DocsGPT will use to generate vector embeddings for your documents. Embeddings are numerical representations of text that allow DocsGPT to understand the semantic meaning of your documents for efficient search and retrieval. + + - **Default value:** `huggingface_sentence-transformers/all-mpnet-base-v2` (a good general-purpose embedding model). + - **Other options:** You can explore other embedding models from Hugging Face Sentence Transformers or other providers if needed. + +- **`API_KEY`**: Required for most cloud-based LLM providers. This is your authentication key to access the LLM provider's API. 
You'll need to obtain this key from your chosen provider's platform. + +- **`OPENAI_BASE_URL`**: Specifically used when `LLM_NAME` is set to `openai` but you are connecting to a local inference engine (like Ollama, Llama.cpp, etc.) that exposes an OpenAI-compatible API. This setting tells DocsGPT where to find your local LLM server. + +## Configuration Examples + +Let's look at some concrete examples of how to configure these settings in your `.env` file. + +### Example for Cloud API Provider (OpenAI) + +To use OpenAI's `gpt-4o` model, you would configure your `.env` file like this: + +``` +LLM_NAME=openai +API_KEY=YOUR_OPENAI_API_KEY # Replace with your actual OpenAI API key +MODEL_NAME=gpt-4o +``` + +Make sure to replace `YOUR_OPENAI_API_KEY` with your actual OpenAI API key. + +### Example for Local Deployment + +To use a local Ollama server with the `llama3.2:1b` model, you would configure your `.env` file like this: + +``` +LLM_NAME=openai # Using OpenAI compatible API format for local models +API_KEY=None # API Key is not needed for local Ollama +MODEL_NAME=llama3.2:1b +OPENAI_BASE_URL=http://host.docker.internal:11434/v1 # Default Ollama API URL within Docker +EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2 # You can also run embeddings locally if needed +``` + +In this case, even though you are using Ollama locally, `LLM_NAME` is set to `openai` because Ollama (and many other local inference engines) are designed to be API-compatible with OpenAI. `OPENAI_BASE_URL` points DocsGPT to the local Ollama server. + +## Exploring More Settings + +These are just the basic settings to get you started. The `settings.py` file contains many more advanced options that you can explore to further customize DocsGPT, such as: + +- Vector store configuration (`VECTOR_STORE`, Qdrant, Milvus, LanceDB settings) +- Retriever settings (`RETRIEVERS_ENABLED`) +- Cache settings (`CACHE_REDIS_URL`) +- And many more! 
+ +For a complete list of available settings and their descriptions, refer to the `settings.py` file in `application/core`. Remember to restart your Docker containers after making changes to your `.env` file or `settings.py` for the changes to take effect. \ No newline at end of file diff --git a/docs/pages/Deploying/Hosting-the-app.mdx b/docs/pages/Deploying/Hosting-the-app.mdx new file mode 100644 index 00000000..4fb72ddd --- /dev/null +++ b/docs/pages/Deploying/Hosting-the-app.mdx @@ -0,0 +1,33 @@ +import { DeploymentCards } from '../../components/DeploymentCards'; + +# Deployment Guides + + diff --git a/docs/pages/Deploying/Kubernetes-Deploying.md b/docs/pages/Deploying/Kubernetes-Deploying.mdx similarity index 92% rename from docs/pages/Deploying/Kubernetes-Deploying.md rename to docs/pages/Deploying/Kubernetes-Deploying.mdx index 7e8768cf..8f1c8f7a 100644 --- a/docs/pages/Deploying/Kubernetes-Deploying.md +++ b/docs/pages/Deploying/Kubernetes-Deploying.mdx @@ -1,4 +1,10 @@ -# Self-hosting DocsGPT on Kubernetes +--- +title: Deploying DocsGPT on Kubernetes +description: Learn how to self-host DocsGPT on a Kubernetes cluster for scalable and robust deployments. +--- + +# Self-hosting DocsGPT + on Kubernetes This guide will walk you through deploying DocsGPT on Kubernetes. diff --git a/docs/pages/Deploying/Quickstart.md b/docs/pages/Deploying/Quickstart.md deleted file mode 100644 index cebf6bd7..00000000 --- a/docs/pages/Deploying/Quickstart.md +++ /dev/null @@ -1,64 +0,0 @@ -## Launching Web App -**Note**: Make sure you have Docker installed - -**On macOS or Linux:** -Just run the following command: - -```bash -./setup.sh -``` - -This command will install all the necessary dependencies and provide you with an option to use our LLM API, download the local model or use OpenAI. - -If you prefer to follow manual steps, refer to this guide: - -1. Open and download this repository with - ```bash - git clone https://github.com/arc53/DocsGPT.git - cd DocsGPT - ``` -2. 
Create a `.env` file in your root directory and set the env variables. - It should look like this inside: - - ``` - LLM_NAME=[docsgpt or openai or others] - API_KEY=[if LLM_NAME is openai] - ``` - - See optional environment variables in the [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) file. - -3. Run the following commands: - ```bash - docker compose -f deployment/docker-compose.yaml up - ``` -4. Navigate to http://localhost:5173/. - -To stop, simply press **Ctrl + C**. - -**For WINDOWS:** - -1. Open and download this repository with - ```bash - git clone https://github.com/arc53/DocsGPT.git - cd DocsGPT - ``` - -2. Create a `.env` file in your root directory and set the env variables. - It should look like this inside: - - ``` - LLM_NAME=[docsgpt or openai or others] - API_KEY=[if LLM_NAME is openai] - ``` - - See optional environment variables in the [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) file. - -3. Run the following command: - - ```bash - docker compose -f deployment/docker-compose.yaml up - ``` -4. Navigate to http://localhost:5173/. -5. To stop the setup, just press **Ctrl + C** in the WSL terminal - -**Important:** Ensure that Docker is installed and properly configured on your Windows system for these steps to work. 
diff --git a/docs/pages/Deploying/Railway-Deploying.md b/docs/pages/Deploying/Railway.mdx similarity index 96% rename from docs/pages/Deploying/Railway-Deploying.md rename to docs/pages/Deploying/Railway.mdx index 770da2f4..89928ab2 100644 --- a/docs/pages/Deploying/Railway-Deploying.md +++ b/docs/pages/Deploying/Railway.mdx @@ -1,3 +1,7 @@ +--- +title: Hosting DocsGPT on Railway +description: Learn how to deploy your own DocsGPT instance on Railway with this step-by-step tutorial +--- # Self-hosting DocsGPT on Railway diff --git a/docs/pages/Deploying/_meta.json b/docs/pages/Deploying/_meta.json index d01e1f67..706300b3 100644 --- a/docs/pages/Deploying/_meta.json +++ b/docs/pages/Deploying/_meta.json @@ -1,22 +1,32 @@ { - "Hosting-the-app": { - "title": "☁️ Hosting DocsGPT", - "href": "/Deploying/Hosting-the-app" + "DocsGPT-Settings": { + "title": "βš™οΈ App Configuration", + "href": "/Deploying/DocsGPT-Settings" }, - "Quickstart": { - "title": "⚑️Quickstart", - "href": "/Deploying/Quickstart" + "Docker-Deploying": { + "title": "πŸ›³οΈ Docker Setup", + "href": "/Deploying/Docker-Deploying" }, "Development-Environment": { "title": "πŸ› οΈDevelopment Environment", "href": "/Deploying/Development-Environment" }, - "Railway-Deploying": { - "title": "πŸš‚Deploying on Railway", - "href": "/Deploying/Railway-Deploying" - }, "Kubernetes-Deploying": { - "title": "☸️Deploying on Kubernetes", + "title": "☸️ Deploying on Kubernetes", "href": "/Deploying/Kubernetes-Deploying" + }, + "Hosting-the-app": { + "title": "☁️ Hosting DocsGPT", + "href": "/Deploying/Hosting-the-app" + }, + "Amazon-Lightsail": { + "title": "Hosting DocsGPT on Amazon Lightsail", + "href": "/Deploying/Amazon-Lightsail", + "display": "hidden" + }, + "Railway": { + "title": "Hosting DocsGPT on Railway", + "href": "/Deploying/Railway", + "display": "hidden" } } \ No newline at end of file diff --git a/docs/pages/Extensions/Chatwoot-extension.md b/docs/pages/Extensions/Chatwoot-extension.mdx similarity 
index 71% rename from docs/pages/Extensions/Chatwoot-extension.md rename to docs/pages/Extensions/Chatwoot-extension.mdx index d6494bbf..68abc949 100644 --- a/docs/pages/Extensions/Chatwoot-extension.md +++ b/docs/pages/Extensions/Chatwoot-extension.mdx @@ -1,8 +1,12 @@ +--- +title: Comprehensive Guide to Setting Up the Chatwoot Extension with DocsGPT +description: This step-by-step guide walks you through the process of setting up the Chatwoot extension with DocsGPT, enabling seamless integration for automated responses and enhanced customer support. Learn how to launch DocsGPT, retrieve your Chatwoot access token, configure the .env file, and start the extension. +--- ## Chatwoot Extension Setup Guide ### Step 1: Prepare and Start DocsGPT -- **Launch DocsGPT**: Follow the instructions in our [DocsGPT Wiki](https://github.com/arc53/DocsGPT/wiki) to start DocsGPT. Make sure to load your documentation. +- **Launch DocsGPT**: Follow the instructions in our [Quickstart](/quickstart) to start DocsGPT. Make sure to load your documentation. ### Step 2: Get Access Token from Chatwoot diff --git a/docs/pages/Extensions/Chrome-extension.mdx b/docs/pages/Extensions/Chrome-extension.mdx index 2eb36ceb..69b741e6 100644 --- a/docs/pages/Extensions/Chrome-extension.mdx +++ b/docs/pages/Extensions/Chrome-extension.mdx @@ -1,3 +1,7 @@ +--- +title: Add DocsGPT Chrome Extension to Your Browser +description: Install the DocsGPT Chrome extension to access AI-powered document assistance directly from your browser for enhanced productivity. 
+--- import {Steps} from 'nextra/components' import { Callout } from 'nextra/components' diff --git a/docs/pages/Extensions/_meta.json b/docs/pages/Extensions/_meta.json index 270367de..2ad7ab0c 100644 --- a/docs/pages/Extensions/_meta.json +++ b/docs/pages/Extensions/_meta.json @@ -1,14 +1,22 @@ { - "Chatwoot-extension": { - "title": "πŸ’¬οΈ Chatwoot Extension", - "href": "/Extensions/Chatwoot-extension" + "api-key-guide": { + "title": "πŸ”‘ Getting API key", + "href": "/Extensions/api-key-guide" }, - "react-widget": { - "title": "πŸ—οΈ Widget setup", - "href": "/Extensions/react-widget" + "chat-widget": { + "title": "πŸ’¬οΈ Chat Widget", + "href": "/Extensions/chat-widget" + }, + "search-widget": { + "title": "πŸ”Ž Search Widget", + "href": "/Extensions/search-widget" }, "Chrome-extension": { "title": "🌐 Chrome Extension", "href": "/Extensions/Chrome-extension" + }, + "Chatwoot-extension": { + "title": "πŸ—£οΈ Chatwoot Extension", + "href": "/Extensions/Chatwoot-extension" } } \ No newline at end of file diff --git a/docs/pages/API/api-key-guide.md b/docs/pages/Extensions/api-key-guide.mdx similarity index 60% rename from docs/pages/API/api-key-guide.md rename to docs/pages/Extensions/api-key-guide.mdx index 53bb4b58..f1b83633 100644 --- a/docs/pages/API/api-key-guide.md +++ b/docs/pages/Extensions/api-key-guide.mdx @@ -1,22 +1,20 @@ -## Guide to DocsGPT API Keys +--- +title: API Keys for DocsGPT Integrations +description: Learn how to obtain, understand, and use DocsGPT API keys to integrate DocsGPT into your external applications and widgets. +--- -DocsGPT API keys are essential for developers and users who wish to integrate the DocsGPT models into external applications, such as the our widget. This guide will walk you through the steps of obtaining an API key, starting from uploading your document to understanding the key variables associated with API keys. 
+# Guide to DocsGPT API Keys -### Uploading Your Document +DocsGPT API keys are essential for developers and users who wish to integrate the DocsGPT models into external applications, such as [our widget](/Extensions/chat-widget). This guide will walk you through the steps of obtaining an API key, starting from uploading your document to understanding the key variables associated with API keys. -Before creating your first API key, you must upload the document that will be linked to this key. You can upload your document through two methods: - -- **GUI Web App Upload:** A user-friendly graphical interface that allows for easy upload and management of documents. -- **Using `/api/upload` Method:** For users comfortable with API calls, this method provides a direct way to upload documents. - -### Obtaining Your API Key +## Obtaining Your API Key After uploading your document, you can obtain an API key either through the graphical user interface or via an API call: - **Graphical User Interface:** Navigate to the Settings section of the DocsGPT web app, find the API Keys option, and press 'Create New' to generate your key. -- **API Call:** Alternatively, you can use the `/api/create_api_key` endpoint to create a new API key. For detailed instructions, visit [DocsGPT API Documentation](https://docs.docsgpt.cloud/API/API-docs#8-apicreate_api_key). +- **API Call:** Alternatively, you can use the `/api/create_api_key` endpoint to create a new API key. For detailed instructions, visit [DocsGPT API Documentation](https://gptcloud.arc53.com/). -### Understanding Key Variables +## Understanding Key Variables Upon creating your API key, you will encounter several key variables. Each serves a specific purpose: @@ -27,4 +25,4 @@ Upon creating your API key, you will encounter several key variables. Each serve With your API key ready, you can now integrate DocsGPT into your application, such as the DocsGPT Widget or any other software, via `/api/answer` or `/stream` endpoints. 
The source document is preset with the API key, allowing you to bypass fields like `selectDocs` and `active_docs` during implementation. -Congratulations on taking the first step towards enhancing your applications with DocsGPT! With this guide, you're now equipped to navigate the process of obtaining and understanding DocsGPT API keys. +Congratulations on taking the first step towards enhancing your applications with DocsGPT! diff --git a/docs/pages/Extensions/react-widget.md b/docs/pages/Extensions/chat-widget copy.md similarity index 97% rename from docs/pages/Extensions/react-widget.md rename to docs/pages/Extensions/chat-widget copy.md index 8429e377..32a9fa67 100644 --- a/docs/pages/Extensions/react-widget.md +++ b/docs/pages/Extensions/chat-widget copy.md @@ -1,12 +1,12 @@ -### Setting up the DocsGPT Widget in Your React Project +# Setting up the DocsGPT Widget in Your React Project -### Introduction: +## Introduction: The DocsGPT Widget is a powerful tool that allows you to integrate AI-powered documentation assistance into your web applications. This guide will walk you through the installation and usage of the DocsGPT Widget in your React project. Whether you're building a web app or a knowledge base, this widget can enhance your user experience. -### Installation +## Installation First, make sure you have Node.js and npm installed in your project. Then go to your project and install a new dependency: `npm install docsgpt`. 
-### Usage +## Usage In the file where you want to use the widget, import it and include the CSS file: ```js import { DocsGPTWidget } from "docsgpt"; @@ -29,7 +29,7 @@ Now, you can use the widget in your component like this : buttonBg = "#222327" /> ``` -### Props Table for DocsGPT Widget +## Props Table for DocsGPT Widget | **Prop** | **Type** | **Default Value** | **Description** | |--------------------|------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| @@ -47,7 +47,7 @@ Now, you can use the widget in your component like this : --- -### Notes +## Notes - **Customizing Props:** All properties can be overridden when embedding the widget. For example, you can provide a unique avatar, title, or color scheme to better align with your brand. - **Default Theme:** The widget defaults to the dark theme unless explicitly set to `"light"`. - **API Key:** If the `apiKey` is not required for your application, leave it empty. @@ -55,7 +55,7 @@ Now, you can use the widget in your component like this : This table provides a clear overview of the customization options available for tailoring the DocsGPT widget to fit your application. 
-### How to use DocsGPTWidget with [Nextra](https://nextra.site/) (Next.js + MDX) +## How to use DocsGPTWidget with [Nextra](https://nextra.site/) (Next.js + MDX) Install your widget as described above and then go to your `pages/` folder and create a new file `_app.js` with the following content: ```js import { DocsGPTWidget } from "docsgpt"; @@ -69,7 +69,7 @@ export default function MyApp({ Component, pageProps }) { ) } ``` -### How to use DocsGPTWidget with HTML +## How to use DocsGPTWidget with HTML ```html diff --git a/docs/pages/Extensions/chat-widget.mdx b/docs/pages/Extensions/chat-widget.mdx new file mode 100644 index 00000000..4cc887dc --- /dev/null +++ b/docs/pages/Extensions/chat-widget.mdx @@ -0,0 +1,158 @@ +--- +title: Integrate DocsGPT Chat Widget into Your Web Application +description: Embed the DocsGPT Widget in your React, HTML, or Nextra projects to provide AI-powered chat functionality to your users. +--- +import { Tabs } from 'nextra/components' + +# Integrating DocsGPT Chat Widget + +## Introduction + +The DocsGPT Widget is a powerful tool that allows you to integrate AI-driven document assistance directly into your web applications. This guide will walk you through embedding the DocsGPT Widget into your projects, whether you're using React, plain HTML, or Nextra. Enhance your user experience by providing seamless access to intelligent document search and chatbot capabilities. + +Try out the interactive widget showcase and customize its parameters at the [DocsGPT Widget Demo](https://widget.docsgpt.cloud/). + +## Setup + + + +### Installation + +Make sure you have Node.js and npm (or yarn, pnpm) installed in your project. 
Navigate to your project directory in the terminal and install the `docsgpt` package: + +```bash npm +npm install docsgpt +``` + +### Usage + +In your React component file, import the `DocsGPTWidget` component: + +```js +import { DocsGPTWidget } from "docsgpt"; +``` + +Now, you can embed the widget within your React component's JSX: + +```jsx + +``` + + + +### Installation + +To use the DocsGPT Widget directly in HTML, include the widget script from a CDN in your HTML file: + +```html filename="html" + +``` + +### Usage + +In your HTML ``, add a `
    ` element where you want to render the widget. Set an `id` for easy targeting. + +```html filename="html" +
    +``` + +Then, in a ` +``` + + + + +### Installation + +Make sure you have Node.js and npm (or yarn, pnpm) installed in your project. Navigate to your project directory in the terminal and install the `docsgpt` package: + +```bash npm +npm install docsgpt +``` + +### Usage with Nextra (Next.js + MDX) + +To integrate the DocsGPT Widget into a [Nextra](https://nextra.site/) documentation site (built with Next.js and MDX), create or modify your `pages/_app.js` file as follows: + +```js filename="pages/_app.js" +import { DocsGPTWidget } from "docsgpt"; + +export default function MyApp({ Component, pageProps }) { + return ( + <> + + + + ) +} +``` + + + +--- + +## Properties Table + +The DocsGPT Widget offers a range of customizable properties that allow you to tailor its appearance and behavior to perfectly match your web application. These parameters can be modified directly when embedding the widget in your React components or HTML code. Below is a detailed overview of each available prop: + +| **Prop** | **Type** | **Default Value** | **Description** | +|--------------------|------------------|-------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| +| **`apiHost`** | `string` | `"https://gptcloud.arc53.com"` | **Required.** The URL of your DocsGPT API backend. This endpoint handles vector search and chatbot queries. | +| **`apiKey`** | `string` | `"your-api-key"` | API key for authentication with your DocsGPT API. Leave empty if no authentication is required. | +| **`avatar`** | `string` | [`dino-icon-link`](https://d3dg1063dc54p9.cloudfront.net/cute-docsgpt.png) | URL for the avatar image displayed in the chatbot interface. | +| **`title`** | `string` | `"Get AI assistance"` | Title text shown in the chatbot header. 
| +| **`description`** | `string` | `"DocsGPT's AI Chatbot is here to help"` | Sub-title or descriptive text displayed below the title in the chatbot header. | +| **`heroTitle`** | `string` | `"Welcome to DocsGPT !"` | Welcome message displayed when the chatbot is initially opened. | +| **`heroDescription`** | `string` | `"This chatbot is built with DocsGPT and utilises GenAI, please review important information using sources."` | Introductory text providing context or disclaimers about the chatbot. | +| **`theme`** | `"dark" \| "light"` | `"dark"` | Color theme of the widget interface. Options: `"dark"` or `"light"`. Defaults to `"dark"`. | +| **`buttonIcon`** | `string` | `"https://your-icon"` | URL for the icon image used in the widget's launch button. | +| **`buttonBg`** | `string` | `"#222327"` | Background color of the widget's launch button. | +| **`size`** | `"small" \| "medium"` | `"medium"` | Size of the widget. Options: `"small"` or `"medium"`. Defaults to `"medium"`. | + +--- + +## Notes on Widget Properties + +* **Full Customization:** Every property listed in the table can be customized. Override the defaults to create a widget that perfectly matches your branding and application context. From avatars and titles to color schemes, you have fine-grained control over the widget's presentation. +* **API Key Handling:** The `apiKey` prop is optional. Only include it if your DocsGPT backend API is configured to require API key authentication. `apiHost` for DocsGPT Cloud is `https://gptcloud.arc53.com/` + +## Explore and Customize Further + +The DocsGPT Widget is fully open-source, allowing for deep customization and extension beyond the readily available props. + +The complete source code for the React-based widget is available in the `extensions/react-widget` directory within the main [DocsGPT GitHub Repository](https://github.com/arc53/DocsGPT). Feel free to explore the code, fork the repository, and tailor the widget to your exact requirements. 
\ No newline at end of file diff --git a/docs/pages/Extensions/search-widget.mdx b/docs/pages/Extensions/search-widget.mdx new file mode 100644 index 00000000..80db407c --- /dev/null +++ b/docs/pages/Extensions/search-widget.mdx @@ -0,0 +1,116 @@ +--- +title: Integrate DocsGPT Search Bar into Your Web Application +description: Embed the DocsGPT Search Bar Widget in your React or HTML projects to provide AI-powered document search functionality to your users. +--- +import { Tabs } from 'nextra/components' + +# Integrating DocsGPT Search Bar Widget + +## Introduction + +The DocsGPT Search Bar Widget offers a simple yet powerful way to embed AI-powered document search directly into your web applications. This widget allows users to perform searches across your documents or pages, enabling them to quickly find the information they need. This guide will walk you through embedding the Search Bar Widget into your projects, whether you're using React or plain HTML. + +Try out the interactive widget showcase and customize its parameters at the [DocsGPT Widget Demo](https://widget.docsgpt.cloud/). + +## Setup + + + +## React Setup + +### Installation + +Make sure you have Node.js and npm (or yarn, pnpm) installed in your project. Navigate to your project directory in the terminal and install the `docsgpt` package: + +```bash npm +npm install docsgpt +``` + +### Usage + +In your React component file, import the `SearchBar` component: + +```js +import { SearchBar } from "docsgpt"; +``` + +Now, you can embed the widget within your React component's JSX: + +```jsx + +``` + + + +### Installation + +To use the DocsGPT Search Bar Widget directly in HTML, include the widget script from a CDN in your HTML file: + +```html filename="html" + +``` + +### Usage + +In your HTML ``, add a `
    ` element where you want to render the Search Bar Widget. Set an `id` for easy targeting. + +```html filename="html" +
    +``` + +Then, in a ` +``` + + + + +--- + +## Properties Table + +The DocsGPT Search Bar Widget offers a range of customizable properties that allow you to tailor its appearance and behavior to perfectly match your web application. These parameters can be modified directly when embedding the widget in your React components or HTML code. Below is a detailed overview of each available prop: + +| **Prop** | **Type** | **Default Value** | **Description** | +|-----------------|-----------|-------------------------------------|--------------------------------------------------------------------------------------------------| +| **`apiKey`** | `string` | `"your-api-key"` | API key for authentication with your DocsGPT API. Leave empty if no authentication is required. | +| **`apiHost`** | `string` | `"https://gptcloud.arc53.com"` | **Required.** The URL of your DocsGPT API backend. This endpoint handles vector similarity search queries. | +| **`theme`** | `"dark" \| "light"` | `"dark"` | Color theme of the search bar. Options: `"dark"` or `"light"`. Defaults to `"dark"`. | +| **`placeholder`** | `string` | `"Search or Ask AI..."` | Placeholder text displayed in the search input field. | +| **`width`** | `string` | `"256px"` | Width of the search bar. Accepts any valid CSS width value (e.g., `"300px"`, `"100%"`, `"20rem"`). | + +--- + +## Notes on Widget Properties + +* **Full Customization:** Every property listed in the table can be customized. Override the defaults to create a Search Bar Widget that perfectly matches your branding and application context. +* **API Key Handling:** The `apiKey` prop is optional. Only include it if your DocsGPT backend API is configured to require API key authentication. `apiHost` for DocsGPT Cloud is `https://gptcloud.arc53.com/` + +## Explore and Customize Further + +The DocsGPT Search Bar Widget is fully open-source, allowing for deep customization and extension beyond the readily available props. 
+
+The complete source code for the React-based widget is available in the `extensions/react-widget` directory within the main [DocsGPT GitHub Repository](https://github.com/arc53/DocsGPT). Feel free to explore the code, fork the repository, and tailor the widget to your exact requirements. \ No newline at end of file diff --git a/docs/pages/Guides/Customising-prompts.mdx b/docs/pages/Guides/Customising-prompts.mdx index 41be967d..a0032a00 100644 --- a/docs/pages/Guides/Customising-prompts.mdx +++ b/docs/pages/Guides/Customising-prompts.mdx @@ -1,3 +1,8 @@ +--- +title: Customizing Prompts +description: This guide will explain how to change prompts in DocsGPT and why it might be beneficial. Additionally, this article explains additional variables that can be used in prompts. +--- + import Image from 'next/image' # Customizing the Main Prompt @@ -34,6 +39,8 @@ When using code examples, use the following format: {summaries} ``` +Note that `{summaries}` allows the model to see and respond to your uploaded documents. If you don't want this functionality you can safely remove it from the customized prompt. + Feel free to customize the prompt to align it with your specific use case or the kind of responses you want from the AI. For example, you can focus on specific document types, industries, or topics to get more targeted results. ## Conclusion diff --git a/docs/pages/Guides/How-to-train-on-other-documentation.mdx b/docs/pages/Guides/How-to-train-on-other-documentation.mdx index f0149618..4e11d6fa 100644 --- a/docs/pages/Guides/How-to-train-on-other-documentation.mdx +++ b/docs/pages/Guides/How-to-train-on-other-documentation.mdx @@ -1,3 +1,7 @@ +--- +title: How to Train on Other Documentation +description: A step-by-step guide on how to effectively train DocsGPT on additional documentation sources. 
+--- import { Callout } from 'nextra/components' import Image from 'next/image' diff --git a/docs/pages/Guides/How-to-use-different-LLM.mdx b/docs/pages/Guides/How-to-use-different-LLM.mdx index c867fdcc..3bc8477d 100644 --- a/docs/pages/Guides/How-to-use-different-LLM.mdx +++ b/docs/pages/Guides/How-to-use-different-LLM.mdx @@ -1,3 +1,7 @@ +--- +title: +description: +--- import { Callout } from 'nextra/components' import Image from 'next/image' @@ -26,24 +30,13 @@ Choose the LLM of your choice. prompts ### For Open source llm change: - + ### Step 1 -For open source you have to edit .env file with LLM_NAME with their desired LLM name. +For open source version please edit `LLM_NAME`, `MODEL_NAME` and others in the .env file. Refer to [βš™οΈ App Configuration](/Deploying/DocsGPT-Settings) for more information. ### Step 2 -All the supported LLM providers are here application/llm and you can check what env variable are needed for each -List of latest supported LLMs are https://github.com/arc53/DocsGPT/blob/main/application/llm/llm_creator.py -### Step 3 -Visit application/llm and select the file of your selected llm and there you will find the specific requirements needed to be filled in order to use it,i.e API key of that llm. +Visit [☁️ Cloud Providers](/Models/cloud-providers) for the updated list of online models. Make sure you have the right API_KEY and correct LLM_NAME. +For self-hosted please visit [πŸ–₯️ Local Inference](/Models/local-inference). -### For OpenAI-Compatible Endpoints: -DocsGPT supports the use of OpenAI-compatible endpoints through base URL substitution. This feature allows you to use alternative AI models or services that implement the OpenAI API interface. - - -Set the OPENAI_BASE_URL in your environment. You can change .env file with OPENAI_BASE_URL with the desired base URL or docker-compose.yml file and add the environment variable to the backend container. - -> Make sure you have the right API_KEY and correct LLM_NAME. 
- - diff --git a/docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.md b/docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.mdx similarity index 95% rename from docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.md rename to docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.mdx index 99e3c757..318bf41b 100644 --- a/docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.md +++ b/docs/pages/Guides/My-AI-answers-questions-using-external-knowledge.mdx @@ -1,3 +1,8 @@ +--- +title: +description: +--- + # Avoiding hallucinations If your AI uses external knowledge and is not explicit enough, it is ok, because we try to make DocsGPT friendly. diff --git a/docs/pages/Guides/_meta.json b/docs/pages/Guides/_meta.json index 454670fc..1a331167 100644 --- a/docs/pages/Guides/_meta.json +++ b/docs/pages/Guides/_meta.json @@ -9,10 +9,12 @@ }, "How-to-use-different-LLM": { "title": "οΈπŸ€– How to use different LLM's", - "href": "/Guides/How-to-use-different-LLM" + "href": "/Guides/How-to-use-different-LLM", + "display": "hidden" }, "My-AI-answers-questions-using-external-knowledge": { "title": "πŸ’­οΈ Avoiding hallucinations", - "href": "/Guides/My-AI-answers-questions-using-external-knowledge" + "href": "/Guides/My-AI-answers-questions-using-external-knowledge", + "display": "hidden" } } \ No newline at end of file diff --git a/docs/pages/Models/_meta.json b/docs/pages/Models/_meta.json new file mode 100644 index 00000000..d1256cd1 --- /dev/null +++ b/docs/pages/Models/_meta.json @@ -0,0 +1,14 @@ +{ + "cloud-providers": { + "title": "☁️ Cloud Providers", + "href": "/Models/cloud-providers" + }, + "local-inference": { + "title": "πŸ–₯️ Local Inference", + "href": "/Models/local-inference" + }, + "embeddings": { + "title": "πŸ“ Embeddings", + "href": "/Models/embeddings" + } +} \ No newline at end of file diff --git a/docs/pages/Models/cloud-providers.mdx b/docs/pages/Models/cloud-providers.mdx new file 
mode 100644 index 00000000..86f2d132 --- /dev/null +++ b/docs/pages/Models/cloud-providers.mdx @@ -0,0 +1,55 @@ +--- +title: Connecting DocsGPT to Cloud LLM Providers +description: Connect DocsGPT to various Cloud Large Language Model (LLM) providers to power your document Q&A. +--- + +# Connecting DocsGPT to Cloud LLM Providers + +DocsGPT is designed to seamlessly integrate with a variety of Cloud Large Language Model (LLM) providers, giving you access to state-of-the-art AI models for document question answering. + +## Configuration via `.env` file + +The primary method for configuring your LLM provider in DocsGPT is through the `.env` file. For a comprehensive understanding of all available settings, please refer to the detailed [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings). + +To connect to a cloud LLM provider, you will typically need to configure the following basic settings in your `.env` file: + +* **`LLM_NAME`**: This setting is essential and identifies the specific cloud provider you wish to use (e.g., `openai`, `google`, `anthropic`). +* **`MODEL_NAME`**: Specifies the exact model you want to utilize from your chosen provider (e.g., `gpt-4o`, `gemini-2.0-flash`, `claude-3-5-sonnet-latest`). Refer to your provider's documentation for a list of available models. +* **`API_KEY`**: Almost all cloud LLM providers require an API key for authentication. Obtain your API key from your chosen provider's platform and securely store it in your `.env` file. + +## Explicitly Supported Cloud Providers + +DocsGPT offers direct, streamlined support for the following cloud LLM providers, making configuration straightforward. The table below outlines the `LLM_NAME` and example `MODEL_NAME` values to use for each provider in your `.env` file. 
+ +| Provider | `LLM_NAME` | Example `MODEL_NAME` | +| :--------------------------- | :------------- | :-------------------------- | +| DocsGPT Public API | `docsgpt` | `None` | +| OpenAI | `openai` | `gpt-4o` | +| Google (Vertex AI, Gemini) | `google` | `gemini-2.0-flash` | +| Anthropic (Claude) | `anthropic` | `claude-3-5-sonnet-latest` | +| Groq | `groq` | `llama-3.1-8b-instant` | +| HuggingFace Inference API | `huggingface` | `meta-llama/Llama-3.1-8B-Instruct` | +| Azure OpenAI | `azure_openai` | `gpt-4o` | + +## Connecting to OpenAI-Compatible Cloud APIs + +DocsGPT's flexible architecture allows you to connect to any cloud provider that offers an API compatible with the OpenAI API standard. This opens up a vast ecosystem of LLM services. + +To connect to an OpenAI-compatible cloud provider, you will still use `LLM_NAME=openai` in your `.env` file. However, you will also need to specify the API endpoint of your chosen provider using the `OPENAI_BASE_URL` setting. You will also likely need to provide an `API_KEY` and `MODEL_NAME` as required by that provider. + +**Example for DeepSeek (OpenAI-Compatible API):** + +To connect to DeepSeek, which offers an OpenAI-compatible API, your `.env` file could be configured as follows: + +``` +LLM_NAME=openai +API_KEY=YOUR_API_KEY # Your DeepSeek API key +MODEL_NAME=deepseek-chat # Or your desired DeepSeek model name +OPENAI_BASE_URL=https://api.deepseek.com/v1 # DeepSeek's OpenAI API URL +``` + +Remember to consult the documentation of your chosen OpenAI-compatible cloud provider for their specific API endpoint, required model names, and authentication methods. + +## Adding Support for Other Cloud Providers + +If you wish to connect to a cloud provider that is not explicitly listed above or doesn't offer OpenAI API compatibility, you can extend DocsGPT to support it. Within the DocsGPT repository, navigate to the `application/llm` directory. Here, you will find Python files defining the existing LLM integrations. 
You can use these files as examples to create a new module for your desired cloud provider. After creating your new LLM module, you will need to register it within the `llm_creator.py` file. This process involves some coding, but it allows for virtually unlimited extensibility to connect to any cloud-based LLM service with an accessible API. \ No newline at end of file diff --git a/docs/pages/Models/embeddings.md b/docs/pages/Models/embeddings.md new file mode 100644 index 00000000..6dfb89b6 --- /dev/null +++ b/docs/pages/Models/embeddings.md @@ -0,0 +1,72 @@ +--- +title: Understanding and Configuring Embedding Models in DocsGPT +description: Learn about embedding models, their importance in DocsGPT, and how to configure them for optimal performance. +--- + +# Understanding and Configuring Embedding Models in DocsGPT + +Embedding models are a crucial component of DocsGPT, enabling its powerful document understanding and question-answering capabilities. This guide will explain what embedding models are, why they are essential for DocsGPT, and how to configure them. + +## What are Embedding Models? + +In simple terms, an embedding model is a type of language model that converts text into numerical vectors. These vectors, known as embeddings, capture the semantic meaning of the text. Think of it as translating words and sentences into a language that computers can understand mathematically, where similar meanings are represented by vectors that are close to each other in vector space. + +**Why are embedding models important for DocsGPT?** + +DocsGPT uses embedding models for several key tasks: + +* **Semantic Search:** When you upload documents to DocsGPT, the application uses an embedding model to generate embeddings for each document chunk. These embeddings are stored in a vector store. When you ask a question, your query is also converted into an embedding. 
DocsGPT then performs a semantic search in the vector store, finding document chunks whose embeddings are most similar to your query embedding. This allows DocsGPT to retrieve relevant information based on the *meaning* of your question and documents, not just keyword matching. +* **Document Understanding:** Embeddings help DocsGPT understand the underlying meaning of your documents, enabling it to answer questions accurately and contextually, even if the exact keywords from your question are not present in the retrieved document chunks. + +In essence, embedding models are the bridge that allows DocsGPT to understand the nuances of human language and connect your questions to the relevant information within your documents. + +## Out-of-the-Box Embedding Model Support in DocsGPT + +DocsGPT is designed to be flexible and supports a wide range of embedding models right out of the box. Currently, DocsGPT provides native support for models from two major sources: + +* **Sentence Transformers:** DocsGPT supports all models available through the [Sentence Transformers library](https://www.sbert.net/). This library offers a vast selection of pre-trained embedding models, known for their quality and efficiency in various semantic tasks. +* **OpenAI Embeddings:** DocsGPT also supports using embedding models from OpenAI, specifically the `text-embedding-ada-002` model, which is a powerful and widely used embedding model from OpenAI's API. + +## Configuring Sentence Transformer Models + +To utilize Sentence Transformer models within DocsGPT, you need to follow these steps: + +1. **Download the Model:** Sentence Transformer models are typically hosted on Hugging Face Model Hub. You need to download your chosen model and place it in the `model/` folder in the root directory of your DocsGPT project. 
+ + For example, to use the `all-mpnet-base-v2` model, you would set `EMBEDDINGS_NAME` as described below, and ensure that the model files are available locally (DocsGPT will attempt to download it if it's not found, but local download is recommended for development and offline use). + +2. **Set `EMBEDDINGS_NAME` in `.env` (or `settings.py`):** You need to configure the `EMBEDDINGS_NAME` setting in your `.env` file (or `settings.py`) to point to the desired Sentence Transformer model. + + * **Using a pre-downloaded model from `model/` folder:** You can specify a path to the downloaded model within the `model/` directory. For instance, if you downloaded `all-mpnet-base-v2` and it's in `model/all-mpnet-base-v2`, you could potentially use a relative path (though a direct path to the model name is usually sufficient), like: + + ``` + EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2 + ``` + or simply use the model identifier: + ``` + EMBEDDINGS_NAME=sentence-transformers/all-mpnet-base-v2 + ``` + + * **Using a model directly from Hugging Face Model Hub:** You can directly specify the model identifier from Hugging Face Model Hub: + + ``` + EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2 + ``` + +## Using OpenAI Embeddings + +To use OpenAI's `text-embedding-ada-002` embedding model, you need to set `EMBEDDINGS_NAME` to `openai_text-embedding-ada-002` and ensure you have your OpenAI API key configured correctly via `API_KEY` in your `.env` file (if you are not using Azure OpenAI). + +**Example `.env` configuration for OpenAI Embeddings:** + +``` +LLM_NAME=openai +API_KEY=YOUR_OPENAI_API_KEY # Your OpenAI API Key +EMBEDDINGS_NAME=openai_text-embedding-ada-002 +``` + +## Adding Support for Other Embedding Models + +If you wish to use an embedding model that is not supported out-of-the-box, a good starting point for adding custom embedding model support is to examine the `base.py` file located in the `application/vectorstore` directory.
+ +Specifically, pay attention to the `EmbeddingsWrapper` and `EmbeddingsSingleton` classes. `EmbeddingsWrapper` provides a way to wrap different embedding model libraries into a consistent interface for DocsGPT. `EmbeddingsSingleton` manages the instantiation and retrieval of embedding model instances. By understanding these classes and the existing embedding model implementations, you can create your own custom integration for virtually any embedding model library you desire. \ No newline at end of file diff --git a/docs/pages/Models/local-inference.mdx b/docs/pages/Models/local-inference.mdx new file mode 100644 index 00000000..4aa6bca2 --- /dev/null +++ b/docs/pages/Models/local-inference.mdx @@ -0,0 +1,44 @@ +--- +title: Connecting DocsGPT to Local Inference Engines +description: Connect DocsGPT to local inference engines for running LLMs directly on your hardware. +--- + +# Connecting DocsGPT to Local Inference Engines + +DocsGPT can be configured to leverage local inference engines, allowing you to run Large Language Models directly on your own infrastructure. This approach offers enhanced privacy and control over your LLM processing. + +Currently, DocsGPT primarily supports local inference engines that are compatible with the OpenAI API format. This means you can connect DocsGPT to various local LLM servers that mimic the OpenAI API structure. + +## Configuration via `.env` file + +Setting up a local inference engine with DocsGPT is configured through environment variables in the `.env` file. For a detailed explanation of all settings, please consult the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings). + +To connect to a local inference engine, you will generally need to configure these settings in your `.env` file: + +* **`LLM_NAME`**: Crucially set this to `openai`. This tells DocsGPT to use the OpenAI-compatible API format for communication, even though the LLM is local. 
+* **`MODEL_NAME`**: Specify the model name as recognized by your local inference engine. This might be a model identifier or left as `None` if the engine doesn't require explicit model naming in the API request. +* **`OPENAI_BASE_URL`**: This is essential. Set this to the base URL of your local inference engine's API endpoint. This tells DocsGPT where to find your local LLM server. +* **`API_KEY`**: Generally, for local inference engines, you can set `API_KEY=None` as authentication is usually not required in local setups. + +## Supported Local Inference Engines (OpenAI API Compatible) + +DocsGPT is readily configurable to work with the following local inference engines, all communicating via the OpenAI API format. Here are example `OPENAI_BASE_URL` values for each, based on default setups: + +| Inference Engine | `LLM_NAME` | `OPENAI_BASE_URL` | +| :---------------------------- | :--------- | :------------------------- | +| LLaMa.cpp | `openai` | `http://localhost:8000/v1` | +| Ollama | `openai` | `http://localhost:11434/v1` | +| Text Generation Inference (TGI)| `openai` | `http://localhost:8080/v1` | +| SGLang | `openai` | `http://localhost:30000/v1` | +| vLLM | `openai` | `http://localhost:8000/v1` | +| Aphrodite | `openai` | `http://localhost:2242/v1` | +| FriendliAI | `openai` | `http://localhost:8997/v1` | +| LMDeploy | `openai` | `http://localhost:23333/v1` | + +**Important Note on `localhost` vs `host.docker.internal`:** + +The `OPENAI_BASE_URL` examples above use `http://localhost`. If you are running DocsGPT within Docker and your local inference engine is running on your host machine (outside of Docker), you will likely need to replace `localhost` with `host.docker.internal` to ensure Docker can correctly access your host's services. For example, `http://host.docker.internal:11434/v1` for Ollama.
+ +## Adding Support for Other Local Engines + +While DocsGPT currently focuses on OpenAI API compatible local engines, you can extend its capabilities to support other local inference solutions. To do this, navigate to the `application/llm` directory in the DocsGPT repository. Examine the existing Python files for examples of LLM integrations. You can create a new module for your desired local engine, and then register it in the `llm_creator.py` file within the same directory. This allows for custom integration with a wide range of local LLM servers beyond those listed above. \ No newline at end of file diff --git a/docs/pages/_meta.json b/docs/pages/_meta.json new file mode 100644 index 00000000..000b569d --- /dev/null +++ b/docs/pages/_meta.json @@ -0,0 +1,18 @@ + +{ + "index": "Home", + "quickstart": "Quickstart", + "Deploying": "Deploying", + "Models": "Models", + "Extensions": "Extensions", + "https://gptcloud.arc53.com/": { + "title": "API", + "href": "https://gptcloud.arc53.com/", + "newWindow": true + }, + "Guides": "Guides", + "changelog": { + "title": "Changelog", + "display": "hidden" + } +} \ No newline at end of file diff --git a/docs/pages/changelog.mdx b/docs/pages/changelog.mdx new file mode 100644 index 00000000..504854f3 --- /dev/null +++ b/docs/pages/changelog.mdx @@ -0,0 +1,3 @@ +--- +title: 'Changelog' +--- \ No newline at end of file diff --git a/docs/pages/index.mdx b/docs/pages/index.mdx index 423a0a01..1163f0a6 100644 --- a/docs/pages/index.mdx +++ b/docs/pages/index.mdx @@ -1,28 +1,65 @@ --- title: 'Home' +description: Documentation of DocsGPT - quickstart, deployment guides, model configuration, and widget integration documentation. 
--- import { Cards, Card } from 'nextra/components' import Image from 'next/image' -import deployingGuides from './Deploying/_meta.json'; -import developingGuides from './API/_meta.json'; -import extensionGuides from './Extensions/_meta.json'; -import mainGuides from './Guides/_meta.json'; - - - - export const allGuides = { - ...deployingGuides, - ...developingGuides, - ...extensionGuides, - ...mainGuides, + "quickstart": { + "title": "⚑️ Quickstart", + "href": "/quickstart" + }, + "DocsGPT-Settings": { + "title": "βš™οΈ App Configuration", + "href": "/Deploying/DocsGPT-Settings" + }, + "Docker-Deploying": { + "title": "πŸ›³οΈ Docker Setup", + "href": "/Deploying/Docker-Deploying" + }, + "Development-Environment": { + "title": "πŸ› οΈDevelopment Environment", + "href": "/Deploying/Development-Environment" + }, + "https://gptcloud.arc53.com/": { + "title": "πŸ§‘β€πŸ’»οΈ API", + "href": "https://gptcloud.arc53.com/", + "newWindow": true + }, + "cloud-providers": { + "title": "☁️ Cloud Providers", + "href": "/Models/cloud-providers" + }, + "local-inference": { + "title": "πŸ–₯️ Local Inference", + "href": "/Models/local-inference" + }, + "embeddings": { + "title": "πŸ“ Embeddings", + "href": "/Models/embeddings" + }, + "api-key-guide": { + "title": "πŸ”‘ Getting API key", + "href": "/Extensions/api-key-guide" + }, + "chat-widget": { + "title": "πŸ’¬οΈ Chat Widget", + "href": "/Extensions/chat-widget" + }, + "search-widget": { + "title": "πŸ”Ž Search Widget", + "href": "/Extensions/search-widget" + }, + "Customising-prompts": { + "title": "οΈπŸ’» Customising Prompts", + "href": "/Guides/Customising-prompts" + } }; -### **DocsGPT πŸ¦–** - -DocsGPT πŸ¦– is an innovative open-source tool designed to simplify the retrieval of information from project documentation using advanced GPT models πŸ€–. Eliminate lengthy manual searches πŸ” and enhance your documentation experience with DocsGPT, and consider contributing to its AI-powered future πŸš€. 
+# **DocsGPT πŸ¦–** +DocsGPT is an open-source genAI tool that helps users get reliable answers from any knowledge source, while avoiding hallucinations. It enables quick and reliable information retrieval, with tooling and agentic system capability built in.