Merge branch 'main' into feat/sources-in-react-widget

Manish Madan
2025-02-17 15:41:34 +05:30
committed by GitHub
257 changed files with 14591 additions and 10233 deletions

.devcontainer/Dockerfile Normal file
View File

@@ -0,0 +1,15 @@
FROM python:3.12-bookworm
# Install Node.js 20.x
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
&& apt-get install -y nodejs \
&& rm -rf /var/lib/apt/lists/*
# Install global npm packages
RUN npm install -g husky vite
# Create and activate Python virtual environment
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
WORKDIR /workspace

View File

@@ -0,0 +1,49 @@
# Welcome to DocsGPT Devcontainer
Welcome to the DocsGPT development environment! This guide will help you get started quickly.
## Starting Services
To run DocsGPT, you need to start three main services: Flask (backend), Celery (task queue), and Vite (frontend). Here are the commands to start each service within the devcontainer:
### Vite (Frontend)
```bash
cd frontend
npm run dev -- --host
```
### Flask (Backend)
```bash
flask --app application/app.py run --host=0.0.0.0 --port=7091
```
### Celery (Task Queue)
```bash
celery -A application.app.celery worker -l INFO
```
## GitHub Codespaces Instructions
### 1. Make Ports Public:
Go to the "Ports" panel in Codespaces (usually located at the bottom of the VS Code window).
For both port 5173 and 7091, right-click on the port and select "Make Public".
![CleanShot 2025-02-12 at 09 46 14@2x](https://github.com/user-attachments/assets/00a34b16-a7ef-47af-9648-87a7e3008475)
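If you prefer the terminal, the GitHub CLI can do the same thing. This is a sketch that assumes `gh` is installed and authenticated and that `$CODESPACE_NAME` is set (it is inside a codespace):
```bash
# Make the forwarded frontend and backend ports publicly visible
gh codespace ports visibility 5173:public 7091:public -c "$CODESPACE_NAME"
```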
### 2. Update VITE_API_HOST:
After making port 7091 public, copy the public URL provided by Codespaces for port 7091.
Open the file `frontend/.env.development`.
Find the line `VITE_API_HOST=http://localhost:7091`.
Replace `http://localhost:7091` with the public URL you copied from Codespaces.
![CleanShot 2025-02-12 at 09 46 56@2x](https://github.com/user-attachments/assets/c472242f-1079-4cd8-bc0b-2d78db22b94c)
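After the edit, the line should look similar to the example below (the codespace name `fuzzy-space` and the `app.github.dev` forwarding domain are only examples; use the URL Codespaces gives you):
```bash
# frontend/.env.development
VITE_API_HOST=https://fuzzy-space-7091.app.github.dev
```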

View File

@@ -0,0 +1,24 @@
{
"name": "DocsGPT Dev Container",
"dockerComposeFile": ["docker-compose-dev.yaml", "docker-compose.override.yaml"],
"service": "dev",
"workspaceFolder": "/workspace",
"postCreateCommand": ".devcontainer/post-create-command.sh",
"forwardPorts": [7091, 5173, 6379, 27017],
"customizations": {
"vscode": {
"extensions": [
"ms-python.python",
"ms-toolsai.jupyter",
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint"
]
},
"codespaces": {
"openFiles": [
".devcontainer/devc-welcome.md",
"CONTRIBUTING.md"
]
}
}
}

View File

@@ -0,0 +1,40 @@
version: '3.8'
services:
dev:
build:
context: .
dockerfile: Dockerfile
volumes:
- ../:/workspace:cached
command: sleep infinity
depends_on:
redis:
condition: service_healthy
mongo:
condition: service_healthy
environment:
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
- CACHE_REDIS_URL=redis://redis:6379/2
networks:
- default
redis:
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 30s
retries: 5
mongo:
healthcheck:
test: ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"]
interval: 5s
timeout: 30s
retries: 5
networks:
default:
name: docsgpt-dev-network

View File

@@ -0,0 +1,32 @@
#!/bin/bash
set -e # Exit immediately if a command exits with a non-zero status
if [ ! -f frontend/.env.development ]; then
cp -n .env-template frontend/.env.development || true # Assuming .env-template is in the root
fi
# Determine VITE_API_HOST based on environment
if [ -n "$CODESPACES" ]; then
# Running in Codespaces: CODESPACE_NAME and GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN are provided by the Codespaces environment
PUBLIC_API_HOST="https://${CODESPACE_NAME}-7091.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}"
echo "Setting VITE_API_HOST for Codespaces: $PUBLIC_API_HOST in frontend/.env.development"
sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$PUBLIC_API_HOST|" frontend/.env.development
else
# Not running in Codespaces (local devcontainer)
DEFAULT_API_HOST="http://localhost:7091"
echo "Setting VITE_API_HOST for local dev: $DEFAULT_API_HOST in frontend/.env.development"
sed -i "s|VITE_API_HOST=.*|VITE_API_HOST=$DEFAULT_API_HOST|" frontend/.env.development
fi
mkdir -p model
if [ ! -d model/all-mpnet-base-v2 ]; then
wget -q https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip -O model/mpnet-base-v2.zip
unzip -q model/mpnet-base-v2.zip -d model
rm model/mpnet-base-v2.zip
fi
pip install -r application/requirements.txt
cd frontend
npm install --include=dev

.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1,3 @@
# These are supported funding model platforms
github: arc53

View File

@@ -8,12 +8,12 @@ updates:
- package-ecosystem: "pip" # See documentation for possible values
directory: "/application" # Location of package manifests
schedule:
interval: "weekly"
interval: "daily"
- package-ecosystem: "npm" # See documentation for possible values
directory: "/frontend" # Location of package manifests
schedule:
interval: "weekly"
interval: "daily"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
interval: "daily"

.github/workflows/bandit.yaml vendored Normal file
View File

@@ -0,0 +1,40 @@
name: Bandit Security Scan
on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened]
jobs:
bandit_scan:
if: ${{ github.repository == 'arc53/DocsGPT' }}
runs-on: ubuntu-latest
permissions:
security-events: write
actions: read
contents: read
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install bandit # Bandit is needed for this action
if [ -f application/requirements.txt ]; then pip install -r application/requirements.txt; fi
- name: Run Bandit scan
uses: PyCQA/bandit-action@v1
with:
severity: medium
confidence: medium
targets: application/
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -5,20 +5,33 @@ on:
types: [published]
jobs:
deploy:
build:
if: github.repository == 'arc53/DocsGPT'
runs-on: ubuntu-latest
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
suffix: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
suffix: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
if: matrix.platform == 'linux/arm64'
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
@@ -33,15 +46,67 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker images to docker.io and ghcr.io
- name: Build and push platform-specific images
uses: docker/build-push-action@v6
with:
file: './application/Dockerfile'
platforms: linux/amd64
platforms: ${{ matrix.platform }}
context: ./application
push: true
tags: |
${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt:latest
${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
provenance: false
sbom: false
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
cache-to: type=inline
manifest:
if: github.repository == 'arc53/DocsGPT'
needs: build
runs-on: ubuntu-latest
permissions:
packages: write
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push manifest for DockerHub
run: |
set -e
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }} \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:${{ github.event.release.tag_name }}-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:latest
- name: Create and push manifest for ghcr.io
run: |
set -e
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }} \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:latest \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:${{ github.event.release.tag_name }}-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:latest

View File

@@ -5,20 +5,33 @@ on:
types: [published]
jobs:
deploy:
build:
if: github.repository == 'arc53/DocsGPT'
runs-on: ubuntu-latest
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
suffix: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
suffix: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
if: matrix.platform == 'linux/arm64'
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
@@ -33,16 +46,67 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
# Runs a single command using the runners shell
- name: Build and push Docker images to docker.io and ghcr.io
- name: Build and push platform-specific images
uses: docker/build-push-action@v6
with:
file: './frontend/Dockerfile'
platforms: linux/amd64, linux/arm64
platforms: ${{ matrix.platform }}
context: ./frontend
push: true
tags: |
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }},${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }},ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-${{ matrix.suffix }}
provenance: false
sbom: false
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
cache-to: type=inline
manifest:
if: github.repository == 'arc53/DocsGPT'
needs: build
runs-on: ubuntu-latest
permissions:
packages: write
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push manifest for DockerHub
run: |
set -e
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }} \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:latest
- name: Create and push manifest for ghcr.io
run: |
set -e
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }} \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:${{ github.event.release.tag_name }}-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:latest

View File

@@ -1,4 +1,4 @@
name: Build and push DocsGPT Docker image for development
name: Build and push multi-arch DocsGPT Docker image
on:
workflow_dispatch:
@@ -7,27 +7,36 @@ on:
- main
jobs:
deploy:
build:
if: github.repository == 'arc53/DocsGPT'
runs-on: ubuntu-latest
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
suffix: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
suffix: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
@@ -35,15 +44,57 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker images to docker.io and ghcr.io
- name: Build and push platform-specific images
uses: docker/build-push-action@v6
with:
file: './application/Dockerfile'
platforms: linux/amd64
platforms: ${{ matrix.platform }}
context: ./application
push: true
tags: |
${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
ghcr.io/${{ github.repository_owner }}/docsgpt:develop
${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-${{ matrix.suffix }}
ghcr.io/${{ github.repository_owner }}/docsgpt:develop-${{ matrix.suffix }}
provenance: false
sbom: false
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
cache-to: type=inline
manifest:
if: github.repository == 'arc53/DocsGPT'
needs: build
runs-on: ubuntu-latest
permissions:
packages: write
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push manifest for DockerHub
run: |
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt:develop
- name: Create and push manifest for ghcr.io
run: |
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt:develop \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt:develop-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt:develop

View File

@@ -7,20 +7,33 @@ on:
- main
jobs:
deploy:
build:
if: github.repository == 'arc53/DocsGPT'
runs-on: ubuntu-latest
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
suffix: amd64
- platform: linux/arm64
runner: ubuntu-24.04-arm
suffix: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
- name: Set up QEMU # Only needed for emulation, not for native arm64 builds
if: matrix.platform == 'linux/arm64'
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
@@ -35,15 +48,57 @@ jobs:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Docker images to docker.io and ghcr.io
- name: Build and push platform-specific images
uses: docker/build-push-action@v6
with:
file: './frontend/Dockerfile'
platforms: linux/amd64
platforms: ${{ matrix.platform }}
context: ./frontend
push: true
tags: |
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop
${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-${{ matrix.suffix }}
ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-${{ matrix.suffix }}
provenance: false
sbom: false
cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
cache-to: type=inline
manifest:
if: github.repository == 'arc53/DocsGPT'
needs: build
runs-on: ubuntu-latest
permissions:
packages: write
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker-container
install: true
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to ghcr.io
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create and push manifest for DockerHub
run: |
docker manifest create ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-amd64 \
--amend ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop-arm64
docker manifest push ${{ secrets.DOCKER_USERNAME }}/docsgpt-fe:develop
- name: Create and push manifest for ghcr.io
run: |
docker manifest create ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-amd64 \
--amend ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop-arm64
docker manifest push ghcr.io/${{ github.repository_owner }}/docsgpt-fe:develop

View File

@@ -6,7 +6,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.11"]
python-version: ["3.12"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
@@ -23,8 +23,8 @@ jobs:
run: |
python -m pytest --cov=application --cov-report=xml
- name: Upload coverage reports to Codecov
if: github.event_name == 'pull_request' && matrix.python-version == '3.11'
uses: codecov/codecov-action@v4
if: github.event_name == 'pull_request' && matrix.python-version == '3.12'
uses: codecov/codecov-action@v5
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

.vscode/launch.json vendored
View File

@@ -11,6 +11,44 @@
"skipFiles": [
"<node_internals>/**"
]
},
{
"name": "Flask Debugger",
"type": "debugpy",
"request": "launch",
"module": "flask",
"env": {
"FLASK_APP": "application/app.py",
"PYTHONPATH": "${workspaceFolder}",
"FLASK_ENV": "development",
"FLASK_DEBUG": "1",
"FLASK_RUN_PORT": "7091",
"FLASK_RUN_HOST": "0.0.0.0"
},
"args": [
"run",
"--no-debugger"
],
"cwd": "${workspaceFolder}",
},
{
"name": "Celery Debugger",
"type": "debugpy",
"request": "launch",
"module": "celery",
"env": {
"PYTHONPATH": "${workspaceFolder}",
},
"args": [
"-A",
"application.app.celery",
"worker",
"-l",
"INFO",
"--pool=solo"
],
"cwd": "${workspaceFolder}"
}
]
}

Binary file not shown. (image removed; previously 88 KiB)

Binary file not shown. (image removed; previously 21 KiB)

View File

@@ -27,6 +27,7 @@ Before creating issues, please check out how the latest version of our app looks
### 👨‍💻 If you're interested in contributing code, here are some important things to know:
For instructions on setting up a development environment, please refer to our [Development Deployment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
Tech Stack Overview:
@@ -34,19 +35,40 @@ Tech Stack Overview:
- 🖥 Backend: Developed in Python 🐍
### 🌐 If you are looking to contribute to frontend (⚛React, Vite):
### 🌐 Frontend Contributions (⚛️ React, Vite)
- The current frontend is being migrated from [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) to [`/frontend`](https://github.com/arc53/DocsGPT/tree/main/frontend) with a new design, so please contribute to the new one.
- Check out this [milestone](https://github.com/arc53/DocsGPT/milestone/1) and its issues.
- The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1).
* The updated Figma design can be found [here](https://www.figma.com/file/OXLtrl1EAy885to6S69554/DocsGPT?node-id=0%3A1&t=hjWVuxRg9yi5YkJ9-1). Please try to follow the guidelines.
* **Coding Style:** We follow a strict coding style enforced by ESLint and Prettier. Please ensure your code adheres to the configuration provided in our repository's `frontend/.eslintrc.js` file. We recommend configuring your editor with ESLint and Prettier to help with this.
* **Component Structure:** Strive for small, reusable components. Favor functional components and hooks over class components where possible.
* **State Management:** If you need to add stores, please use Redux.
Please try to follow the guidelines.
### 🖥 Backend Contributions (🐍 Python)
### 🖥 If you are looking to contribute to Backend (🐍 Python):
- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application) or [`/scripts`](https://github.com/arc53/DocsGPT/tree/main/scripts) (please disregard old [`ingest_rst.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst.py) [`ingest_rst_sphinx.py`](https://github.com/arc53/DocsGPT/blob/main/scripts/old/ingest_rst_sphinx.py) files; these will be deprecated soon).
- Review our issues and contribute to [`/application`](https://github.com/arc53/DocsGPT/tree/main/application)
- All new code should be covered with unit tests ([pytest](https://github.com/pytest-dev/pytest)). Tests live under the [`/tests`](https://github.com/arc53/DocsGPT/tree/main/tests) folder; a minimal example sketch follows this list.
- Before submitting your Pull Request, ensure it can be queried after ingesting some test data.
- **Coding Style:** We adhere to the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code. We use `ruff` as our linter and code formatter. Please ensure your code is formatted correctly and passes `ruff` checks before submitting.
- **Type Hinting:** Please use type hints for all function arguments and return values. This improves code readability and helps catch errors early. Example:
```python
def my_function(name: str, count: int) -> list[str]:
...
```
- **Docstrings:** All functions and classes should have docstrings explaining their purpose, parameters, and return values. We prefer the [Google style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). Example:
```python
def my_function(name: str, count: int) -> list[str]:
"""Does something with a name and a count.
Args:
name: The name to use.
count: The number of times to do it.
Returns:
A list of strings.
"""
...
```
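For reference, here is a minimal sketch of the expected pytest style; the `slugify` helper is purely illustrative and not part of the DocsGPT codebase:
```python
# tests/test_example.py (illustrative only)
def slugify(name: str) -> str:
    """Lowercase a name and replace internal whitespace with dashes."""
    return "-".join(name.strip().lower().split())

def test_slugify_basic():
    assert slugify("  DocsGPT Rocks ") == "docsgpt-rocks"

def test_slugify_empty():
    assert slugify("") == ""
```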
### Testing

View File

@@ -1,41 +0,0 @@
# **🎉 Join the Hacktoberfest with DocsGPT and win a Free T-shirt and other prizes! 🎉**
Welcome, contributors! We're excited to announce that DocsGPT is participating in Hacktoberfest. Get involved by submitting meaningful pull requests.
All contributors with accepted PRs will receive a cool Holopin! 🤩 (Watch out for a reply in your PR to collect it).
### 🏆 Top 50 contributors will receive a special T-shirt
### 🏆 [LLM Document analysis by LexEU competition](https://github.com/arc53/DocsGPT/blob/main/lexeu-competition.md):
A separate competition is available for those who submit new retrieval / workflow method that will analyze a Document using EU laws.
With 200$, 100$, 50$ prize for 1st, 2nd and 3rd place respectively.
You can find more information [here](https://github.com/arc53/DocsGPT/blob/main/lexeu-competition.md)
## 📜 Here's How to Contribute:
```text
🛠️ Code: This is the golden ticket! Make meaningful contributions through PRs.
🧩 API extension: Build an app utilising DocsGPT API. We prefer submissions that showcase original ideas and turn the API into an AI agent.
They can be a completely separate repos.
For example:
https://github.com/arc53/tg-bot-docsgpt-extenstion or
https://github.com/arc53/DocsGPT-cli
Non-Code Contributions:
📚 Wiki: Improve our documentation, create a guide or change existing documentation.
🖥️ Design: Improve the UI/UX or design a new feature.
📝 Blogging or Content Creation: Write articles or create videos to showcase DocsGPT or highlight your contributions!
```
### 📝 Guidelines for Pull Requests:
- Familiarize yourself with the current contributions and our [Roadmap](https://github.com/orgs/arc53/projects/2).
- Before contributing we highly advise that you check existing [issues](https://github.com/arc53/DocsGPT/issues) or [create](https://github.com/arc53/DocsGPT/issues/new/choose) an issue and wait to get assigned.
- Once you are finished with your contribution, please fill in this [form](https://airtable.com/appikMaJwdHhC1SDP/pagoblCJ9W29wf6Hf/form).
- Refer to the [Documentation](https://docs.docsgpt.cloud/).
- Feel free to join our [Discord](https://discord.gg/n5BX8dh8rU) server. We're here to help newcomers, so don't hesitate to jump in! Join us [here](https://discord.gg/n5BX8dh8rU).
Thank you very much for considering contributing to DocsGPT during Hacktoberfest! 🙏 Your contributions (not just simple typos) could earn you a stylish new t-shirt and other prizes as a token of our appreciation. 🎁 Join us, and let's code together! 🚀

README.md
View File

@@ -3,13 +3,11 @@
</h1>
<p align="center">
<strong>Open-Source Documentation Assistant</strong>
<strong>Open-Source RAG Assistant</strong>
</p>
<p align="left">
<strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is a cutting-edge open-source solution that streamlines the process of finding information in the project documentation. With its integration of the powerful <strong>GPT</strong> models, developers can easily ask questions about a project and receive accurate answers.
Say goodbye to time-consuming manual searches, and let <strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> help you quickly find the information you need. Try it out and see how it revolutionizes your project documentation experience. Contribute to its development and be a part of the future of AI-powered assistance.
<strong><a href="https://www.docsgpt.cloud/">DocsGPT</a></strong> is an open-source genAI tool that helps users get reliable answers from any knowledge source, while avoiding hallucinations. It enables quick and reliable information retrieval, with tooling and agentic system capability built in.
</p>
<div align="center">
@@ -17,180 +15,120 @@ Say goodbye to time-consuming manual searches, and let <strong><a href="https://
<a href="https://github.com/arc53/DocsGPT">![link to main GitHub showing Stars number](https://img.shields.io/github/stars/arc53/docsgpt?style=social)</a>
<a href="https://github.com/arc53/DocsGPT">![link to main GitHub showing Forks number](https://img.shields.io/github/forks/arc53/docsgpt?style=social)</a>
<a href="https://github.com/arc53/DocsGPT/blob/main/LICENSE">![link to license file](https://img.shields.io/github/license/arc53/docsgpt)</a>
<a href="https://www.bestpractices.dev/projects/9907"><img src="https://www.bestpractices.dev/projects/9907/badge"></a>
<a href="https://discord.gg/n5BX8dh8rU">![link to discord](https://img.shields.io/discord/1070046503302877216)</a>
<a href="https://twitter.com/docsgptai">![X (formerly Twitter) URL](https://img.shields.io/twitter/follow/docsgptai)</a>
<a href="https://docs.docsgpt.cloud/quickstart">⚡️ Quickstart</a><a href="https://app.docsgpt.cloud/">☁️ Cloud Version</a><a href="https://discord.gg/n5BX8dh8rU">💬 Discord</a>
<br>
<a href="https://docs.docsgpt.cloud/">📖 Documentation</a><a href="https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md">👫 Contribute</a><a href="https://blog.docsgpt.cloud/">🗞 Blog</a>
<br>
</div>
<div align="center">
<img src="https://d3dg1063dc54p9.cloudfront.net/videos/demov7.gif" alt="video-example-of-docs-gpt" width="800" height="450">
</div>
<h3 align="left">
<strong>Key Features:</strong>
</h3>
<ul align="left">
<li><strong>🗂️ Wide Format Support:</strong> Reads PDF, DOCX, CSV, XLSX, EPUB, MD, RST, HTML, MDX, JSON, PPTX, and images.</li>
<li><strong>🌐 Web & Data Integration:</strong> Ingests from URLs, sitemaps, Reddit, GitHub and web crawlers.</li>
<li><strong>✅ Reliable Answers:</strong> Get accurate, hallucination-free responses with source citations viewable in a clean UI.</li>
<li><strong>🔑 Streamlined API Keys:</strong> Generate keys linked to your settings, documents, and models, simplifying chatbot and integration setup.</li>
<li><strong>🔗 Actionable Tooling:</strong> Connect to APIs, tools, and other services to enable LLM actions.</li>
<li><strong>🧩 Pre-built Integrations:</strong> Use readily available HTML/React chat widgets, search tools, Discord/Telegram bots, and more.</li>
<li><strong>🔌 Flexible Deployment:</strong> Works with major LLMs (OpenAI, Google, Anthropic) and local models (Ollama, llama_cpp).</li>
<li><strong>🏢 Secure & Scalable:</strong> Run privately and securely with Kubernetes support, designed for enterprise-grade reliability.</li>
</ul>
## Roadmap
- [x] Full GoogleAI compatibility (Jan 2025)
- [x] Add tools (Jan 2025)
- [x] Manually updating chunks in the app UI (Feb 2025)
- [x] Devcontainer for easy development (Feb 2025)
- [ ] Anthropic Tool compatibility
- [ ] Add triggerable actions / tools (webhook)
- [ ] Add OAuth 2.0 authentication for tools and sources
- [ ] Chatbots menu re-design to handle tools, scheduling, and more
You can find our full roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
### Production Support / Help for Companies:
We're eager to provide personalized assistance when deploying your DocsGPT to a live environment.
<a href ="https://cal.com/arc53/docsgpt-demo-b2b">
<img alt="Let's chat" src="https://cal.com/book-with-cal-dark.svg" />
</a>
[Get a Demo :wave:](https://www.docsgpt.cloud/contact)
[Send Email :email:](mailto:contact@arc53.com?subject=DocsGPT%20support%2Fsolutions)
[Send Email :email:](mailto:support@docsgpt.cloud?subject=DocsGPT%20support%2Fsolutions)
<img src="https://github.com/user-attachments/assets/9a1f21de-7a15-4e42-9424-70d22ba5a913" alt="video-example-of-docs-gpt" width="1000" height="500">
## Roadmap
You can find our roadmap [here](https://github.com/orgs/arc53/projects/2). Please don't hesitate to contribute or create issues, it helps us improve DocsGPT!
## Our Open-Source Models Optimized for DocsGPT:
| Name | Base Model | Requirements (or similar) |
| --------------------------------------------------------------------- | ----------- | ------------------------- |
| [Docsgpt-7b-mistral](https://huggingface.co/Arc53/docsgpt-7b-mistral) | Mistral-7b | 1xA10G gpu |
| [Docsgpt-14b](https://huggingface.co/Arc53/docsgpt-14b) | llama-2-14b | 2xA10 gpu's |
| [Docsgpt-40b-falcon](https://huggingface.co/Arc53/docsgpt-40b-falcon) | falcon-40b | 8xA10G gpu's |
If you don't have enough resources to run these models, you can use bitsandbytes to quantize them.
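As a rough sketch, 4-bit quantization with bitsandbytes via Hugging Face Transformers looks like the following (the GPU setup and any generation settings are assumptions, not project defaults):
```python
# Load Docsgpt-7b-mistral in 4-bit to fit on a smaller GPU (needs transformers, accelerate, bitsandbytes)
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "Arc53/docsgpt-7b-mistral"
bnb_config = BitsAndBytesConfig(load_in_4bit=True)  # roughly 4x less memory than fp16

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # place layers on the available GPU(s)
)
```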
## End to End AI Framework for Information Retrieval
![Architecture chart](https://github.com/user-attachments/assets/fc6a7841-ddfc-45e6-b5a0-d05fe648cbe2)
## Useful Links
- :mag: :fire: [Cloud Version](https://app.docsgpt.cloud/)
- :speech_balloon: :tada: [Join our Discord](https://discord.gg/n5BX8dh8rU)
- :books: :sunglasses: [Guides](https://docs.docsgpt.cloud/)
- :couple: [Interested in contributing?](https://github.com/arc53/DocsGPT/blob/main/CONTRIBUTING.md)
- :file_folder: :rocket: [How to use any other documentation](https://docs.docsgpt.cloud/Guides/How-to-train-on-other-documentation)
- :house: :closed_lock_with_key: [How to host it locally (so all data will stay on-premises)](https://docs.docsgpt.cloud/Guides/How-to-use-different-LLM)
## Project Structure
- Application - Flask app (main application).
- Extensions - Chrome extension.
- Scripts - Script that creates similarity search index for other libraries.
- Frontend - Frontend uses <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
## QuickStart
> [!Note]
> Make sure you have [Docker](https://docs.docker.com/engine/install/) installed
On Mac OS or Linux, write:
A more detailed [Quickstart](https://docs.docsgpt.cloud/quickstart) is available in our documentation
`./setup.sh`
1. **Clone the repository:**
It will install all the dependencies and allow you to download the local model, use OpenAI or use our LLM API.
Otherwise, refer to this Guide for Windows:
1. Download and open this repository with `git clone https://github.com/arc53/DocsGPT.git`
2. Create a `.env` file in your root directory and set the env variables and `VITE_API_STREAMING` to true or false, depending on whether you want streaming answers or not.
It should look like this inside:
```
LLM_NAME=[docsgpt or openai or others]
VITE_API_STREAMING=true
API_KEY=[if LLM_NAME is openai]
```bash
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
```
See optional environment variables in the [/.env-template](https://github.com/arc53/DocsGPT/blob/main/.env-template) and [/application/.env_sample](https://github.com/arc53/DocsGPT/blob/main/application/.env_sample) files.
**For macOS and Linux:**
3. Run [./run-with-docker-compose.sh](https://github.com/arc53/DocsGPT/blob/main/run-with-docker-compose.sh).
4. Navigate to http://localhost:5173/.
2. **Run the setup script:**
To stop, just run `Ctrl + C`.
```bash
./setup.sh
```
## Development Environments
This interactive script will guide you through setting up DocsGPT. It offers four options: using the public API, running locally, connecting to a local inference engine, or using a cloud API provider. The script will automatically configure your `.env` file and handle necessary downloads and installations based on your chosen option.
### Spin up Mongo and Redis
**For Windows:**
For development, only two containers are used from [docker-compose.yaml](https://github.com/arc53/DocsGPT/blob/main/docker-compose.yaml) (by deleting all services except for Redis and Mongo).
See file [docker-compose-dev.yaml](./docker-compose-dev.yaml).
2. **Follow the Docker Deployment Guide:**
Run
Please refer to the [Docker Deployment documentation](https://docs.docsgpt.cloud/Deploying/Docker-Deploying) for detailed step-by-step instructions on setting up DocsGPT using Docker.
**Navigate to http://localhost:5173/**
To stop DocsGPT, open a terminal in the `DocsGPT` directory and run:
```bash
docker compose -f deployment/docker-compose.yaml down
```
docker compose -f docker-compose-dev.yaml build
docker compose -f docker-compose-dev.yaml up -d
```
### Run the Backend
(or use the specific `docker compose down` command shown after running `setup.sh`).
> [!Note]
> Make sure you have Python 3.10 or 3.11 installed.
1. Export required environment variables or prepare a `.env` file in the project folder:
- Copy [.env-template](https://github.com/arc53/DocsGPT/blob/main/application/.env-template) and create `.env`.
(check out [`application/core/settings.py`](application/core/settings.py) if you want to see more config options.)
2. (optional) Create a Python virtual environment:
You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments.
a) On Mac OS and Linux
```commandline
python -m venv venv
. venv/bin/activate
```
b) On Windows
```commandline
python -m venv venv
venv/Scripts/activate
```
3. Download embedding model and save it in the `model/` folder:
You can use the script below, or download it manually from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it and save it in the `model/` folder.
```commandline
wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
unzip mpnet-base-v2.zip -d model
rm mpnet-base-v2.zip
```
4. Install dependencies for the backend:
```commandline
pip install -r application/requirements.txt
```
5. Run the app using `flask --app application/app.py run --host=0.0.0.0 --port=7091`.
6. Start worker with `celery -A application.app.celery worker -l INFO`.
### Start Frontend
> [!Note]
> Make sure you have Node version 16 or higher.
1. Navigate to the [/frontend](https://github.com/arc53/DocsGPT/tree/main/frontend) folder.
2. Install the required packages `husky` and `vite` (ignore if already installed).
```commandline
npm install husky -g
npm install vite -g
```
3. Install dependencies by running `npm install --include=dev`.
4. Run the app using `npm run dev`.
> For development environment setup instructions, please refer to the [Development Environment Guide](https://docs.docsgpt.cloud/Deploying/Development-Environment).
## Contributing
Please refer to the [CONTRIBUTING.md](CONTRIBUTING.md) file for information about how to get involved. We welcome issues, questions, and pull requests.
## Architecture
![Architecture chart](https://github.com/user-attachments/assets/fc6a7841-ddfc-45e6-b5a0-d05fe648cbe2)
## Project Structure
- Application - Flask app (main application).
- Extensions - Extensions, like react widget or discord bot.
- Frontend - Frontend uses <a href="https://vitejs.dev/">Vite</a> and <a href="https://react.dev/">React</a>.
- Scripts - Miscellaneous scripts.
## Code Of Conduct
We as members, contributors, and leaders, pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. Please refer to the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file for more information about contributing.
## Many Thanks To Our Contributors⚡
<a href="https://github.com/arc53/DocsGPT/graphs/contributors" alt="View Contributors">

Binary file not shown. (image removed; previously 23 KiB)

View File

@@ -6,21 +6,20 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa && \
# Install necessary packages and Python
apt-get update && \
apt-get install -y --no-install-recommends gcc wget unzip libc6-dev python3.11 python3.11-distutils python3.11-venv && \
apt-get install -y --no-install-recommends gcc wget unzip libc6-dev python3.12 python3.12-venv && \
rm -rf /var/lib/apt/lists/*
# Verify Python installation and setup symlink
RUN if [ -f /usr/bin/python3.11 ]; then \
ln -s /usr/bin/python3.11 /usr/bin/python; \
RUN if [ -f /usr/bin/python3.12 ]; then \
ln -s /usr/bin/python3.12 /usr/bin/python; \
else \
echo "Python 3.11 not found"; exit 1; \
echo "Python 3.12 not found"; exit 1; \
fi
# Download and unzip the model
RUN wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip && \
unzip mpnet-base-v2.zip -d model && \
unzip mpnet-base-v2.zip -d models && \
rm mpnet-base-v2.zip
# Install Rust
@@ -33,7 +32,7 @@ RUN apt-get remove --purge -y wget unzip && apt-get autoremove -y && rm -rf /var
COPY requirements.txt .
# Setup Python virtual environment
RUN python3.11 -m venv /venv
RUN python3.12 -m venv /venv
# Activate virtual environment and install Python packages
ENV PATH="/venv/bin:$PATH"
@@ -49,9 +48,8 @@ FROM ubuntu:24.04 as final
RUN apt-get update && \
apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa && \
# Install Python
apt-get update && apt-get install -y --no-install-recommends python3.11 && \
ln -s /usr/bin/python3.11 /usr/bin/python && \
apt-get update && apt-get install -y --no-install-recommends python3.12 && \
ln -s /usr/bin/python3.12 /usr/bin/python && \
rm -rf /var/lib/apt/lists/*
# Set working directory
@@ -63,7 +61,8 @@ RUN groupadd -r appuser && \
# Copy the virtual environment and model from the builder stage
COPY --from=builder /venv /venv
COPY --from=builder /model /app/model
COPY --from=builder /models /app/models
# Copy your application code
COPY . /app/application

View File

@@ -1,14 +1,13 @@
import asyncio
import datetime
import json
import logging
import os
import sys
import traceback
import logging
from bson.dbref import DBRef
from bson.objectid import ObjectId
from flask import Blueprint, current_app, make_response, request, Response
from flask import Blueprint, make_response, request, Response
from flask_restx import fields, Namespace, Resource
@@ -18,7 +17,7 @@ from application.error import bad_request
from application.extensions import api
from application.llm.llm_creator import LLMCreator
from application.retriever.retriever_creator import RetrieverCreator
from application.utils import check_required_fields
from application.utils import check_required_fields, limit_chat_history
logger = logging.getLogger(__name__)
@@ -37,7 +36,7 @@ api.add_namespace(answer_ns)
gpt_model = ""
# to have some kind of default behaviour
if settings.LLM_NAME == "openai":
gpt_model = "gpt-3.5-turbo"
gpt_model = "gpt-4o-mini"
elif settings.LLM_NAME == "anthropic":
gpt_model = "claude-2"
elif settings.LLM_NAME == "groq":
@@ -89,9 +88,6 @@ def get_data_from_api_key(api_key):
if data is None:
raise Exception("Invalid API Key, please generate new key", 401)
if "retriever" not in data:
data["retriever"] = None
if "source" in data and isinstance(data["source"], DBRef):
source_doc = db.dereference(data["source"])
data["source"] = str(source_doc["_id"])
@@ -118,8 +114,27 @@ def is_azure_configured():
)
def save_conversation(conversation_id, question, response, source_log_docs, llm):
if conversation_id is not None and conversation_id != "None":
def save_conversation(
conversation_id, question, response, source_log_docs, tool_calls, llm, index=None
):
if conversation_id is not None and index is not None:
conversations_collection.update_one(
{"_id": ObjectId(conversation_id), f"queries.{index}": {"$exists": True}},
{
"$set": {
f"queries.{index}.prompt": question,
f"queries.{index}.response": response,
f"queries.{index}.sources": source_log_docs,
f"queries.{index}.tool_calls": tool_calls,
}
},
)
# Remove any queries that come after the updated one
conversations_collection.update_one(
{"_id": ObjectId(conversation_id), f"queries.{index}": {"$exists": True}},
{"$push": {"queries": {"$each": [], "$slice": index + 1}}},
)
elif conversation_id is not None and conversation_id != "None":
conversations_collection.update_one(
{"_id": ObjectId(conversation_id)},
{
@@ -128,6 +143,7 @@ def save_conversation(conversation_id, question, response, source_log_docs, llm)
"prompt": question,
"response": response,
"sources": source_log_docs,
"tool_calls": tool_calls,
}
}
},
@@ -141,17 +157,13 @@ def save_conversation(conversation_id, question, response, source_log_docs, llm)
"role": "assistant",
"content": "Summarise following conversation in no more than 3 "
"words, respond ONLY with the summary, use the same "
"language as the system \n\nUser: "
+ question
+ "\n\n"
+ "AI: "
+ response,
"language as the system",
},
{
"role": "user",
"content": "Summarise following conversation in no more than 3 words, "
"respond ONLY with the summary, use the same language as the "
"system",
"system \n\nUser: " + question + "\n\n" + "AI: " + response,
},
]
@@ -166,6 +178,7 @@ def save_conversation(conversation_id, question, response, source_log_docs, llm)
"prompt": question,
"response": response,
"sources": source_log_docs,
"tool_calls": tool_calls,
}
],
}
@@ -186,12 +199,13 @@ def get_prompt(prompt_id):
def complete_stream(
question, retriever, conversation_id, user_api_key, isNoneDoc=False
question, retriever, conversation_id, user_api_key, isNoneDoc=False, index=None
):
try:
response_full = ""
source_log_docs = []
tool_calls = []
answer = retriever.gen()
sources = retriever.search()
for source in sources:
@@ -200,6 +214,7 @@ def complete_stream(
if len(sources) > 0:
data = json.dumps({"type": "source", "source": sources})
yield f"data: {data}\n\n"
for line in answer:
if "answer" in line:
response_full += str(line["answer"])
@@ -207,6 +222,10 @@ def complete_stream(
yield f"data: {data}\n\n"
elif "source" in line:
source_log_docs.append(line["source"])
elif "tool_calls" in line:
tool_calls = line["tool_calls"]
data = json.dumps({"type": "tool_calls", "tool_calls": tool_calls})
yield f"data: {data}\n\n"
if isNoneDoc:
for doc in source_log_docs:
@@ -217,7 +236,13 @@ def complete_stream(
)
if user_api_key is None:
conversation_id = save_conversation(
conversation_id, question, response_full, source_log_docs, llm
conversation_id,
question,
response_full,
source_log_docs,
tool_calls,
llm,
index,
)
# send data.type = "end" to indicate that the stream has ended as json
data = json.dumps({"type": "id", "id": str(conversation_id)})
@@ -240,13 +265,12 @@ def complete_stream(
data = json.dumps({"type": "end"})
yield f"data: {data}\n\n"
except Exception as e:
print("\033[91merr", str(e), file=sys.stderr)
traceback.print_exc()
logger.error(f"Error in stream: {str(e)}")
logger.error(traceback.format_exc())
data = json.dumps(
{
"type": "error",
"error": "Please try again later. We apologize for any inconvenience.",
"error_exception": str(e),
}
)
yield f"data: {data}\n\n"
@@ -282,6 +306,9 @@ class Stream(Resource):
"isNoneDoc": fields.Boolean(
required=False, description="Flag indicating if no document is used"
),
"index": fields.Integer(
required=False, description="The position where query is to be updated"
),
},
)
@@ -290,19 +317,21 @@ class Stream(Resource):
def post(self):
data = request.get_json()
required_fields = ["question"]
if "index" in data:
required_fields = ["question", "conversation_id"]
missing_fields = check_required_fields(data, required_fields)
if missing_fields:
return missing_fields
try:
question = data["question"]
history = data.get("history", [])
history = json.loads(history)
history = limit_chat_history(
json.loads(data.get("history", [])), gpt_model=gpt_model
)
conversation_id = data.get("conversation_id")
prompt_id = data.get("prompt_id", "default")
index = data.get("index", None)
chunks = int(data.get("chunks", 2))
token_limit = data.get("token_limit", settings.DEFAULT_MAX_HISTORY)
retriever_name = data.get("retriever", "classic")
@@ -324,7 +353,7 @@ class Stream(Resource):
source = {}
user_api_key = None
current_app.logger.info(
logger.info(
f"/stream - request_data: {data}, source: {source}",
extra={"data": json.dumps({"request_data": data, "source": source})},
)
@@ -351,30 +380,27 @@ class Stream(Resource):
conversation_id=conversation_id,
user_api_key=user_api_key,
isNoneDoc=data.get("isNoneDoc"),
index=index,
),
mimetype="text/event-stream",
)
except ValueError:
message = "Malformed request body"
print("\033[91merr", str(message), file=sys.stderr)
logger.error(f"/stream - error: {message}")
return Response(
error_stream_generate(message),
status=400,
mimetype="text/event-stream",
)
except Exception as e:
current_app.logger.error(
logger.error(
f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
extra={"error": str(e), "traceback": traceback.format_exc()},
)
message = e.args[0]
status_code = 400
# Custom exceptions with two arguments, index 1 as status code
if len(e.args) >= 2:
status_code = e.args[1]
return Response(
error_stream_generate(message),
error_stream_generate("Unknown error occurred"),
status=status_code,
mimetype="text/event-stream",
)
@@ -421,14 +447,16 @@ class Answer(Resource):
@api.doc(description="Provide an answer based on the question and retriever")
def post(self):
data = request.get_json()
required_fields = ["question"]
required_fields = ["question"]
missing_fields = check_required_fields(data, required_fields)
if missing_fields:
return missing_fields
try:
question = data["question"]
history = data.get("history", [])
history = limit_chat_history(
json.loads(data.get("history", [])), gpt_model=gpt_model
)
conversation_id = data.get("conversation_id")
prompt_id = data.get("prompt_id", "default")
chunks = int(data.get("chunks", 2))
@@ -452,7 +480,7 @@ class Answer(Resource):
prompt = get_prompt(prompt_id)
current_app.logger.info(
logger.info(
f"/api/answer - request_data: {data}, source: {source}",
extra={"data": json.dumps({"request_data": data, "source": source})},
)
@@ -469,13 +497,16 @@ class Answer(Resource):
user_api_key=user_api_key,
)
source_log_docs = []
response_full = ""
source_log_docs = []
tool_calls = []
for line in retriever.gen():
if "source" in line:
source_log_docs.append(line["source"])
elif "answer" in line:
response_full += line["answer"]
elif "tool_calls" in line:
tool_calls.append(line["tool_calls"])
if data.get("isNoneDoc"):
for doc in source_log_docs:
@@ -488,7 +519,12 @@ class Answer(Resource):
result = {"answer": response_full, "sources": source_log_docs}
result["conversation_id"] = str(
save_conversation(
conversation_id, question, response_full, source_log_docs, llm
conversation_id,
question,
response_full,
source_log_docs,
tool_calls,
llm,
)
)
retriever_params = retriever.get_params()
@@ -507,7 +543,7 @@ class Answer(Resource):
)
except Exception as e:
current_app.logger.error(
logger.error(
f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
extra={"error": str(e), "traceback": traceback.format_exc()},
)
@@ -572,7 +608,7 @@ class Search(Resource):
source = {}
user_api_key = None
current_app.logger.info(
logger.info(
f"/api/answer - request_data: {data}, source: {source}",
extra={"data": json.dumps({"request_data": data, "source": source})},
)
@@ -610,7 +646,7 @@ class Search(Resource):
doc["source"] = "None"
except Exception as e:
current_app.logger.error(
logger.error(
f"/api/search - error: {str(e)} - traceback: {traceback.format_exc()}",
extra={"error": str(e), "traceback": traceback.format_exc()},
)

File diff suppressed because it is too large.

View File

@@ -2,22 +2,22 @@ import platform
import dotenv
from flask import Flask, redirect, request
from application.api.answer.routes import answer
from application.api.internal.routes import internal
from application.api.user.routes import user
from application.celery_init import celery
from application.core.logging_config import setup_logging
from application.core.settings import settings
from application.extensions import api
setup_logging()
from application.api.answer.routes import answer # noqa: E402
from application.api.internal.routes import internal # noqa: E402
from application.api.user.routes import user # noqa: E402
from application.celery_init import celery # noqa: E402
from application.core.settings import settings # noqa: E402
from application.extensions import api # noqa: E402
if platform.system() == "Windows":
import pathlib
pathlib.PosixPath = pathlib.WindowsPath
dotenv.load_dotenv()
setup_logging()
app = Flask(__name__)
app.register_blueprint(user)

View File

@@ -1,8 +1,10 @@
import redis
import time
import json
import logging
import time
from threading import Lock
import redis
from application.core.settings import settings
from application.utils import get_hash
@@ -11,41 +13,47 @@ logger = logging.getLogger(__name__)
_redis_instance = None
_instance_lock = Lock()
def get_redis_instance():
global _redis_instance
if _redis_instance is None:
with _instance_lock:
if _redis_instance is None:
try:
_redis_instance = redis.Redis.from_url(settings.CACHE_REDIS_URL, socket_connect_timeout=2)
_redis_instance = redis.Redis.from_url(
settings.CACHE_REDIS_URL, socket_connect_timeout=2
)
except redis.ConnectionError as e:
logger.error(f"Redis connection error: {e}")
_redis_instance = None
return _redis_instance
def gen_cache_key(*messages, model="docgpt"):
def gen_cache_key(messages, model="docgpt", tools=None):
if not all(isinstance(msg, dict) for msg in messages):
raise ValueError("All messages must be dictionaries.")
messages_str = json.dumps(list(messages), sort_keys=True)
combined = f"{model}_{messages_str}"
messages_str = json.dumps(messages)
tools_str = json.dumps(str(tools)) if tools else ""
combined = f"{model}_{messages_str}_{tools_str}"
cache_key = get_hash(combined)
return cache_key
def gen_cache(func):
def wrapper(self, model, messages, *args, **kwargs):
def wrapper(self, model, messages, stream, tools=None, *args, **kwargs):
try:
cache_key = gen_cache_key(*messages)
cache_key = gen_cache_key(messages, model, tools)
redis_client = get_redis_instance()
if redis_client:
try:
cached_response = redis_client.get(cache_key)
if cached_response:
return cached_response.decode('utf-8')
return cached_response.decode("utf-8")
except redis.ConnectionError as e:
logger.error(f"Redis connection error: {e}")
result = func(self, model, messages, *args, **kwargs)
if redis_client:
result = func(self, model, messages, stream, tools, *args, **kwargs)
if redis_client and isinstance(result, str):
try:
redis_client.set(cache_key, result, ex=1800)
except redis.ConnectionError as e:
@@ -55,20 +63,22 @@ def gen_cache(func):
except ValueError as e:
logger.error(e)
return "Error: No user message found in the conversation to generate a cache key."
return wrapper
def stream_cache(func):
def wrapper(self, model, messages, stream, *args, **kwargs):
cache_key = gen_cache_key(*messages)
def wrapper(self, model, messages, stream, tools=None, *args, **kwargs):
cache_key = gen_cache_key(messages, model, tools)
logger.info(f"Stream cache key: {cache_key}")
redis_client = get_redis_instance()
if redis_client:
try:
cached_response = redis_client.get(cache_key)
if cached_response:
logger.info(f"Cache hit for stream key: {cache_key}")
cached_response = json.loads(cached_response.decode('utf-8'))
cached_response = json.loads(cached_response.decode("utf-8"))
for chunk in cached_response:
yield chunk
time.sleep(0.03)
@@ -76,18 +86,18 @@ def stream_cache(func):
except redis.ConnectionError as e:
logger.error(f"Redis connection error: {e}")
result = func(self, model, messages, stream, *args, **kwargs)
result = func(self, model, messages, stream, tools=tools, *args, **kwargs)
stream_cache_data = []
for chunk in result:
stream_cache_data.append(chunk)
yield chunk
if redis_client:
try:
redis_client.set(cache_key, json.dumps(stream_cache_data), ex=1800)
logger.info(f"Stream cache saved for key: {cache_key}")
except redis.ConnectionError as e:
logger.error(f"Redis connection error: {e}")
return wrapper
return wrapper

View File

@@ -2,14 +2,22 @@ from celery import Celery
from application.core.settings import settings
from celery.signals import setup_logging
def make_celery(app_name=__name__):
celery = Celery(app_name, broker=settings.CELERY_BROKER_URL, backend=settings.CELERY_RESULT_BACKEND)
celery = Celery(
app_name,
broker=settings.CELERY_BROKER_URL,
backend=settings.CELERY_RESULT_BACKEND,
)
celery.conf.update(settings)
return celery
@setup_logging.connect
def config_loggers(*args, **kwargs):
from application.core.logging_config import setup_logging
setup_logging()
celery = make_celery()

View File

@@ -1,25 +1,37 @@
import os
from pathlib import Path
from typing import Optional
import os
from pydantic_settings import BaseSettings
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
current_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
class Settings(BaseSettings):
LLM_NAME: str = "docsgpt"
MODEL_NAME: Optional[str] = None # if LLM_NAME is openai, MODEL_NAME can be gpt-4 or gpt-3.5-turbo
MODEL_NAME: Optional[str] = (
None # if LLM_NAME is openai, MODEL_NAME can be gpt-4 or gpt-3.5-turbo
)
EMBEDDINGS_NAME: str = "huggingface_sentence-transformers/all-mpnet-base-v2"
CELERY_BROKER_URL: str = "redis://localhost:6379/0"
CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
MONGO_URI: str = "mongodb://localhost:27017/docsgpt"
MODEL_PATH: str = os.path.join(current_dir, "models/docsgpt-7b-f16.gguf")
DEFAULT_MAX_HISTORY: int = 150
MODEL_TOKEN_LIMITS: dict = {"gpt-3.5-turbo": 4096, "claude-2": 1e5}
MODEL_TOKEN_LIMITS: dict = {
"gpt-4o-mini": 128000,
"gpt-3.5-turbo": 4096,
"claude-2": 1e5,
"gemini-2.0-flash-exp": 1e6,
}
UPLOAD_FOLDER: str = "inputs"
VECTOR_STORE: str = "faiss" # "faiss" or "elasticsearch" or "qdrant" or "milvus" or "lancedb"
RETRIEVERS_ENABLED: list = ["classic_rag", "duckduck_search"] # also brave_search
PARSE_PDF_AS_IMAGE: bool = False
VECTOR_STORE: str = (
"faiss" # "faiss" or "elasticsearch" or "qdrant" or "milvus" or "lancedb"
)
RETRIEVERS_ENABLED: list = ["classic_rag", "duckduck_search"] # also brave_search
# LLM Cache
CACHE_REDIS_URL: str = "redis://localhost:6379/2"
@@ -27,12 +39,18 @@ class Settings(BaseSettings):
API_URL: str = "http://localhost:7091" # backend url for celery worker
API_KEY: Optional[str] = None # LLM api key
EMBEDDINGS_KEY: Optional[str] = None # api key for embeddings (if using openai, just copy API_KEY)
EMBEDDINGS_KEY: Optional[str] = (
None # api key for embeddings (if using openai, just copy API_KEY)
)
OPENAI_API_BASE: Optional[str] = None # azure openai api base url
OPENAI_API_VERSION: Optional[str] = None # azure openai api version
AZURE_DEPLOYMENT_NAME: Optional[str] = None # azure deployment name for answering
AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = None # azure deployment name for embeddings
OPENAI_BASE_URL: Optional[str] = None # OpenAI base url for OpenAI-compatible models
AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = (
None # azure deployment name for embeddings
)
OPENAI_BASE_URL: Optional[str] = (
None # OpenAI base url for OpenAI-compatible models
)
# elasticsearch
ELASTIC_CLOUD_ID: Optional[str] = None # cloud id for elasticsearch
@@ -67,12 +85,14 @@ class Settings(BaseSettings):
# Milvus vectorstore config
MILVUS_COLLECTION_NAME: Optional[str] = "docsgpt"
MILVUS_URI: Optional[str] = "./milvus_local.db" # milvus lite version as default
MILVUS_URI: Optional[str] = "./milvus_local.db" # milvus lite version as default
MILVUS_TOKEN: Optional[str] = ""
# LanceDB vectorstore config
LANCEDB_PATH: str = "/tmp/lancedb" # Path where LanceDB stores its local data
LANCEDB_TABLE_NAME: Optional[str] = "docsgpts" # Name of the table to use for storing vectors
LANCEDB_TABLE_NAME: Optional[str] = (
"docsgpts" # Name of the table to use for storing vectors
)
BRAVE_SEARCH_API_KEY: Optional[str] = None
FLASK_DEBUG_MODE: bool = False
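Because `Settings` extends pydantic's `BaseSettings`, every default above can be overridden from the environment; a minimal sketch (field subset chosen for illustration):

```python
import os

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    LLM_NAME: str = "docsgpt"
    CACHE_REDIS_URL: str = "redis://localhost:6379/2"


os.environ["LLM_NAME"] = "openai"  # e.g. exported via docker-compose or a .env file
settings = Settings()
print(settings.LLM_NAME)  # "openai" -- the environment wins over the default
```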

View File

@@ -17,7 +17,7 @@ class AnthropicLLM(BaseLLM):
self.AI_PROMPT = AI_PROMPT
def _raw_gen(
self, baseself, model, messages, stream=False, max_tokens=300, **kwargs
self, baseself, model, messages, stream=False, tools=None, max_tokens=300, **kwargs
):
context = messages[0]["content"]
user_question = messages[-1]["content"]
@@ -34,7 +34,7 @@ class AnthropicLLM(BaseLLM):
return completion.completion
def _raw_gen_stream(
self, baseself, model, messages, stream=True, max_tokens=300, **kwargs
self, baseself, model, messages, stream=True, tools=None, max_tokens=300, **kwargs
):
context = messages[0]["content"]
user_question = messages[-1]["content"]

View File

@@ -1,6 +1,7 @@
from abc import ABC, abstractmethod
from application.cache import gen_cache, stream_cache
from application.usage import gen_token_usage, stream_token_usage
from application.cache import stream_cache, gen_cache
class BaseLLM(ABC):
@@ -13,17 +14,43 @@ class BaseLLM(ABC):
return method(self, *args, **kwargs)
@abstractmethod
def _raw_gen(self, model, messages, stream, *args, **kwargs):
def _raw_gen(self, model, messages, stream, tools, *args, **kwargs):
pass
def gen(self, model, messages, stream=False, *args, **kwargs):
def gen(self, model, messages, stream=False, tools=None, *args, **kwargs):
decorators = [gen_token_usage, gen_cache]
return self._apply_decorator(self._raw_gen, decorators=decorators, model=model, messages=messages, stream=stream, *args, **kwargs)
return self._apply_decorator(
self._raw_gen,
decorators=decorators,
model=model,
messages=messages,
stream=stream,
tools=tools,
*args,
**kwargs
)
@abstractmethod
def _raw_gen_stream(self, model, messages, stream, *args, **kwargs):
pass
def gen_stream(self, model, messages, stream=True, *args, **kwargs):
def gen_stream(self, model, messages, stream=True, tools=None, *args, **kwargs):
decorators = [stream_cache, stream_token_usage]
return self._apply_decorator(self._raw_gen_stream, decorators=decorators, model=model, messages=messages, stream=stream, *args, **kwargs)
return self._apply_decorator(
self._raw_gen_stream,
decorators=decorators,
model=model,
messages=messages,
stream=stream,
tools=tools,
*args,
**kwargs
)
def supports_tools(self):
return hasattr(self, "_supports_tools") and callable(
getattr(self, "_supports_tools")
)
def _supports_tools(self):
raise NotImplementedError("Subclass must implement _supports_tools method")
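An illustrative subclass (names invented, not part of this PR) showing the contract the new signatures expect: `_raw_gen` returns a full string, `_raw_gen_stream` yields chunks, and defining `_supports_tools` is what makes `supports_tools()` return True:

```python
from application.llm.base import BaseLLM


class EchoLLM(BaseLLM):
    def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
        # Non-streaming path: return the final answer as one string.
        return messages[-1]["content"]

    def _raw_gen_stream(self, baseself, model, messages, stream=True, tools=None, **kwargs):
        # Streaming path: yield chunks; stream_cache collects and replays them.
        for word in messages[-1]["content"].split():
            yield word + " "

    def _supports_tools(self):
        return True
```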

View File

@@ -9,35 +9,25 @@ class DocsGPTAPILLM(BaseLLM):
super().__init__(*args, **kwargs)
self.api_key = api_key
self.user_api_key = user_api_key
self.endpoint = "https://llm.docsgpt.co.uk"
self.endpoint = "https://llm.arc53.com"
def _raw_gen(self, baseself, model, messages, stream=False, *args, **kwargs):
context = messages[0]["content"]
user_question = messages[-1]["content"]
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
response = requests.post(
f"{self.endpoint}/answer", json={"prompt": prompt, "max_new_tokens": 30}
f"{self.endpoint}/answer", json={"messages": messages, "max_new_tokens": 30}
)
response_clean = response.json()["a"].replace("###", "")
return response_clean
def _raw_gen_stream(self, baseself, model, messages, stream=True, *args, **kwargs):
context = messages[0]["content"]
user_question = messages[-1]["content"]
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
# send prompt to endpoint /stream
response = requests.post(
f"{self.endpoint}/stream",
json={"prompt": prompt, "max_new_tokens": 256},
json={"messages": messages, "max_new_tokens": 256},
stream=True,
)
for line in response.iter_lines():
if line:
# data = json.loads(line)
data_str = line.decode("utf-8")
if data_str.startswith("data: "):
data = json.loads(data_str[6:])

View File

@@ -1,21 +1,95 @@
from google import genai
from google.genai import types
from application.llm.base import BaseLLM
class GoogleLLM(BaseLLM):
def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api_key = api_key
self.user_api_key = user_api_key
def _clean_messages_google(self, messages):
return [
{
"role": "model" if message["role"] == "system" else message["role"],
"parts": [message["content"]],
}
for message in messages[1:]
]
cleaned_messages = []
for message in messages:
role = message.get("role")
content = message.get("content")
if role == "assistant":
role = "model"
parts = []
if role and content is not None:
if isinstance(content, str):
parts = [types.Part.from_text(content)]
elif isinstance(content, list):
for item in content:
if "text" in item:
parts.append(types.Part.from_text(item["text"]))
elif "function_call" in item:
parts.append(
types.Part.from_function_call(
name=item["function_call"]["name"],
args=item["function_call"]["args"],
)
)
elif "function_response" in item:
parts.append(
types.Part.from_function_response(
name=item["function_response"]["name"],
response=item["function_response"]["response"],
)
)
else:
raise ValueError(
f"Unexpected content dictionary format:{item}"
)
else:
raise ValueError(f"Unexpected content type: {type(content)}")
cleaned_messages.append(types.Content(role=role, parts=parts))
return cleaned_messages
def _clean_tools_format(self, tools_list):
genai_tools = []
for tool_data in tools_list:
if tool_data["type"] == "function":
function = tool_data["function"]
parameters = function["parameters"]
properties = parameters.get("properties", {})
if properties:
genai_function = dict(
name=function["name"],
description=function["description"],
parameters={
"type": "OBJECT",
"properties": {
k: {
**v,
"type": v["type"].upper() if v["type"] else None,
}
for k, v in properties.items()
},
"required": (
parameters["required"]
if "required" in parameters
else []
),
},
)
else:
genai_function = dict(
name=function["name"],
description=function["description"],
)
genai_tool = types.Tool(function_declarations=[genai_function])
genai_tools.append(genai_tool)
return genai_tools
def _raw_gen(
self,
@@ -23,13 +97,32 @@ class GoogleLLM(BaseLLM):
model,
messages,
stream=False,
**kwargs
):
import google.generativeai as genai
genai.configure(api_key=self.api_key)
model = genai.GenerativeModel(model, system_instruction=messages[0]["content"])
response = model.generate_content(self._clean_messages_google(messages))
return response.text
tools=None,
formatting="openai",
**kwargs,
):
client = genai.Client(api_key=self.api_key)
if formatting == "openai":
messages = self._clean_messages_google(messages)
config = types.GenerateContentConfig()
if messages[0].role == "system":
config.system_instruction = messages[0].parts[0].text
messages = messages[1:]
if tools:
cleaned_tools = self._clean_tools_format(tools)
config.tools = cleaned_tools
response = client.models.generate_content(
model=model,
contents=messages,
config=config,
)
return response
else:
response = client.models.generate_content(
model=model, contents=messages, config=config
)
return response.text
def _raw_gen_stream(
self,
@@ -37,12 +130,30 @@ class GoogleLLM(BaseLLM):
model,
messages,
stream=True,
**kwargs
):
import google.generativeai as genai
genai.configure(api_key=self.api_key)
model = genai.GenerativeModel(model, system_instruction=messages[0]["content"])
response = model.generate_content(self._clean_messages_google(messages), stream=True)
for line in response:
if line.text is not None:
yield line.text
tools=None,
formatting="openai",
**kwargs,
):
client = genai.Client(api_key=self.api_key)
if formatting == "openai":
messages = self._clean_messages_google(messages)
config = types.GenerateContentConfig()
if messages[0].role == "system":
config.system_instruction = messages[0].parts[0].text
messages = messages[1:]
if tools:
cleaned_tools = self._clean_tools_format(tools)
config.tools = cleaned_tools
response = client.models.generate_content_stream(
model=model,
contents=messages,
config=config,
)
for chunk in response:
if chunk.text is not None:
yield chunk.text
def _supports_tools(self):
return True
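A hedged sketch of the OpenAI-style tool spec `_clean_tools_format` expects as input (tool name and fields invented for illustration); the method wraps it in a `types.Tool` whose property types are upper-cased ("string" becomes "STRING"):

```python
openai_style_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    },
}
# _clean_tools_format([openai_style_tool]) -> one types.Tool with a single
# function declaration whose parameters use {"type": "OBJECT", ...}.
```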

View File

@@ -1,45 +1,32 @@
from application.llm.base import BaseLLM
from openai import OpenAI
class GroqLLM(BaseLLM):
def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
from openai import OpenAI
super().__init__(*args, **kwargs)
self.client = OpenAI(api_key=api_key, base_url="https://api.groq.com/openai/v1")
self.api_key = api_key
self.user_api_key = user_api_key
def _raw_gen(
self,
baseself,
model,
messages,
stream=False,
**kwargs
):
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
return response.choices[0].message.content
def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
if tools:
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, tools=tools, **kwargs
)
return response.choices[0]
else:
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
return response.choices[0].message.content
def _raw_gen_stream(
self,
baseself,
model,
messages,
stream=True,
**kwargs
):
self, baseself, model, messages, stream=True, tools=None, **kwargs
):
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
for line in response:
# import sys
# print(line.choices[0].delta.content, file=sys.stderr)
if line.choices[0].delta.content is not None:
yield line.choices[0].delta.content

View File

@@ -1,6 +1,7 @@
from application.llm.base import BaseLLM
from application.core.settings import settings
import json
from application.core.settings import settings
from application.llm.base import BaseLLM
class OpenAILLM(BaseLLM):
@@ -10,29 +11,95 @@ class OpenAILLM(BaseLLM):
super().__init__(*args, **kwargs)
if settings.OPENAI_BASE_URL:
self.client = OpenAI(
api_key=api_key,
base_url=settings.OPENAI_BASE_URL
)
self.client = OpenAI(api_key=api_key, base_url=settings.OPENAI_BASE_URL)
else:
self.client = OpenAI(api_key=api_key)
self.api_key = api_key
self.user_api_key = user_api_key
def _clean_messages_openai(self, messages):
cleaned_messages = []
for message in messages:
role = message.get("role")
content = message.get("content")
if role == "model":
role = "assistant"
if role and content is not None:
if isinstance(content, str):
cleaned_messages.append({"role": role, "content": content})
elif isinstance(content, list):
for item in content:
if "text" in item:
cleaned_messages.append(
{"role": role, "content": item["text"]}
)
elif "function_call" in item:
tool_call = {
"id": item["function_call"]["call_id"],
"type": "function",
"function": {
"name": item["function_call"]["name"],
"arguments": json.dumps(
item["function_call"]["args"]
),
},
}
cleaned_messages.append(
{
"role": "assistant",
"content": None,
"tool_calls": [tool_call],
}
)
elif "function_response" in item:
cleaned_messages.append(
{
"role": "tool",
"tool_call_id": item["function_response"][
"call_id"
],
"content": json.dumps(
item["function_response"]["response"]["result"]
),
}
)
else:
raise ValueError(
f"Unexpected content dictionary format: {item}"
)
else:
raise ValueError(f"Unexpected content type: {type(content)}")
return cleaned_messages
def _raw_gen(
self,
baseself,
model,
messages,
stream=False,
tools=None,
engine=settings.AZURE_DEPLOYMENT_NAME,
**kwargs
):
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
return response.choices[0].message.content
**kwargs,
):
messages = self._clean_messages_openai(messages)
print(messages)
if tools:
response = self.client.chat.completions.create(
model=model,
messages=messages,
stream=stream,
tools=tools,
**kwargs,
)
return response.choices[0]
else:
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
return response.choices[0].message.content
def _raw_gen_stream(
self,
@@ -40,19 +107,22 @@ class OpenAILLM(BaseLLM):
model,
messages,
stream=True,
tools=None,
engine=settings.AZURE_DEPLOYMENT_NAME,
**kwargs
):
**kwargs,
):
messages = self._clean_messages_openai(messages)
response = self.client.chat.completions.create(
model=model, messages=messages, stream=stream, **kwargs
)
for line in response:
# import sys
# print(line.choices[0].delta.content, file=sys.stderr)
if line.choices[0].delta.content is not None:
yield line.choices[0].delta.content
def _supports_tools(self):
return True
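A hedged sketch of how `_clean_messages_openai` flattens the internal tool-call format into OpenAI chat messages (call id and tool name invented for illustration):

```python
internal_turn = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": [
        {"function_call": {"call_id": "call_1", "name": "calculator_tool", "args": {"expression": "2 + 2"}}}
    ]},
    {"role": "tool", "content": [
        {"function_response": {"call_id": "call_1", "name": "calculator_tool", "response": {"result": 4}}}
    ]},
]
# _clean_messages_openai(internal_turn) yields an assistant message carrying
# tool_calls (arguments JSON-encoded) followed by {"role": "tool",
# "tool_call_id": "call_1", "content": "4"}.
```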
class AzureOpenAILLM(OpenAILLM):

View File

@@ -76,7 +76,7 @@ class SagemakerAPILLM(BaseLLM):
self.endpoint = settings.SAGEMAKER_ENDPOINT
self.runtime = runtime
def _raw_gen(self, baseself, model, messages, stream=False, **kwargs):
def _raw_gen(self, baseself, model, messages, stream=False, tools=None, **kwargs):
context = messages[0]["content"]
user_question = messages[-1]["content"]
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"
@@ -105,7 +105,7 @@ class SagemakerAPILLM(BaseLLM):
print(result[0]["generated_text"], file=sys.stderr)
return result[0]["generated_text"][len(prompt) :]
def _raw_gen_stream(self, baseself, model, messages, stream=True, **kwargs):
def _raw_gen_stream(self, baseself, model, messages, stream=True, tools=None, **kwargs):
context = messages[0]["content"]
user_question = messages[-1]["content"]
prompt = f"### Instruction \n {user_question} \n ### Context \n {context} \n ### Answer \n"

View File

@@ -0,0 +1,118 @@
import re
from typing import List, Tuple
import logging
from application.parser.schema.base import Document
from application.utils import get_encoding
logger = logging.getLogger(__name__)
class Chunker:
def __init__(
self,
chunking_strategy: str = "classic_chunk",
max_tokens: int = 2000,
min_tokens: int = 150,
duplicate_headers: bool = False,
):
if chunking_strategy not in ["classic_chunk"]:
raise ValueError(f"Unsupported chunking strategy: {chunking_strategy}")
self.chunking_strategy = chunking_strategy
self.max_tokens = max_tokens
self.min_tokens = min_tokens
self.duplicate_headers = duplicate_headers
self.encoding = get_encoding()
def separate_header_and_body(self, text: str) -> Tuple[str, str]:
header_pattern = r"^(.*?\n){3}"
match = re.match(header_pattern, text)
if match:
header = match.group(0)
body = text[len(header):]
else:
header, body = "", text # No header, treat entire text as body
return header, body
def combine_documents(self, doc: Document, next_doc: Document) -> Document:
combined_text = doc.text + " " + next_doc.text
combined_token_count = len(self.encoding.encode(combined_text))
new_doc = Document(
text=combined_text,
doc_id=doc.doc_id,
embedding=doc.embedding,
extra_info={**(doc.extra_info or {}), "token_count": combined_token_count}
)
return new_doc
def split_document(self, doc: Document) -> List[Document]:
split_docs = []
header, body = self.separate_header_and_body(doc.text)
header_tokens = self.encoding.encode(header) if header else []
body_tokens = self.encoding.encode(body)
current_position = 0
part_index = 0
while current_position < len(body_tokens):
end_position = current_position + self.max_tokens - len(header_tokens)
chunk_tokens = (header_tokens + body_tokens[current_position:end_position]
if self.duplicate_headers or part_index == 0 else body_tokens[current_position:end_position])
chunk_text = self.encoding.decode(chunk_tokens)
new_doc = Document(
text=chunk_text,
doc_id=f"{doc.doc_id}-{part_index}",
embedding=doc.embedding,
extra_info={**(doc.extra_info or {}), "token_count": len(chunk_tokens)}
)
split_docs.append(new_doc)
current_position = end_position
part_index += 1
header_tokens = []
return split_docs
def classic_chunk(self, documents: List[Document]) -> List[Document]:
processed_docs = []
i = 0
while i < len(documents):
doc = documents[i]
tokens = self.encoding.encode(doc.text)
token_count = len(tokens)
if self.min_tokens <= token_count <= self.max_tokens:
doc.extra_info = doc.extra_info or {}
doc.extra_info["token_count"] = token_count
processed_docs.append(doc)
i += 1
elif token_count < self.min_tokens:
if i + 1 < len(documents):
next_doc = documents[i + 1]
next_tokens = self.encoding.encode(next_doc.text)
if token_count + len(next_tokens) <= self.max_tokens:
# Combine small documents
combined_doc = self.combine_documents(doc, next_doc)
processed_docs.append(combined_doc)
i += 2
else:
# Keep the small document as is if adding next_doc would exceed max_tokens
doc.extra_info = doc.extra_info or {}
doc.extra_info["token_count"] = token_count
processed_docs.append(doc)
i += 1
else:
# No next document to combine with; add the small document as is
doc.extra_info = doc.extra_info or {}
doc.extra_info["token_count"] = token_count
processed_docs.append(doc)
i += 1
else:
# Split large documents
processed_docs.extend(self.split_document(doc))
i += 1
return processed_docs
def chunk(
self,
documents: List[Document]
) -> List[Document]:
if self.chunking_strategy == "classic_chunk":
return self.classic_chunk(documents)
else:
raise ValueError("Unsupported chunking strategy")

View File

@@ -0,0 +1,86 @@
import os
import logging
from retry import retry
from tqdm import tqdm
from application.core.settings import settings
from application.vectorstore.vector_creator import VectorCreator
@retry(tries=10, delay=60)
def add_text_to_store_with_retry(store, doc, source_id):
"""
Add a document's text and metadata to the vector store with retry logic.
Args:
store: The vector store object.
doc: The document to be added.
source_id: Unique identifier for the source.
"""
try:
doc.metadata["source_id"] = str(source_id)
store.add_texts([doc.page_content], metadatas=[doc.metadata])
except Exception as e:
logging.error(f"Failed to add document with retry: {e}")
raise
def embed_and_store_documents(docs, folder_name, source_id, task_status):
"""
Embeds documents and stores them in a vector store.
Args:
docs (list): List of documents to be embedded and stored.
folder_name (str): Directory to save the vector store.
source_id (str): Unique identifier for the source.
task_status: Task state manager for progress updates.
Returns:
None
"""
# Ensure the folder exists
if not os.path.exists(folder_name):
os.makedirs(folder_name)
# Initialize vector store
if settings.VECTOR_STORE == "faiss":
docs_init = [docs.pop(0)]
store = VectorCreator.create_vectorstore(
settings.VECTOR_STORE,
docs_init=docs_init,
source_id=folder_name,
embeddings_key=os.getenv("EMBEDDINGS_KEY"),
)
else:
store = VectorCreator.create_vectorstore(
settings.VECTOR_STORE,
source_id=source_id,
embeddings_key=os.getenv("EMBEDDINGS_KEY"),
)
store.delete_index()
total_docs = len(docs)
# Process and embed documents
for idx, doc in tqdm(
enumerate(docs),
desc="Embedding 🦖",
unit="docs",
total=total_docs,
bar_format="{l_bar}{bar}| Time Left: {remaining}",
):
try:
# Update task status for progress tracking
progress = int(((idx + 1) / total_docs) * 100)
task_status.update_state(state="PROGRESS", meta={"current": progress})
# Add document to vector store
add_text_to_store_with_retry(store, doc, source_id)
except Exception as e:
logging.error(f"Error embedding document {idx}: {e}")
logging.info(f"Saving progress at document {idx} out of {total_docs}")
store.save_local(folder_name)
break
# Save the vector store
if settings.VECTOR_STORE == "faiss":
store.save_local(folder_name)
logging.info("Vector store saved successfully.")

View File

@@ -13,6 +13,7 @@ from application.parser.file.rst_parser import RstParser
from application.parser.file.tabular_parser import PandasCSVParser, ExcelParser
from application.parser.file.json_parser import JSONParser
from application.parser.file.pptx_parser import PPTXParser
from application.parser.file.image_parser import ImageParser
from application.parser.schema.base import Document
DEFAULT_FILE_EXTRACTOR: Dict[str, BaseParser] = {
@@ -27,6 +28,9 @@ DEFAULT_FILE_EXTRACTOR: Dict[str, BaseParser] = {
".mdx": MarkdownParser(),
".json":JSONParser(),
".pptx":PPTXParser(),
".png": ImageParser(),
".jpg": ImageParser(),
".jpeg": ImageParser(),
}

View File

@@ -7,7 +7,8 @@ from pathlib import Path
from typing import Dict
from application.parser.file.base_parser import BaseParser
from application.core.settings import settings
import requests
class PDFParser(BaseParser):
"""PDF parser."""
@@ -18,22 +19,32 @@ class PDFParser(BaseParser):
def parse_file(self, file: Path, errors: str = "ignore") -> str:
"""Parse file."""
if settings.PARSE_PDF_AS_IMAGE:
doc2md_service = "https://llm.arc53.com/doc2md"
# alternatively you can use a local vision-capable LLM
with open(file, "rb") as file_loaded:
files = {'file': file_loaded}
response = requests.post(doc2md_service, files=files)
data = response.json()["markdown"]
return data
try:
import PyPDF2
from pypdf import PdfReader
except ImportError:
raise ValueError("PyPDF2 is required to read PDF files.")
raise ValueError("pypdf is required to read PDF files.")
text_list = []
with open(file, "rb") as fp:
# Create a PDF object
pdf = PyPDF2.PdfReader(fp)
pdf = PdfReader(fp)
# Get the number of pages in the PDF document
num_pages = len(pdf.pages)
# Iterate over every page
for page in range(num_pages):
for page_index in range(num_pages):
# Extract the text from the page
page_text = pdf.pages[page].extract_text()
page = pdf.pages[page_index]
page_text = page.extract_text()
text_list.append(page_text)
text = "\n".join(text_list)
@@ -56,4 +67,4 @@ class DocxParser(BaseParser):
text = docx2txt.process(file)
return text
return text

View File

@@ -0,0 +1,27 @@
"""Image parser.
Contains parser for .png, .jpg, .jpeg files.
"""
from pathlib import Path
import requests
from typing import Dict, Union
from application.parser.file.base_parser import BaseParser
class ImageParser(BaseParser):
"""Image parser."""
def _init_parser(self) -> Dict:
"""Init parser."""
return {}
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, list[str]]:
doc2md_service = "https://llm.arc53.com/doc2md"
# alternatively you can use a local vision-capable LLM
with open(file, "rb") as file_loaded:
files = {'file': file_loaded}
response = requests.post(doc2md_service, files=files)
data = response.json()["markdown"]
return data

View File

@@ -91,6 +91,25 @@ class RstParser(BaseParser):
]
return rst_tups
def chunk_by_token_count(self, text: str, max_tokens: int = 100) -> List[str]:
"""Chunk text by token count."""
avg_token_length = 5
chunk_size = max_tokens * avg_token_length
chunks = []
for i in range(0, len(text), chunk_size):
chunk = text[i:i+chunk_size]
if i + chunk_size < len(text):
last_space = chunk.rfind(' ')
if last_space != -1:
chunk = chunk[:last_space]
chunks.append(chunk.strip())
return chunks
def remove_images(self, content: str) -> str:
pattern = r"\.\. image:: (.*)"
content = re.sub(pattern, "", content)
@@ -136,7 +155,7 @@ class RstParser(BaseParser):
return {}
def parse_tups(
self, filepath: Path, errors: str = "ignore"
self, filepath: Path, errors: str = "ignore", max_tokens: Optional[int] = 1000
) -> List[Tuple[Optional[str], str]]:
"""Parse file into tuples."""
with open(filepath, "r") as f:
@@ -156,6 +175,15 @@ class RstParser(BaseParser):
rst_tups = self.remove_whitespaces_excess(rst_tups)
if self._remove_characters_excess:
rst_tups = self.remove_characters_excess(rst_tups)
# Apply chunking if max_tokens is provided
if max_tokens is not None:
chunked_tups = []
for header, text in rst_tups:
chunks = self.chunk_by_token_count(text, max_tokens)
for idx, chunk in enumerate(chunks):
chunked_tups.append((f"{header} - Chunk {idx + 1}", chunk))
return chunked_tups
return rst_tups
def parse_file(

View File

@@ -1,66 +0,0 @@
import os
import javalang
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.java'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, "r") as file:
java_code = file.read()
methods = {}
tree = javalang.parse.parse(java_code)
for _, node in tree.filter(javalang.tree.MethodDeclaration):
method_name = node.name
start_line = node.position.line - 1
end_line = start_line
brace_count = 0
for line in java_code.splitlines()[start_line:]:
end_line += 1
brace_count += line.count("{") - line.count("}")
if brace_count == 0:
break
method_source_code = "\n".join(java_code.splitlines()[start_line:end_line])
methods[method_name] = method_source_code
return methods
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = javalang.parse.parse(source_code)
for class_decl in tree.types:
class_name = class_decl.name
declarations = []
methods = []
for field_decl in class_decl.fields:
field_name = field_decl.declarators[0].name
field_type = field_decl.type.name
declarations.append(f"{field_type} {field_name}")
for method_decl in class_decl.methods:
methods.append(method_decl.name)
class_string = "Declarations: " + ", ".join(declarations) + "\n Method name: " + ", ".join(methods)
classes[class_name] = class_string
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict

View File

@@ -1,70 +0,0 @@
import os
import escodegen
import esprima
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.js'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = esprima.parseScript(source_code)
for node in tree.body:
if node.type == 'FunctionDeclaration':
func_name = node.id.name if node.id else '<anonymous>'
functions[func_name] = escodegen.generate(node)
elif node.type == 'VariableDeclaration':
for declaration in node.declarations:
if declaration.init and declaration.init.type == 'FunctionExpression':
func_name = declaration.id.name if declaration.id else '<anonymous>'
functions[func_name] = escodegen.generate(declaration.init)
elif node.type == 'ClassDeclaration':
for subnode in node.body.body:
if subnode.type == 'MethodDefinition':
func_name = subnode.key.name
functions[func_name] = escodegen.generate(subnode.value)
elif subnode.type == 'VariableDeclaration':
for declaration in subnode.declarations:
if declaration.init and declaration.init.type == 'FunctionExpression':
func_name = declaration.id.name if declaration.id else '<anonymous>'
functions[func_name] = escodegen.generate(declaration.init)
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = esprima.parseScript(source_code)
for node in tree.body:
if node.type == 'ClassDeclaration':
class_name = node.id.name
function_names = []
for subnode in node.body.body:
if subnode.type == 'MethodDefinition':
function_names.append(subnode.key.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict

View File

@@ -1,75 +0,0 @@
import os
from retry import retry
from application.core.settings import settings
from application.vectorstore.vector_creator import VectorCreator
# from langchain_community.embeddings import HuggingFaceEmbeddings
# from langchain_community.embeddings import HuggingFaceInstructEmbeddings
# from langchain_community.embeddings import CohereEmbeddings
@retry(tries=10, delay=60)
def store_add_texts_with_retry(store, i, id):
# add source_id to the metadata
i.metadata["source_id"] = str(id)
store.add_texts([i.page_content], metadatas=[i.metadata])
# store_pine.add_texts([i.page_content], metadatas=[i.metadata])
def call_openai_api(docs, folder_name, id, task_status):
# Function to create a vector store from the documents and save it to disk
if not os.path.exists(f"{folder_name}"):
os.makedirs(f"{folder_name}")
from tqdm import tqdm
c1 = 0
if settings.VECTOR_STORE == "faiss":
docs_init = [docs[0]]
docs.pop(0)
store = VectorCreator.create_vectorstore(
settings.VECTOR_STORE,
docs_init=docs_init,
source_id=f"{folder_name}",
embeddings_key=os.getenv("EMBEDDINGS_KEY"),
)
else:
store = VectorCreator.create_vectorstore(
settings.VECTOR_STORE,
source_id=str(id),
embeddings_key=os.getenv("EMBEDDINGS_KEY"),
)
store.delete_index()
# Uncomment for MPNet embeddings
# model_name = "sentence-transformers/all-mpnet-base-v2"
# hf = HuggingFaceEmbeddings(model_name=model_name)
# store = FAISS.from_documents(docs_test, hf)
s1 = len(docs)
for i in tqdm(
docs,
desc="Embedding 🦖",
unit="docs",
total=len(docs),
bar_format="{l_bar}{bar}| Time Left: {remaining}",
):
try:
task_status.update_state(
state="PROGRESS", meta={"current": int((c1 / s1) * 100)}
)
store_add_texts_with_retry(store, i, id)
except Exception as e:
print(e)
print("Error on ", i)
print("Saving progress")
print(f"stopped at {c1} out of {len(docs)}")
store.save_local(f"{folder_name}")
break
c1 += 1
if settings.VECTOR_STORE == "faiss":
store.save_local(f"{folder_name}")

View File

@@ -1,121 +0,0 @@
import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")

View File

@@ -2,16 +2,16 @@ import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from application.parser.remote.base import BaseRemote
from application.parser.schema.base import Document
from langchain_community.document_loaders import WebBaseLoader
class CrawlerLoader(BaseRemote):
def __init__(self, limit=10):
from langchain_community.document_loaders import WebBaseLoader
self.loader = WebBaseLoader # Initialize the document loader
self.limit = limit # Set the limit for the number of pages to scrape
def load_data(self, inputs):
url = inputs
# Check if the input is a list and if it is, use the first element
if isinstance(url, list) and url:
url = url[0]
@@ -19,24 +19,29 @@ class CrawlerLoader(BaseRemote):
if not urlparse(url).scheme:
url = "http://" + url
visited_urls = set() # Keep track of URLs that have been visited
base_url = urlparse(url).scheme + "://" + urlparse(url).hostname # Extract the base URL
urls_to_visit = [url] # List of URLs to be visited, starting with the initial URL
loaded_content = [] # Store the loaded content from each URL
visited_urls = set()
base_url = urlparse(url).scheme + "://" + urlparse(url).hostname
urls_to_visit = [url]
loaded_content = []
# Continue crawling until there are no more URLs to visit
while urls_to_visit:
current_url = urls_to_visit.pop(0) # Get the next URL to visit
visited_urls.add(current_url) # Mark the URL as visited
current_url = urls_to_visit.pop(0)
visited_urls.add(current_url)
# Try to load and process the content from the current URL
try:
response = requests.get(current_url) # Fetch the content of the current URL
response.raise_for_status() # Raise an exception for HTTP errors
loader = self.loader([current_url]) # Initialize the document loader for the current URL
loaded_content.extend(loader.load()) # Load the content and add it to the loaded_content list
response = requests.get(current_url)
response.raise_for_status()
loader = self.loader([current_url])
docs = loader.load()
# Convert the loaded documents to the application's Document schema
for doc in docs:
loaded_content.append(
Document(
doc.page_content,
extra_info=doc.metadata
)
)
except Exception as e:
# Print an error message if loading or processing fails and continue with the next URL
print(f"Error processing URL {current_url}: {e}")
continue
@@ -45,15 +50,15 @@ class CrawlerLoader(BaseRemote):
all_links = [
urljoin(current_url, a['href'])
for a in soup.find_all('a', href=True)
if base_url in urljoin(current_url, a['href']) # Ensure links are from the same domain
if base_url in urljoin(current_url, a['href'])
]
# Add new links to the list of URLs to visit if they haven't been visited yet
urls_to_visit.extend([link for link in all_links if link not in visited_urls])
urls_to_visit = list(set(urls_to_visit)) # Remove duplicate URLs
urls_to_visit = list(set(urls_to_visit))
# Stop crawling if the limit of pages to scrape is reached
if self.limit is not None and len(visited_urls) >= self.limit:
break
return loaded_content # Return the loaded content from all visited URLs
return loaded_content

View File

@@ -0,0 +1,139 @@
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from application.parser.remote.base import BaseRemote
import re
from markdownify import markdownify
from application.parser.schema.base import Document
import tldextract
class CrawlerLoader(BaseRemote):
def __init__(self, limit=10, allow_subdomains=False):
"""
Given a URL, crawl web pages up to `self.limit`,
convert HTML content to Markdown, and return a list of Document objects.
:param limit: The maximum number of pages to crawl.
:param allow_subdomains: If True, crawl pages on subdomains of the base domain.
"""
self.limit = limit
self.allow_subdomains = allow_subdomains
self.session = requests.Session()
def load_data(self, inputs):
url = inputs
if isinstance(url, list) and url:
url = url[0]
# Ensure the URL has a scheme (if not, default to http)
if not urlparse(url).scheme:
url = "http://" + url
# Keep track of visited URLs to avoid revisiting the same page
visited_urls = set()
# Determine the base domain for link filtering using tldextract
base_domain = self._get_base_domain(url)
urls_to_visit = {url}
documents = []
while urls_to_visit:
current_url = urls_to_visit.pop()
# Skip if already visited
if current_url in visited_urls:
continue
visited_urls.add(current_url)
# Fetch the page content
html_content = self._fetch_page(current_url)
if html_content is None:
continue
# Convert the HTML to Markdown for cleaner text formatting
title, language, processed_markdown = self._process_html_to_markdown(html_content, current_url)
if processed_markdown:
# Create a Document for each visited page
documents.append(
Document(
processed_markdown, # content
None, # doc_id
None, # embedding
{"source": current_url, "title": title, "language": language} # extra_info
)
)
# Extract links and filter them according to domain rules
new_links = self._extract_links(html_content, current_url)
filtered_links = self._filter_links(new_links, base_domain)
# Add any new, not-yet-visited links to the queue
urls_to_visit.update(link for link in filtered_links if link not in visited_urls)
# If we've reached the limit, stop crawling
if self.limit is not None and len(visited_urls) >= self.limit:
break
return documents
def _fetch_page(self, url):
try:
response = self.session.get(url, timeout=10)
response.raise_for_status()
return response.text
except requests.exceptions.RequestException as e:
print(f"Error fetching URL {url}: {e}")
return None
def _process_html_to_markdown(self, html_content, current_url):
soup = BeautifulSoup(html_content, 'html.parser')
title_tag = soup.find('title')
title = title_tag.text.strip() if title_tag else "No Title"
# Extract language
language_tag = soup.find('html')
language = language_tag.get('lang', 'en') if language_tag else "en"
markdownified = markdownify(html_content, heading_style="ATX", newline_style="BACKSLASH")
# Reduce sequences of more than two newlines to exactly three
markdownified = re.sub(r'\n{3,}', '\n\n\n', markdownified)
return title, language, markdownified
def _extract_links(self, html_content, current_url):
soup = BeautifulSoup(html_content, 'html.parser')
links = []
for a in soup.find_all('a', href=True):
full_url = urljoin(current_url, a['href'])
links.append((full_url, a.text.strip()))
return links
def _get_base_domain(self, url):
extracted = tldextract.extract(url)
# Reconstruct the domain as domain.suffix
base_domain = f"{extracted.domain}.{extracted.suffix}"
return base_domain
def _filter_links(self, links, base_domain):
"""
Filter the extracted links to only include those that match the crawling criteria:
- If allow_subdomains is True, allow any link whose domain ends with the base_domain.
- If allow_subdomains is False, only allow exact matches of the base_domain.
"""
filtered = []
for link, _ in links:
parsed_link = urlparse(link)
if not parsed_link.netloc:
continue
extracted = tldextract.extract(parsed_link.netloc)
link_base = f"{extracted.domain}.{extracted.suffix}"
if self.allow_subdomains:
# For subdomains: sub.example.com ends with example.com
if link_base == base_domain or link_base.endswith("." + base_domain):
filtered.append(link)
else:
# Exact domain match
if link_base == base_domain:
filtered.append(link)
return filtered
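A hedged usage sketch for the Markdown-producing crawler (the URL is a placeholder, and `extra_info` is assumed to be exposed as an attribute on the returned Documents):

```python
loader = CrawlerLoader(limit=5, allow_subdomains=False)
documents = loader.load_data("https://docs.example.com")
for doc in documents:
    # Each crawled page becomes one Document with source URL, title and language metadata.
    print(doc.extra_info["source"], doc.extra_info["title"], doc.extra_info["language"])
```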

View File

@@ -1,5 +1,7 @@
from application.parser.remote.base import BaseRemote
from application.parser.schema.base import Document
from langchain_community.document_loaders import WebBaseLoader
from urllib.parse import urlparse
headers = {
"User-Agent": "Mozilla/5.0",
@@ -23,10 +25,20 @@ class WebLoader(BaseRemote):
urls = [urls]
documents = []
for url in urls:
# Check if the URL scheme is provided; if not, assume http
if not urlparse(url).scheme:
url = "http://" + url
try:
loader = self.loader([url], header_template=headers)
documents.extend(loader.load())
loaded_docs = loader.load()
for doc in loaded_docs:
documents.append(
Document(
doc.page_content,
extra_info=doc.metadata,
)
)
except Exception as e:
print(f"Error processing URL {url}: {e}")
continue
return documents
return documents

View File

@@ -1,79 +0,0 @@
import re
from math import ceil
from typing import List
import tiktoken
from application.parser.schema.base import Document
def separate_header_and_body(text):
header_pattern = r"^(.*?\n){3}"
match = re.match(header_pattern, text)
header = match.group(0)
body = text[len(header):]
return header, body
def group_documents(documents: List[Document], min_tokens: int, max_tokens: int) -> List[Document]:
docs = []
current_group = None
for doc in documents:
doc_len = len(tiktoken.get_encoding("cl100k_base").encode(doc.text))
# Check if current group is empty or if the document can be added based on token count and matching metadata
if (current_group is None or
(len(tiktoken.get_encoding("cl100k_base").encode(current_group.text)) + doc_len < max_tokens and
doc_len < min_tokens and
current_group.extra_info == doc.extra_info)):
if current_group is None:
current_group = doc # Use the document directly to retain its metadata
else:
current_group.text += " " + doc.text # Append text to the current group
else:
docs.append(current_group)
current_group = doc # Start a new group with the current document
if current_group is not None:
docs.append(current_group)
return docs
def split_documents(documents: List[Document], max_tokens: int) -> List[Document]:
docs = []
for doc in documents:
token_length = len(tiktoken.get_encoding("cl100k_base").encode(doc.text))
if token_length <= max_tokens:
docs.append(doc)
else:
header, body = separate_header_and_body(doc.text)
if len(tiktoken.get_encoding("cl100k_base").encode(header)) > max_tokens:
body = doc.text
header = ""
num_body_parts = ceil(token_length / max_tokens)
part_length = ceil(len(body) / num_body_parts)
body_parts = [body[i:i + part_length] for i in range(0, len(body), part_length)]
for i, body_part in enumerate(body_parts):
new_doc = Document(text=header + body_part.strip(),
doc_id=f"{doc.doc_id}-{i}",
embedding=doc.embedding,
extra_info=doc.extra_info)
docs.append(new_doc)
return docs
def group_split(documents: List[Document], max_tokens: int = 2000, min_tokens: int = 150, token_check: bool = True):
if not token_check:
return documents
print("Grouping small documents")
try:
documents = group_documents(documents=documents, min_tokens=min_tokens, max_tokens=max_tokens)
except Exception:
print("Grouping failed, try running without token_check")
print("Separating large documents")
try:
documents = split_documents(documents=documents, max_tokens=max_tokens)
except Exception:
print("Grouping failed, try running without token_check")
return documents

View File

@@ -1,25 +1,27 @@
anthropic==0.34.2
boto3==1.34.153
anthropic==0.45.2
boto3==1.35.97
beautifulsoup4==4.12.3
celery==5.3.6
celery==5.4.0
dataclasses-json==0.6.7
docx2txt==0.8
duckduckgo-search==6.3.0
ebooklib==0.18
elastic-transport==8.15.0
elasticsearch==8.15.1
elastic-transport==8.17.0
elasticsearch==8.17.0
escodegen==1.0.11
esprima==4.0.1
esutils==1.0.1
Flask==3.0.3
faiss-cpu==1.8.0.post1
Flask==3.1.0
faiss-cpu==1.9.0.post1
flask-restx==1.3.0
gTTS==2.3.2
google-genai==0.5.0
google-generativeai==0.8.3
gTTS==2.5.4
gunicorn==23.0.0
html2text==2024.2.26
javalang==0.13.0
jinja2==3.1.4
jiter==0.5.0
jinja2==3.1.5
jiter==0.8.2
jmespath==1.0.1
joblib==1.4.2
jsonpatch==1.33
@@ -28,62 +30,66 @@ jsonschema==4.23.0
jsonschema-spec==0.2.4
jsonschema-specifications==2023.7.1
kombu==5.4.2
langchain==0.3.0
langchain-community==0.3.0
langchain-core==0.3.2
langchain-openai==0.2.0
langchain-text-splitters==0.3.0
langsmith==0.1.125
langchain==0.3.14
langchain-community==0.3.14
langchain-core==0.3.29
langchain-openai==0.3.0
langchain-text-splitters==0.3.5
langsmith==0.2.10
lazy-object-proxy==1.10.0
lxml==5.3.0
markupsafe==2.1.5
marshmallow==3.22.0
markupsafe==3.0.2
marshmallow==3.26.1
mpmath==1.3.0
multidict==6.1.0
mypy-extensions==1.0.0
networkx==3.3
numpy==1.26.4
openai==1.46.1
networkx==3.4.2
numpy==2.2.1
openai==1.59.5
openapi-schema-validator==0.6.2
openapi-spec-validator==0.6.0
openapi3-parser==1.1.18
orjson==3.10.7
openapi3-parser==1.1.19
orjson==3.10.14
packaging==24.1
pandas==2.2.3
openpyxl==3.1.5
pathable==0.4.3
pillow==10.4.0
pathable==0.4.4
pillow==11.1.0
portalocker==2.10.1
prance==23.6.21.0
primp==0.6.3
prompt-toolkit==3.0.47
protobuf==5.28.2
primp==0.10.0
prompt-toolkit==3.0.50
protobuf==5.29.3
psycopg2-binary==2.9.10
py==1.11.0
pydantic==2.9.2
pydantic-core==2.23.4
pydantic-settings==2.4.0
pymongo==4.8.0
pypdf2==3.0.1
pydantic==2.10.4
pydantic-core==2.27.2
pydantic-settings==2.7.1
pymongo==4.10.1
pypdf==5.2.0
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-pptx==1.0.2
qdrant-client==1.11.0
redis==5.0.1
qdrant-client==1.12.2
redis==5.2.1
referencing==0.30.2
regex==2024.9.11
regex==2024.11.6
requests==2.32.3
retry==0.9.2
sentence-transformers==3.0.1
tiktoken==0.7.0
tokenizers==0.19.1
torch==2.4.1
tqdm==4.66.5
transformers==4.44.2
sentence-transformers==3.3.1
tiktoken==0.8.0
tokenizers==0.21.0
torch==2.5.1
tqdm==4.67.1
transformers==4.48.0
typing-extensions==4.12.2
typing-inspect==0.9.0
tzdata==2024.2
urllib3==2.2.3
urllib3==2.3.0
vine==5.1.0
wcwidth==0.2.13
werkzeug==3.0.4
yarl==1.11.1
werkzeug==3.1.3
yarl==1.18.3
markdownify==0.14.1
tldextract==5.1.3
websockets==14.1

View File

@@ -2,7 +2,6 @@ import json
from application.retriever.base import BaseRetriever
from application.core.settings import settings
from application.llm.llm_creator import LLMCreator
from application.utils import num_tokens_from_string
from langchain_community.tools import BraveSearch
@@ -72,22 +71,13 @@ class BraveRetSearch(BaseRetriever):
for doc in docs:
yield {"source": doc}
if len(self.chat_history) > 1:
tokens_current_history = 0
# count tokens in history
if len(self.chat_history) > 0:
for i in self.chat_history:
if "prompt" in i and "response" in i:
tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
i["response"]
messages_combine.append({"role": "user", "content": i["prompt"]})
messages_combine.append(
{"role": "assistant", "content": i["response"]}
)
if tokens_current_history + tokens_batch < self.token_limit:
tokens_current_history += tokens_batch
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)
messages_combine.append(
{"role": "system", "content": i["response"]}
)
messages_combine.append({"role": "user", "content": self.question})
llm = LLMCreator.create_llm(

View File

@@ -1,9 +1,10 @@
from application.retriever.base import BaseRetriever
from application.core.settings import settings
from application.vectorstore.vector_creator import VectorCreator
from application.llm.llm_creator import LLMCreator
import uuid
from application.utils import num_tokens_from_string
from application.core.settings import settings
from application.retriever.base import BaseRetriever
from application.tools.agent import Agent
from application.vectorstore.vector_creator import VectorCreator
class ClassicRAG(BaseRetriever):
@@ -20,7 +21,7 @@ class ClassicRAG(BaseRetriever):
user_api_key=None,
):
self.question = question
self.vectorstore = source['active_docs'] if 'active_docs' in source else None
self.vectorstore = source["active_docs"] if "active_docs" in source else None
self.chat_history = chat_history
self.prompt = prompt
self.chunks = chunks
@@ -36,6 +37,12 @@ class ClassicRAG(BaseRetriever):
)
)
self.user_api_key = user_api_key
self.agent = Agent(
llm_name=settings.LLM_NAME,
gpt_model=self.gpt_model,
api_key=settings.API_KEY,
user_api_key=self.user_api_key,
)
def _get_data(self):
if self.chunks == 0:
@@ -72,34 +79,52 @@ class ClassicRAG(BaseRetriever):
for doc in docs:
yield {"source": doc}
if len(self.chat_history) > 1:
tokens_current_history = 0
# count tokens in history
if len(self.chat_history) > 0:
for i in self.chat_history:
if "prompt" in i and "response" in i:
tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
i["response"]
messages_combine.append({"role": "user", "content": i["prompt"]})
messages_combine.append(
{"role": "assistant", "content": i["response"]}
)
if tokens_current_history + tokens_batch < self.token_limit:
tokens_current_history += tokens_batch
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)
messages_combine.append(
{"role": "system", "content": i["response"]}
)
messages_combine.append({"role": "user", "content": self.question})
if "tool_calls" in i:
for tool_call in i["tool_calls"]:
call_id = tool_call.get("call_id")
if call_id is None or call_id == "None":
call_id = str(uuid.uuid4())
function_call_dict = {
"function_call": {
"name": tool_call.get("action_name"),
"args": tool_call.get("arguments"),
"call_id": call_id,
}
}
function_response_dict = {
"function_response": {
"name": tool_call.get("action_name"),
"response": {"result": tool_call.get("result")},
"call_id": call_id,
}
}
messages_combine.append(
{"role": "assistant", "content": [function_call_dict]}
)
messages_combine.append(
{"role": "tool", "content": [function_response_dict]}
)
messages_combine.append({"role": "user", "content": self.question})
completion = self.agent.gen(messages_combine)
llm = LLMCreator.create_llm(
settings.LLM_NAME, api_key=settings.API_KEY, user_api_key=self.user_api_key
)
completion = llm.gen_stream(model=self.gpt_model, messages=messages_combine)
for line in completion:
yield {"answer": str(line)}
yield {"tool_calls": self.agent.tool_calls.copy()}
def search(self):
return self._get_data()
def get_params(self):
return {
"question": self.question,
@@ -109,5 +134,5 @@ class ClassicRAG(BaseRetriever):
"chunks": self.chunks,
"token_limit": self.token_limit,
"gpt_model": self.gpt_model,
"user_api_key": self.user_api_key
"user_api_key": self.user_api_key,
}
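A hedged sketch of the chat_history entry shape that the tool-call replay loop above expects (field names taken from the code; values invented):

```python
history_entry = {
    "prompt": "What's the weather in Paris?",
    "response": "It is currently 18°C and sunny in Paris.",
    "tool_calls": [
        {
            "call_id": "call_1",                    # regenerated via uuid4 when missing
            "action_name": "get_weather_65a1b2c3",  # "<action>_<tool_id>" naming used by the Agent
            "arguments": {"city": "Paris"},
            "result": "18°C, sunny",
        }
    ],
}
```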

View File

@@ -1,7 +1,6 @@
from application.retriever.base import BaseRetriever
from application.core.settings import settings
from application.llm.llm_creator import LLMCreator
from application.utils import num_tokens_from_string
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
@@ -89,21 +88,12 @@ class DuckDuckSearch(BaseRetriever):
for doc in docs:
yield {"source": doc}
if len(self.chat_history) > 1:
tokens_current_history = 0
# count tokens in history
if len(self.chat_history) > 0:
for i in self.chat_history:
if "prompt" in i and "response" in i:
tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
i["response"]
)
if tokens_current_history + tokens_batch < self.token_limit:
tokens_current_history += tokens_batch
if "prompt" in i and "response" in i:
messages_combine.append({"role": "user", "content": i["prompt"]})
messages_combine.append(
{"role": "user", "content": i["prompt"]}
)
messages_combine.append(
{"role": "system", "content": i["response"]}
{"role": "assistant", "content": i["response"]}
)
messages_combine.append({"role": "user", "content": self.question})

184
application/tools/agent.py Normal file
View File

@@ -0,0 +1,184 @@
from application.core.mongo_db import MongoDB
from application.llm.llm_creator import LLMCreator
from application.tools.llm_handler import get_llm_handler
from application.tools.tool_action_parser import ToolActionParser
from application.tools.tool_manager import ToolManager
class Agent:
def __init__(self, llm_name, gpt_model, api_key, user_api_key=None):
# Initialize the LLM with the provided parameters
self.llm = LLMCreator.create_llm(
llm_name, api_key=api_key, user_api_key=user_api_key
)
self.llm_handler = get_llm_handler(llm_name)
self.gpt_model = gpt_model
# Static tool configuration (to be replaced later)
self.tools = []
self.tool_config = {}
self.tool_calls = []
def _get_user_tools(self, user="local"):
mongo = MongoDB.get_client()
db = mongo["docsgpt"]
user_tools_collection = db["user_tools"]
user_tools = user_tools_collection.find({"user": user, "status": True})
user_tools = list(user_tools)
tools_by_id = {str(tool["_id"]): tool for tool in user_tools}
return tools_by_id
def _build_tool_parameters(self, action):
params = {"type": "object", "properties": {}, "required": []}
for param_type in ["query_params", "headers", "body", "parameters"]:
if param_type in action and action[param_type].get("properties"):
for k, v in action[param_type]["properties"].items():
if v.get("filled_by_llm", True):
params["properties"][k] = {
key: value
for key, value in v.items()
if key != "filled_by_llm" and key != "value"
}
params["required"].append(k)
return params
def _prepare_tools(self, tools_dict):
self.tools = [
{
"type": "function",
"function": {
"name": f"{action['name']}_{tool_id}",
"description": action["description"],
"parameters": self._build_tool_parameters(action),
},
}
for tool_id, tool in tools_dict.items()
if (
(tool["name"] == "api_tool" and "actions" in tool.get("config", {}))
or (tool["name"] != "api_tool" and "actions" in tool)
)
for action in (
tool["config"]["actions"].values()
if tool["name"] == "api_tool"
else tool["actions"]
)
if action.get("active", True)
]
def _execute_tool_action(self, tools_dict, call):
parser = ToolActionParser(self.llm.__class__.__name__)
tool_id, action_name, call_args = parser.parse_args(call)
tool_data = tools_dict[tool_id]
action_data = (
tool_data["config"]["actions"][action_name]
if tool_data["name"] == "api_tool"
else next(
action
for action in tool_data["actions"]
if action["name"] == action_name
)
)
query_params, headers, body, parameters = {}, {}, {}, {}
param_types = {
"query_params": query_params,
"headers": headers,
"body": body,
"parameters": parameters,
}
for param_type, target_dict in param_types.items():
if param_type in action_data and action_data[param_type].get("properties"):
for param, details in action_data[param_type]["properties"].items():
if param not in call_args and "value" in details:
target_dict[param] = details["value"]
for param, value in call_args.items():
for param_type, target_dict in param_types.items():
if param_type in action_data and param in action_data[param_type].get(
"properties", {}
):
target_dict[param] = value
tm = ToolManager(config={})
tool = tm.load_tool(
tool_data["name"],
tool_config=(
{
"url": tool_data["config"]["actions"][action_name]["url"],
"method": tool_data["config"]["actions"][action_name]["method"],
"headers": headers,
"query_params": query_params,
}
if tool_data["name"] == "api_tool"
else tool_data["config"]
),
)
if tool_data["name"] == "api_tool":
print(
f"Executing api: {action_name} with query_params: {query_params}, headers: {headers}, body: {body}"
)
result = tool.execute_action(action_name, **body)
else:
print(f"Executing tool: {action_name} with args: {call_args}")
result = tool.execute_action(action_name, **parameters)
call_id = getattr(call, "id", None)
tool_call_data = {
"tool_name": tool_data["name"],
"call_id": call_id if call_id is not None else "None",
"action_name": f"{action_name}_{tool_id}",
"arguments": call_args,
"result": result,
}
self.tool_calls.append(tool_call_data)
return result, call_id
def _simple_tool_agent(self, messages):
tools_dict = self._get_user_tools()
self._prepare_tools(tools_dict)
resp = self.llm.gen(model=self.gpt_model, messages=messages, tools=self.tools)
if isinstance(resp, str):
yield resp
return
if (
hasattr(resp, "message")
and hasattr(resp.message, "content")
and resp.message.content is not None
):
yield resp.message.content
return
resp = self.llm_handler.handle_response(self, resp, tools_dict, messages)
if isinstance(resp, str):
yield resp
elif (
hasattr(resp, "message")
and hasattr(resp.message, "content")
and resp.message.content is not None
):
yield resp.message.content
else:
completion = self.llm.gen_stream(
model=self.gpt_model, messages=messages, tools=self.tools
)
for line in completion:
yield line
return
def gen(self, messages):
self.tool_calls = []
if self.llm.supports_tools():
resp = self._simple_tool_agent(messages)
for line in resp:
yield line
else:
resp = self.llm.gen_stream(model=self.gpt_model, messages=messages)
for line in resp:
yield line
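For reference, a minimal usage sketch of the `Agent` class above. It assumes the DocsGPT application package is importable, a MongoDB instance is reachable (enabled tools are read from the `user_tools` collection), and valid LLM credentials; the model name and key below are placeholders.

```python
from application.tools.agent import Agent

agent = Agent(
    llm_name="openai",          # any LLM name supported by LLMCreator
    gpt_model="gpt-4o-mini",    # placeholder model name
    api_key="YOUR_API_KEY",     # placeholder credential
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the current BTC price in USD?"},
]

# gen() yields text chunks; any tool calls made along the way are
# collected in agent.tool_calls for later inspection.
for chunk in agent.gen(messages):
    print(chunk, end="")
print()
print(agent.tool_calls)
```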

21
application/tools/base.py Normal file
View File

@@ -0,0 +1,21 @@
from abc import ABC, abstractmethod
class Tool(ABC):
@abstractmethod
def execute_action(self, action_name: str, **kwargs):
pass
@abstractmethod
def get_actions_metadata(self):
"""
Returns a list of JSON objects describing the actions supported by the tool.
"""
pass
@abstractmethod
def get_config_requirements(self):
"""
Returns a dictionary describing the configuration requirements for the tool.
"""
pass
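To illustrate the contract the `Tool` base class imposes, here is a minimal hypothetical implementation; the `echo_say` action and its schema are invented for the example and are not part of the codebase.

```python
from application.tools.base import Tool


class EchoTool(Tool):
    """Hypothetical tool that simply echoes its input back."""

    def __init__(self, config):
        self.config = config

    def execute_action(self, action_name, **kwargs):
        if action_name == "echo_say":
            return {"status_code": 200, "message": kwargs.get("text", "")}
        raise ValueError(f"Unknown action: {action_name}")

    def get_actions_metadata(self):
        return [
            {
                "name": "echo_say",
                "description": "Echo the provided text back to the caller",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "text": {"type": "string", "description": "Text to echo"}
                    },
                    "required": ["text"],
                    "additionalProperties": False,
                },
            }
        ]

    def get_config_requirements(self):
        return {}
```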

View File

@@ -0,0 +1,54 @@
import json
import requests
from application.tools.base import Tool
class APITool(Tool):
"""
API Tool
A flexible tool for performing various API actions (e.g., sending messages, retrieving data) via custom user-specified APIs
"""
def __init__(self, config):
self.config = config
self.url = config.get("url", "")
self.method = config.get("method", "GET")
self.headers = config.get("headers", {"Content-Type": "application/json"})
self.query_params = config.get("query_params", {})
def execute_action(self, action_name, **kwargs):
return self._make_api_call(
self.url, self.method, self.headers, self.query_params, kwargs
)
def _make_api_call(self, url, method, headers, query_params, body):
if query_params:
url = f"{url}?{requests.compat.urlencode(query_params)}"
if isinstance(body, dict):
body = json.dumps(body)
try:
print(f"Making API call: {method} {url} with body: {body}")
response = requests.request(method, url, headers=headers, data=body)
response.raise_for_status()
try:
data = response.json()
except ValueError:
data = None
return {
"status_code": response.status_code,
"data": data,
"message": "API call successful.",
}
except requests.exceptions.RequestException as e:
# The request may fail before a response object exists, so read it off the exception.
error_response = getattr(e, "response", None)
return {
"status_code": error_response.status_code if error_response is not None else None,
"message": f"API call failed: {str(e)}",
}
def get_actions_metadata(self):
return []
def get_config_requirements(self):
return {}
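A sketch of configuring the generic `APITool` against an arbitrary endpoint. The import path and the endpoint URL are assumptions for illustration; the config keys follow the constructor above, and the keyword arguments to `execute_action` become the JSON request body.

```python
from application.tools.implementations.api_tool import APITool  # module path assumed

tool = APITool(
    {
        "url": "https://httpbin.org/post",  # example endpoint, not part of DocsGPT
        "method": "POST",
        "headers": {"Content-Type": "application/json"},
        "query_params": {"source": "docsgpt"},
    }
)

# Keyword arguments are JSON-encoded and sent as the request payload.
result = tool.execute_action("any_action_name", message="hello")
print(result["status_code"], result["message"])
```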

View File

@@ -0,0 +1,77 @@
import requests
from application.tools.base import Tool
class CryptoPriceTool(Tool):
"""
CryptoPrice
A tool for retrieving cryptocurrency prices using the CryptoCompare public API
"""
def __init__(self, config):
self.config = config
def execute_action(self, action_name, **kwargs):
actions = {"cryptoprice_get": self._get_price}
if action_name in actions:
return actions[action_name](**kwargs)
else:
raise ValueError(f"Unknown action: {action_name}")
def _get_price(self, symbol, currency):
"""
Fetches the current price of a given cryptocurrency symbol in the specified currency.
Example:
symbol = "BTC"
currency = "USD"
returns price in USD.
"""
url = f"https://min-api.cryptocompare.com/data/price?fsym={symbol.upper()}&tsyms={currency.upper()}"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
# data will be like {"USD": <price>} if the call is successful
if currency.upper() in data:
return {
"status_code": response.status_code,
"price": data[currency.upper()],
"message": f"Price of {symbol.upper()} in {currency.upper()} retrieved successfully.",
}
else:
return {
"status_code": response.status_code,
"message": f"Couldn't find price for {symbol.upper()} in {currency.upper()}.",
}
else:
return {
"status_code": response.status_code,
"message": "Failed to retrieve price.",
}
def get_actions_metadata(self):
return [
{
"name": "cryptoprice_get",
"description": "Retrieve the price of a specified cryptocurrency in a given currency",
"parameters": {
"type": "object",
"properties": {
"symbol": {
"type": "string",
"description": "The cryptocurrency symbol (e.g. BTC)",
},
"currency": {
"type": "string",
"description": "The currency in which you want the price (e.g. USD)",
},
},
"required": ["symbol", "currency"],
"additionalProperties": False,
},
}
]
def get_config_requirements(self):
# No specific configuration needed for this tool as it just queries a public endpoint
return {}
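A quick usage sketch for the crypto price tool; no configuration is required, and the module path is assumed.

```python
from application.tools.implementations.cryptoprice import CryptoPriceTool  # path assumed

tool = CryptoPriceTool(config={})
result = tool.execute_action("cryptoprice_get", symbol="BTC", currency="USD")

if result["status_code"] == 200 and "price" in result:
    print(f"BTC is trading at {result['price']} USD")
else:
    print(result["message"])
```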

View File

@@ -0,0 +1,163 @@
import psycopg2
from application.tools.base import Tool
class PostgresTool(Tool):
"""
PostgreSQL Database Tool
A tool for connecting to a PostgreSQL database using a connection string,
executing SQL queries, and retrieving schema information.
"""
def __init__(self, config):
self.config = config
self.connection_string = config.get("token", "")
def execute_action(self, action_name, **kwargs):
actions = {
"postgres_execute_sql": self._execute_sql,
"postgres_get_schema": self._get_schema,
}
if action_name in actions:
return actions[action_name](**kwargs)
else:
raise ValueError(f"Unknown action: {action_name}")
def _execute_sql(self, sql_query):
"""
Executes an SQL query against the PostgreSQL database using a connection string.
"""
conn = None # Initialize conn to None for error handling
try:
conn = psycopg2.connect(self.connection_string)
cur = conn.cursor()
cur.execute(sql_query)
conn.commit()
if sql_query.strip().lower().startswith("select"):
column_names = [desc[0] for desc in cur.description] if cur.description else []
results = []
rows = cur.fetchall()
for row in rows:
results.append(dict(zip(column_names, row)))
response_data = {"data": results, "column_names": column_names}
else:
row_count = cur.rowcount
response_data = {"message": f"Query executed successfully, {row_count} rows affected."}
cur.close()
return {
"status_code": 200,
"message": "SQL query executed successfully.",
"response_data": response_data,
}
except psycopg2.Error as e:
error_message = f"Database error: {e}"
print(f"Database error: {e}")
return {
"status_code": 500,
"message": "Failed to execute SQL query.",
"error": error_message,
}
finally:
if conn: # Ensure connection is closed even if errors occur
conn.close()
def _get_schema(self, db_name):
"""
Retrieves the schema of the PostgreSQL database using a connection string.
"""
conn = None # Initialize conn to None for error handling
try:
conn = psycopg2.connect(self.connection_string)
cur = conn.cursor()
cur.execute("""
SELECT
table_name,
column_name,
data_type,
column_default,
is_nullable
FROM
information_schema.columns
WHERE
table_schema = 'public'
ORDER BY
table_name,
ordinal_position;
""")
schema_data = {}
for row in cur.fetchall():
table_name, column_name, data_type, column_default, is_nullable = row
if table_name not in schema_data:
schema_data[table_name] = []
schema_data[table_name].append({
"column_name": column_name,
"data_type": data_type,
"column_default": column_default,
"is_nullable": is_nullable
})
cur.close()
return {
"status_code": 200,
"message": "Database schema retrieved successfully.",
"schema": schema_data,
}
except psycopg2.Error as e:
error_message = f"Database error: {e}"
print(f"Database error: {e}")
return {
"status_code": 500,
"message": "Failed to retrieve database schema.",
"error": error_message,
}
finally:
if conn: # Ensure connection is closed even if errors occur
conn.close()
def get_actions_metadata(self):
return [
{
"name": "postgres_execute_sql",
"description": "Execute an SQL query against the PostgreSQL database and return the results. Use this tool to interact with the database, e.g., retrieve specific data or perform updates. Only SELECT queries will return data, other queries will return execution status.",
"parameters": {
"type": "object",
"properties": {
"sql_query": {
"type": "string",
"description": "The SQL query to execute.",
},
},
"required": ["sql_query"],
"additionalProperties": False,
},
},
{
"name": "postgres_get_schema",
"description": "Retrieve the schema of the PostgreSQL database, including tables and their columns. Use this to understand the database structure before executing queries. db_name is 'default' if not provided.",
"parameters": {
"type": "object",
"properties": {
"db_name": {
"type": "string",
"description": "The name of the database to retrieve the schema for.",
},
},
"required": ["db_name"],
"additionalProperties": False,
},
},
]
def get_config_requirements(self):
return {
"token": {
"type": "string",
"description": "PostgreSQL database connection string (e.g., 'postgresql://user:password@host:port/dbname')",
},
}
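A sketch of exercising the Postgres tool directly. The connection string and the `users` table are placeholders, the module path is assumed, and both action names come from the metadata above.

```python
from application.tools.implementations.postgres import PostgresTool  # path assumed

tool = PostgresTool({"token": "postgresql://user:password@localhost:5432/mydb"})

# Inspect the public schema first, then query one of the tables it reports.
schema = tool.execute_action("postgres_get_schema", db_name="default")
print(schema["message"])

rows = tool.execute_action(
    "postgres_execute_sql", sql_query="SELECT id, name FROM users LIMIT 5;"
)
if rows["status_code"] == 200:
    print(rows["response_data"]["data"])
```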

View File

@@ -0,0 +1,86 @@
import requests
from application.tools.base import Tool
class TelegramTool(Tool):
"""
Telegram Bot
A flexible Telegram tool for performing various actions (e.g., sending messages, images).
Requires a bot token and chat ID for configuration
"""
def __init__(self, config):
self.config = config
self.token = config.get("token", "")
def execute_action(self, action_name, **kwargs):
actions = {
"telegram_send_message": self._send_message,
"telegram_send_image": self._send_image,
}
if action_name in actions:
return actions[action_name](**kwargs)
else:
raise ValueError(f"Unknown action: {action_name}")
def _send_message(self, text, chat_id):
print(f"Sending message: {text}")
url = f"https://api.telegram.org/bot{self.token}/sendMessage"
payload = {"chat_id": chat_id, "text": text}
response = requests.post(url, data=payload)
return {"status_code": response.status_code, "message": "Message sent"}
def _send_image(self, image_url, chat_id):
print(f"Sending image: {image_url}")
url = f"https://api.telegram.org/bot{self.token}/sendPhoto"
payload = {"chat_id": chat_id, "photo": image_url}
response = requests.post(url, data=payload)
return {"status_code": response.status_code, "message": "Image sent"}
def get_actions_metadata(self):
return [
{
"name": "telegram_send_message",
"description": "Send a notification to Telegram chat",
"parameters": {
"type": "object",
"properties": {
"text": {
"type": "string",
"description": "Text to send in the notification",
},
"chat_id": {
"type": "string",
"description": "Chat ID to send the notification to",
},
},
"required": ["text"],
"additionalProperties": False,
},
},
{
"name": "telegram_send_image",
"description": "Send an image to the Telegram chat",
"parameters": {
"type": "object",
"properties": {
"image_url": {
"type": "string",
"description": "URL of the image to send",
},
"chat_id": {
"type": "string",
"description": "Chat ID to send the image to",
},
},
"required": ["image_url"],
"additionalProperties": False,
},
},
]
def get_config_requirements(self):
return {
"token": {"type": "string", "description": "Bot token for authentication"},
}
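A sketch of sending a notification through the Telegram tool; the bot token, chat id, and module path are placeholders/assumptions.

```python
from application.tools.implementations.telegram import TelegramTool  # path assumed

tool = TelegramTool({"token": "123456:ABC-your-bot-token"})  # placeholder token

resp = tool.execute_action(
    "telegram_send_message",
    text="Ingestion finished successfully",
    chat_id="123456789",  # placeholder chat id
)
print(resp["status_code"], resp["message"])
```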

View File

@@ -0,0 +1,112 @@
import json
from abc import ABC, abstractmethod
class LLMHandler(ABC):
@abstractmethod
def handle_response(self, agent, resp, tools_dict, messages, **kwargs):
pass
class OpenAILLMHandler(LLMHandler):
def handle_response(self, agent, resp, tools_dict, messages):
while resp.finish_reason == "tool_calls":
message = json.loads(resp.model_dump_json())["message"]
keys_to_remove = {"audio", "function_call", "refusal"}
filtered_data = {
k: v for k, v in message.items() if k not in keys_to_remove
}
messages.append(filtered_data)
tool_calls = resp.message.tool_calls
for call in tool_calls:
try:
tool_response, call_id = agent._execute_tool_action(
tools_dict, call
)
function_call_dict = {
"function_call": {
"name": call.function.name,
"args": call.function.arguments,
"call_id": call_id,
}
}
function_response_dict = {
"function_response": {
"name": call.function.name,
"response": {"result": tool_response},
"call_id": call_id,
}
}
messages.append(
{"role": "assistant", "content": [function_call_dict]}
)
messages.append(
{"role": "tool", "content": [function_response_dict]}
)
except Exception as e:
# call_id may not have been assigned if the tool action itself raised,
# so read the id off the call object instead.
messages.append(
{
"role": "tool",
"content": f"Error executing tool: {str(e)}",
"tool_call_id": getattr(call, "id", None),
}
)
resp = agent.llm.gen(
model=agent.gpt_model, messages=messages, tools=agent.tools
)
return resp
class GoogleLLMHandler(LLMHandler):
def handle_response(self, agent, resp, tools_dict, messages):
from google.genai import types
while True:
response = agent.llm.gen(
model=agent.gpt_model, messages=messages, tools=agent.tools
)
if response.candidates and response.candidates[0].content.parts:
tool_call_found = False
for part in response.candidates[0].content.parts:
if part.function_call:
tool_call_found = True
tool_response, call_id = agent._execute_tool_action(
tools_dict, part.function_call
)
function_response_part = types.Part.from_function_response(
name=part.function_call.name,
response={"result": tool_response},
)
messages.append(
{"role": "model", "content": [part.to_json_dict()]}
)
messages.append(
{
"role": "tool",
"content": [function_response_part.to_json_dict()],
}
)
if (
not tool_call_found
and response.candidates[0].content.parts
and response.candidates[0].content.parts[0].text
):
return response.candidates[0].content.parts[0].text
elif not tool_call_found:
return response.candidates[0].content.parts
else:
return response
def get_llm_handler(llm_type):
handlers = {
"openai": OpenAILLMHandler(),
"google": GoogleLLMHandler(),
}
return handlers.get(llm_type, OpenAILLMHandler())
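The handler lookup falls back to the OpenAI-style handler for unregistered LLM types, which a short sketch makes explicit (the `"anthropic"` key below is only an example of an unregistered type):

```python
from application.tools.llm_handler import (
    get_llm_handler,
    OpenAILLMHandler,
    GoogleLLMHandler,
)

assert isinstance(get_llm_handler("openai"), OpenAILLMHandler)
assert isinstance(get_llm_handler("google"), GoogleLLMHandler)
# Any unregistered type (e.g. "anthropic") falls back to the OpenAI handler.
assert isinstance(get_llm_handler("anthropic"), OpenAILLMHandler)
```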

View File

@@ -0,0 +1,26 @@
import json
class ToolActionParser:
def __init__(self, llm_type):
self.llm_type = llm_type
self.parsers = {
"OpenAILLM": self._parse_openai_llm,
"GoogleLLM": self._parse_google_llm,
}
def parse_args(self, call):
parser = self.parsers.get(self.llm_type, self._parse_openai_llm)
return parser(call)
def _parse_openai_llm(self, call):
call_args = json.loads(call.function.arguments)
tool_id = call.function.name.split("_")[-1]
action_name = call.function.name.rsplit("_", 1)[0]
return tool_id, action_name, call_args
def _parse_google_llm(self, call):
call_args = call.args
tool_id = call.name.split("_")[-1]
action_name = call.name.rsplit("_", 1)[0]
return tool_id, action_name, call_args
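The parser relies on the naming convention used by `_prepare_tools`, where a function is exposed as `{action_name}_{tool_id}`. A small sketch with a stand-in OpenAI-style call object (the `SimpleNamespace` stand-in and the example id are for illustration only):

```python
import json
from types import SimpleNamespace

from application.tools.tool_action_parser import ToolActionParser

# Stand-in for an OpenAI tool call whose function name encodes "<action>_<tool_id>".
call = SimpleNamespace(
    function=SimpleNamespace(
        name="cryptoprice_get_65a1b2c3d4e5f6a7b8c9d0e1",
        arguments=json.dumps({"symbol": "BTC", "currency": "USD"}),
    )
)

parser = ToolActionParser("OpenAILLM")
tool_id, action_name, call_args = parser.parse_args(call)
print(tool_id)      # "65a1b2c3d4e5f6a7b8c9d0e1"
print(action_name)  # "cryptoprice_get"
print(call_args)    # {"symbol": "BTC", "currency": "USD"}
```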

View File

@@ -0,0 +1,46 @@
import importlib
import inspect
import os
import pkgutil
from application.tools.base import Tool
class ToolManager:
def __init__(self, config):
self.config = config
self.tools = {}
self.load_tools()
def load_tools(self):
tools_dir = os.path.join(os.path.dirname(__file__), "implementations")
for finder, name, ispkg in pkgutil.iter_modules([tools_dir]):
if name == "base" or name.startswith("__"):
continue
module = importlib.import_module(
f"application.tools.implementations.{name}"
)
for member_name, obj in inspect.getmembers(module, inspect.isclass):
if issubclass(obj, Tool) and obj is not Tool:
tool_config = self.config.get(name, {})
self.tools[name] = obj(tool_config)
def load_tool(self, tool_name, tool_config):
self.config[tool_name] = tool_config
module = importlib.import_module(
f"application.tools.implementations.{tool_name}"
)
for member_name, obj in inspect.getmembers(module, inspect.isclass):
if issubclass(obj, Tool) and obj is not Tool:
return obj(tool_config)
def execute_action(self, tool_name, action_name, **kwargs):
if tool_name not in self.tools:
raise ValueError(f"Tool '{tool_name}' not loaded")
return self.tools[tool_name].execute_action(action_name, **kwargs)
def get_all_actions_metadata(self):
metadata = []
for tool in self.tools.values():
metadata.extend(tool.get_actions_metadata())
return metadata
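A sketch of the two ways `ToolManager` is used above: eager loading of every implementation at construction, and `load_tool` for a single tool with an explicit config. It assumes the implementation modules and their dependencies are importable, that the Telegram implementation lives in a module named `telegram`, and the token/chat id are placeholders.

```python
from application.tools.tool_manager import ToolManager

# Loads every Tool subclass found under application/tools/implementations.
tm = ToolManager(config={})
print([meta["name"] for meta in tm.get_all_actions_metadata()])

# Load one tool on demand with its own configuration (module name assumed).
telegram = tm.load_tool("telegram", tool_config={"token": "123456:ABC-bot-token"})
resp = telegram.execute_action(
    "telegram_send_message", text="hello", chat_id="123456789"
)
print(resp)
```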

View File

@@ -1,29 +1,84 @@
from io import BytesIO
import asyncio
import websockets
import json
import base64
from io import BytesIO
from application.tts.base import BaseTTS
class ElevenlabsTTS(BaseTTS):
def __init__(self):
from elevenlabs.client import ElevenLabs
self.client = ElevenLabs(
api_key="ELEVENLABS_API_KEY",
)
def __init__(self):
self.api_key = "ELEVENLABS_API_KEY"  # put your ElevenLabs API key here
self.model = "eleven_flash_v2_5"
self.voice = "VOICE_ID" # this is the hash code for the voice not the name!
self.write_audio = 1
def text_to_speech(self, text):
lang = "en"
audio = self.client.generate(
text=text,
model="eleven_multilingual_v2",
voice="Brian",
)
audio_data = BytesIO()
for chunk in audio:
audio_data.write(chunk)
audio_bytes = audio_data.getvalue()
asyncio.run(self._text_to_speech_websocket(text))
# Encode to base64
audio_base64 = base64.b64encode(audio_bytes).decode("utf-8")
return audio_base64, lang
async def _text_to_speech_websocket(self, text):
uri = f"wss://api.elevenlabs.io/v1/text-to-speech/{self.voice}/stream-input?model_id={self.model}"
websocket = await websockets.connect(uri)
payload = {
"text": " ",
"voice_settings": {
"stability": 0.5,
"similarity_boost": 0.8,
},
"xi_api_key": self.api_key,
}
await websocket.send(json.dumps(payload))
async def listen():
while True:
try:
msg = await websocket.recv()
data = json.loads(msg)
if data.get("audio"):
print("audio received")
yield base64.b64decode(data["audio"])
elif data.get("isFinal"):
break
except websockets.exceptions.ConnectionClosed:
print("websocket closed")
break
listen_task = asyncio.create_task(self.stream(listen()))
await websocket.send(json.dumps({"text": text}))
# this is to signal the end of the text, either use this or flush
await websocket.send(json.dumps({"text": ""}))
await listen_task
async def stream(self, audio_stream):
if self.write_audio:
audio_bytes = BytesIO()
async for chunk in audio_stream:
if chunk:
audio_bytes.write(chunk)
with open("output_audio.mp3", "wb") as f:
f.write(audio_bytes.getvalue())
else:
async for chunk in audio_stream:
pass # depends on the streamer!
def test_elevenlabs_websocket():
"""
Tests the ElevenlabsTTS text_to_speech method with a sample prompt.
Prints out the base64-encoded result and writes it to 'output_audio.mp3'.
"""
# Instantiate your TTS class
tts = ElevenlabsTTS()
# Call the method with some sample text
tts.text_to_speech("Hello from ElevenLabs WebSocket!")
print("Saved audio to output_audio.mp3.")
if __name__ == "__main__":
test_elevenlabs_websocket()

View File

@@ -1,7 +1,7 @@
import sys
from datetime import datetime
from application.core.mongo_db import MongoDB
from application.utils import num_tokens_from_string
from application.utils import num_tokens_from_string, num_tokens_from_object_or_list
mongo = MongoDB.get_client()
db = mongo["docsgpt"]
@@ -21,11 +21,16 @@ def update_token_usage(user_api_key, token_usage):
def gen_token_usage(func):
def wrapper(self, model, messages, stream, **kwargs):
def wrapper(self, model, messages, stream, tools, **kwargs):
for message in messages:
self.token_usage["prompt_tokens"] += num_tokens_from_string(message["content"])
result = func(self, model, messages, stream, **kwargs)
self.token_usage["generated_tokens"] += num_tokens_from_string(result)
if message["content"]:
self.token_usage["prompt_tokens"] += num_tokens_from_string(message["content"])
result = func(self, model, messages, stream, tools, **kwargs)
# check if result is a string
if isinstance(result, str):
self.token_usage["generated_tokens"] += num_tokens_from_string(result)
else:
self.token_usage["generated_tokens"] += num_tokens_from_object_or_list(result)
update_token_usage(self.user_api_key, self.token_usage)
return result
@@ -33,11 +38,11 @@ def gen_token_usage(func):
def stream_token_usage(func):
def wrapper(self, model, messages, stream, **kwargs):
def wrapper(self, model, messages, stream, tools, **kwargs):
for message in messages:
self.token_usage["prompt_tokens"] += num_tokens_from_string(message["content"])
batch = []
result = func(self, model, messages, stream, **kwargs)
result = func(self, model, messages, stream, tools, **kwargs)
for r in result:
batch.append(r)
yield r

View File

@@ -1,5 +1,7 @@
import tiktoken
import hashlib
import re
import tiktoken
from flask import jsonify, make_response
@@ -15,8 +17,22 @@ def get_encoding():
def num_tokens_from_string(string: str) -> int:
encoding = get_encoding()
num_tokens = len(encoding.encode(string))
return num_tokens
if isinstance(string, str):
num_tokens = len(encoding.encode(string))
return num_tokens
else:
return 0
def num_tokens_from_object_or_list(thing):
if isinstance(thing, list):
return sum([num_tokens_from_object_or_list(x) for x in thing])
elif isinstance(thing, dict):
return sum([num_tokens_from_object_or_list(x) for x in thing.values()])
elif isinstance(thing, str):
return num_tokens_from_string(thing)
else:
return 0
def count_tokens_docs(docs):
@@ -44,5 +60,52 @@ def check_required_fields(data, required_fields):
def get_hash(data):
return hashlib.md5(data.encode()).hexdigest()
return hashlib.md5(data.encode(), usedforsecurity=False).hexdigest()
def limit_chat_history(history, max_token_limit=None, gpt_model="docsgpt"):
"""
Limits chat history based on token count.
Returns a list of messages that fit within the token limit.
"""
from application.core.settings import settings
max_token_limit = (
max_token_limit
if max_token_limit
and max_token_limit
< settings.MODEL_TOKEN_LIMITS.get(gpt_model, settings.DEFAULT_MAX_HISTORY)
else settings.MODEL_TOKEN_LIMITS.get(gpt_model, settings.DEFAULT_MAX_HISTORY)
)
if not history:
return []
trimmed_history = []
tokens_current_history = 0
for message in reversed(history):
tokens_batch = 0
if "prompt" in message and "response" in message:
tokens_batch += num_tokens_from_string(message["prompt"])
tokens_batch += num_tokens_from_string(message["response"])
if "tool_calls" in message:
for tool_call in message["tool_calls"]:
tool_call_string = f"Tool: {tool_call.get('tool_name')} | Action: {tool_call.get('action_name')} | Args: {tool_call.get('arguments')} | Response: {tool_call.get('result')}"
tokens_batch += num_tokens_from_string(tool_call_string)
if tokens_current_history + tokens_batch < max_token_limit:
tokens_current_history += tokens_batch
trimmed_history.insert(0, message)
else:
break
return trimmed_history
def validate_function_name(function_name):
"""Validates if a function name matches the allowed pattern."""
if not re.match(r"^[a-zA-Z0-9_-]+$", function_name):
return False
return True
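A short sketch of the new token helpers and history trimming; the history entries below are made up, and `docsgpt` is used as the model key so the default token limit from settings applies.

```python
from application.utils import (
    num_tokens_from_string,
    num_tokens_from_object_or_list,
    limit_chat_history,
)

history = [
    {"prompt": "What is DocsGPT?", "response": "An open-source RAG assistant."},
    {
        "prompt": "What is BTC worth?",
        "response": "Here is the latest price I found.",
        "tool_calls": [
            {
                "tool_name": "cryptoprice",
                "action_name": "cryptoprice_get_abc123",
                "arguments": {"symbol": "BTC", "currency": "USD"},
                "result": {"price": 0},  # placeholder value
            }
        ],
    },
]

print(num_tokens_from_string("hello world"))                  # token count of a plain string
print(num_tokens_from_object_or_list(history))                # recursive count over dicts/lists
print(len(limit_chat_history(history, gpt_model="docsgpt")))  # newest-first trimming
```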

View File

@@ -75,9 +75,9 @@ class BaseVectorStore(ABC):
openai_api_key=embeddings_key
)
elif embeddings_name == "huggingface_sentence-transformers/all-mpnet-base-v2":
if os.path.exists("./model/all-mpnet-base-v2"):
if os.path.exists("./models/all-mpnet-base-v2"):
embedding_instance = EmbeddingsSingleton.get_instance(
embeddings_name="./model/all-mpnet-base-v2",
embeddings_name = "./models/all-mpnet-base-v2",
)
else:
embedding_instance = EmbeddingsSingleton.get_instance(
@@ -86,4 +86,5 @@ class BaseVectorStore(ABC):
else:
embedding_instance = EmbeddingsSingleton.get_instance(embeddings_name)
return embedding_instance
return embedding_instance

View File

@@ -1,8 +1,12 @@
from langchain_community.vectorstores import FAISS
from application.vectorstore.base import BaseVectorStore
from application.core.settings import settings
import os
from langchain_community.vectorstores import FAISS
from application.core.settings import settings
from application.parser.schema.base import Document
from application.vectorstore.base import BaseVectorStore
def get_vectorstore(path: str) -> str:
if path:
vectorstore = os.path.join("application", "indexes", path)
@@ -10,21 +14,25 @@ def get_vectorstore(path: str) -> str:
vectorstore = os.path.join("application")
return vectorstore
class FaissStore(BaseVectorStore):
def __init__(self, source_id: str, embeddings_key: str, docs_init=None):
super().__init__()
self.source_id = source_id
self.path = get_vectorstore(source_id)
embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)
self.embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, embeddings_key)
try:
if docs_init:
self.docsearch = FAISS.from_documents(docs_init, embeddings)
self.docsearch = FAISS.from_documents(docs_init, self.embeddings)
else:
self.docsearch = FAISS.load_local(self.path, embeddings, allow_dangerous_deserialization=True)
self.docsearch = FAISS.load_local(
self.path, self.embeddings, allow_dangerous_deserialization=True
)
except Exception:
raise
self.assert_embedding_dimensions(embeddings)
self.assert_embedding_dimensions(self.embeddings)
def search(self, *args, **kwargs):
return self.docsearch.similarity_search(*args, **kwargs)
@@ -40,11 +48,42 @@ class FaissStore(BaseVectorStore):
def assert_embedding_dimensions(self, embeddings):
"""Check that the word embedding dimension of the docsearch index matches the dimension of the word embeddings used."""
if settings.EMBEDDINGS_NAME == "huggingface_sentence-transformers/all-mpnet-base-v2":
word_embedding_dimension = getattr(embeddings, 'dimension', None)
if (
settings.EMBEDDINGS_NAME
== "huggingface_sentence-transformers/all-mpnet-base-v2"
):
word_embedding_dimension = getattr(embeddings, "dimension", None)
if word_embedding_dimension is None:
raise AttributeError("'dimension' attribute not found in embeddings instance.")
raise AttributeError(
"'dimension' attribute not found in embeddings instance."
)
docsearch_index_dimension = self.docsearch.index.d
if word_embedding_dimension != docsearch_index_dimension:
raise ValueError(f"Embedding dimension mismatch: embeddings.dimension ({word_embedding_dimension}) != docsearch index dimension ({docsearch_index_dimension})")
raise ValueError(
f"Embedding dimension mismatch: embeddings.dimension ({word_embedding_dimension}) != docsearch index dimension ({docsearch_index_dimension})"
)
def get_chunks(self):
chunks = []
if self.docsearch:
for doc_id, doc in self.docsearch.docstore._dict.items():
chunk_data = {
"doc_id": doc_id,
"text": doc.page_content,
"metadata": doc.metadata,
}
chunks.append(chunk_data)
return chunks
def add_chunk(self, text, metadata=None):
metadata = metadata or {}
doc = Document(text=text, extra_info=metadata).to_langchain_format()
doc_id = self.docsearch.add_documents([doc])
self.save_local(self.path)
return doc_id
def delete_chunk(self, chunk_id):
self.delete_index([chunk_id])
self.save_local(self.path)
return True
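A sketch of the new chunk-management surface on `FaissStore`. It assumes an existing FAISS index saved under the given source id, configured embedding settings, and a valid embeddings key; the module path and source id are placeholders.

```python
from application.vectorstore.faiss import FaissStore  # module path assumed

store = FaissStore(source_id="local/default/", embeddings_key="YOUR_EMBEDDINGS_KEY")

# List the stored chunks together with their metadata.
for chunk in store.get_chunks()[:3]:
    print(chunk["doc_id"], chunk["text"][:60])

# Add a chunk (re-embeds and saves the index), then remove it again.
new_ids = store.add_chunk("DocsGPT supports custom tools.", metadata={"source": "manual"})
store.delete_chunk(new_ids[0])
```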

View File

@@ -124,3 +124,53 @@ class MongoDBVectorStore(BaseVectorStore):
def delete_index(self, *args, **kwargs):
self._collection.delete_many({"source_id": self._source_id})
def get_chunks(self):
try:
chunks = []
cursor = self._collection.find({"source_id": self._source_id})
for doc in cursor:
doc_id = str(doc.get("_id"))
text = doc.get(self._text_key)
metadata = {
k: v
for k, v in doc.items()
if k
not in ["_id", self._text_key, self._embedding_key, "source_id"]
}
if text:
chunks.append(
{"doc_id": doc_id, "text": text, "metadata": metadata}
)
return chunks
except Exception as e:
print(f"Error getting chunks: {e}")
return []
def add_chunk(self, text, metadata=None):
metadata = metadata or {}
embeddings = self._embedding.embed_documents([text])
if not embeddings:
raise ValueError("Could not generate embedding for chunk")
chunk_data = {
self._text_key: text,
self._embedding_key: embeddings[0],
"source_id": self._source_id,
**metadata,
}
result = self._collection.insert_one(chunk_data)
return str(result.inserted_id)
def delete_chunk(self, chunk_id):
try:
from bson.objectid import ObjectId
object_id = ObjectId(chunk_id)
result = self._collection.delete_one({"_id": object_id})
return result.deleted_count > 0
except Exception as e:
print(f"Error deleting chunk: {e}")
return False

View File

@@ -12,10 +12,10 @@ from bson.objectid import ObjectId
from application.core.mongo_db import MongoDB
from application.core.settings import settings
from application.parser.file.bulk import SimpleDirectoryReader
from application.parser.open_ai_func import call_openai_api
from application.parser.embedding_pipeline import embed_and_store_documents
from application.parser.remote.remote_creator import RemoteCreator
from application.parser.schema.base import Document
from application.parser.token_func import group_split
from application.parser.chunking import Chunker
from application.utils import count_tokens_docs
mongo = MongoDB.get_client()
@@ -126,7 +126,6 @@ def ingest_worker(
limit = None
exclude = True
sample = False
token_check = True
full_path = os.path.join(directory, user, name_job)
logging.info(f"Ingest file: {full_path}", extra={"user": user, "job": name_job})
@@ -153,17 +152,19 @@ def ingest_worker(
exclude_hidden=exclude,
file_metadata=metadata_from_filename,
).load_data()
raw_docs = group_split(
documents=raw_docs,
min_tokens=MIN_TOKENS,
chunker = Chunker(
chunking_strategy="classic_chunk",
max_tokens=MAX_TOKENS,
token_check=token_check,
min_tokens=MIN_TOKENS,
duplicate_headers=False
)
raw_docs = chunker.chunk(documents=raw_docs)
docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
id = ObjectId()
call_openai_api(docs, full_path, id, self)
embed_and_store_documents(docs, full_path, id, self)
tokens = count_tokens_docs(docs)
self.update_state(state="PROGRESS", meta={"current": 100})
@@ -202,52 +203,61 @@ def remote_worker(
sync_frequency="never",
operation_mode="upload",
doc_id=None,
):
token_check = True
):
full_path = os.path.join(directory, user, name_job)
if not os.path.exists(full_path):
os.makedirs(full_path)
self.update_state(state="PROGRESS", meta={"current": 1})
logging.info(
f"Remote job: {full_path}",
extra={"user": user, "job": name_job, "source_data": source_data},
)
try:
logging.info("Initializing remote loader with type: %s", loader)
remote_loader = RemoteCreator.create_loader(loader)
raw_docs = remote_loader.load_data(source_data)
remote_loader = RemoteCreator.create_loader(loader)
raw_docs = remote_loader.load_data(source_data)
chunker = Chunker(
chunking_strategy="classic_chunk",
max_tokens=MAX_TOKENS,
min_tokens=MIN_TOKENS,
duplicate_headers=False
)
docs = chunker.chunk(documents=raw_docs)
docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
tokens = count_tokens_docs(docs)
logging.info("Total tokens calculated: %d", tokens)
docs = group_split(
documents=raw_docs,
min_tokens=MIN_TOKENS,
max_tokens=MAX_TOKENS,
token_check=token_check,
)
tokens = count_tokens_docs(docs)
if operation_mode == "upload":
id = ObjectId()
call_openai_api(docs, full_path, id, self)
elif operation_mode == "sync":
if not doc_id or not ObjectId.is_valid(doc_id):
raise ValueError("doc_id must be provided for sync operation.")
id = ObjectId(doc_id)
call_openai_api(docs, full_path, id, self)
self.update_state(state="PROGRESS", meta={"current": 100})
if operation_mode == "upload":
id = ObjectId()
embed_and_store_documents(docs, full_path, id, self)
elif operation_mode == "sync":
if not doc_id or not ObjectId.is_valid(doc_id):
logging.error("Invalid doc_id provided for sync operation: %s", doc_id)
raise ValueError("doc_id must be provided for sync operation.")
id = ObjectId(doc_id)
embed_and_store_documents(docs, full_path, id, self)
file_data = {
"name": name_job,
"user": user,
"tokens": tokens,
"retriever": retriever,
"id": str(id),
"type": loader,
"remote_data": source_data,
"sync_frequency": sync_frequency,
}
upload_index(full_path, file_data)
self.update_state(state="PROGRESS", meta={"current": 100})
shutil.rmtree(full_path)
file_data = {
"name": name_job,
"user": user,
"tokens": tokens,
"retriever": retriever,
"id": str(id),
"type": loader,
"remote_data": source_data,
"sync_frequency": sync_frequency,
}
upload_index(full_path, file_data)
except Exception as e:
logging.error("Error in remote_worker task: %s", str(e), exc_info=True)
raise
finally:
if os.path.exists(full_path):
shutil.rmtree(full_path)
logging.info("remote_worker task completed successfully")
return {"urls": source_data, "name_job": name_job, "user": user, "limited": False}
def sync(

View File

@@ -1,6 +1,6 @@
services:
frontend:
build: ./frontend
build: ../frontend
environment:
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING
@@ -10,7 +10,7 @@ services:
- backend
backend:
build: ./application
build: ../application
environment:
- API_KEY=$OPENAI_API_KEY
- EMBEDDINGS_KEY=$OPENAI_API_KEY
@@ -25,15 +25,15 @@ services:
ports:
- "7091:7091"
volumes:
- ./application/indexes:/app/application/indexes
- ./application/inputs:/app/application/inputs
- ./application/vectors:/app/application/vectors
- ../application/indexes:/app/application/indexes
- ../application/inputs:/app/application/inputs
- ../application/vectors:/app/application/vectors
depends_on:
- redis
- mongo
worker:
build: ./application
build: ../application
command: celery -A application.app.celery worker -l INFO
environment:
- API_KEY=$OPENAI_API_KEY

View File

@@ -0,0 +1,18 @@
services:
redis:
image: redis:6-alpine
ports:
- 6379:6379
mongo:
image: mongo:6
ports:
- 27017:27017
volumes:
- mongodb_data_container:/data/db
volumes:
mongodb_data_container:

View File

@@ -1,8 +1,8 @@
services:
frontend:
build: ./frontend
build: ../frontend
volumes:
- ./frontend/src:/app/src
- ../frontend/src:/app/src
environment:
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING

View File

@@ -1,8 +1,8 @@
services:
frontend:
build: ./frontend
build: ../frontend
volumes:
- ./frontend/src:/app/src
- ../frontend/src:/app/src
environment:
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING
@@ -12,7 +12,7 @@ services:
- backend
backend:
build: ./application
build: ../application
environment:
- API_KEY=$API_KEY
- EMBEDDINGS_KEY=$API_KEY
@@ -21,18 +21,20 @@ services:
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- MONGO_URI=mongodb://mongo:27017/docsgpt
- CACHE_REDIS_URL=redis://redis:6379/2
- OPENAI_BASE_URL=$OPENAI_BASE_URL
- MODEL_NAME=$MODEL_NAME
ports:
- "7091:7091"
volumes:
- ./application/indexes:/app/application/indexes
- ./application/inputs:/app/application/inputs
- ./application/vectors:/app/application/vectors
- ../application/indexes:/app/application/indexes
- ../application/inputs:/app/application/inputs
- ../application/vectors:/app/application/vectors
depends_on:
- redis
- mongo
worker:
build: ./application
build: ../application
command: celery -A application.app.celery worker -l INFO -B
environment:
- API_KEY=$API_KEY

View File

@@ -0,0 +1,11 @@
version: "3.8"
services:
ollama:
image: ollama/ollama
ports:
- "11434:11434"
volumes:
- ollama_data:/root/.ollama
volumes:
ollama_data:

View File

@@ -0,0 +1,16 @@
version: "3.8"
services:
ollama:
image: ollama/ollama
ports:
- "11434:11434"
volumes:
- ollama_data:/root/.ollama
deploy:
resources:
reservations:
devices:
- capabilities: [gpu]
volumes:
ollama_data:

View File

@@ -1,20 +0,0 @@
services:
frontend:
build: ./frontend
environment:
- VITE_API_HOST=http://localhost:7091
- VITE_API_STREAMING=$VITE_API_STREAMING
ports:
- "5173:5173"
depends_on:
- mock-backend
mock-backend:
build: ./mock-backend
ports:
- "7091:7091"
redis:
image: redis:6-alpine
ports:
- 6379:6379

View File

@@ -0,0 +1,120 @@
import Image from 'next/image';
const iconMap = {
'Amazon Lightsail': '/lightsail.png',
'Railway': '/railway.png',
'Civo Compute Cloud': '/civo.png',
'DigitalOcean Droplet': '/digitalocean.png',
'Kamatera Cloud': '/kamatera.png',
};
export function DeploymentCards({ items }) {
return (
<>
<div className="deployment-cards">
{items.map(({ title, link, description }) => {
const isExternal = link.startsWith('https://');
const iconSrc = iconMap[title] || '/default-icon.png'; // Default icon if not found
return (
<div
key={title}
className={`card${isExternal ? ' external' : ''}`}
>
<a href={link} target={isExternal ? '_blank' : undefined} rel="noopener noreferrer" className="card-link-wrapper">
<div className="card-icon-container">
{iconSrc && <div className="card-icon"><Image src={iconSrc} alt={title} width={32} height={32} /></div>} {/* Reduced icon size */}
</div>
<h3 className="card-title">{title}</h3>
{description && <p className="card-description">{description}</p>}
<p className="card-url">{new URL(link).hostname.replace('www.', '')}</p>
</a>
</div>
);
})}
</div>
<style jsx>{`
.deployment-cards {
margin-top: 24px;
display: grid;
grid-template-columns: 1fr;
gap: 16px;
}
@media (min-width: 768px) {
.deployment-cards {
grid-template-columns: 1fr 1fr;
}
}
.card {
background-color: #222222;
border-radius: 8px;
padding: 16px;
transition: background-color 0.3s;
position: relative;
color: #ffffff;
/* Make the card a flex container */
display: flex;
flex-direction: column;
align-items: center; /* Center horizontally */
justify-content: center; /* Center vertically */
height: 100%; /* Fill the height of the grid cell */
}
.card:hover {
background-color: #333333;
}
.card.external::after {
content: "↗";
position: absolute;
top: 12px; /* Adjusted position */
right: 12px; /* Adjusted position */
color: #ffffff;
font-size: 0.7em; /* Reduced size */
opacity: 0.8; /* Slightly faded */
}
.card-link-wrapper {
display: flex;
flex-direction: column;
align-items:center;
color: inherit;
text-decoration: none;
width:100%; /* Important: make link wrapper take full width */
}
.card-icon-container{
display:flex;
justify-content:center;
width: 100%;
margin-bottom: 8px; /* Space between icon and title */
}
.card-icon {
display: block;
margin: 0 auto;
}
.card-title {
font-weight: 600;
margin-bottom: 4px;
font-size: 16px;
text-align: center;
color: #f0f0f0; /* Lighter title color if needed */
}
.card-description {
margin-bottom: 0;
font-size: 13px;
color: #aaaaaa;
text-align: center;
line-height: 1.4;
}
.card-url {
margin-top: 8px; /*Keep space consistent */
font-size: 11px;
color: #777777;
text-align: center;
font-family: monospace;
}
`}</style>
</>
);
}

750
docs/package-lock.json generated
View File

@@ -7,8 +7,8 @@
"license": "MIT",
"dependencies": {
"@vercel/analytics": "^1.1.1",
"docsgpt": "^0.4.7",
"next": "^14.2.12",
"docsgpt-react": "^0.4.9",
"next": "^14.2.22",
"nextra": "^2.13.2",
"nextra-theme-docs": "^2.13.2",
"react": "^18.2.0",
@@ -931,14 +931,14 @@
}
},
"node_modules/@next/env": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.12.tgz",
"integrity": "sha512-3fP29GIetdwVIfIRyLKM7KrvJaqepv+6pVodEbx0P5CaMLYBtx+7eEg8JYO5L9sveJO87z9eCReceZLi0hxO1Q=="
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.22.tgz",
"integrity": "sha512-EQ6y1QeNQglNmNIXvwP/Bb+lf7n9WtgcWvtoFsHquVLCJUuxRs+6SfZ5EK0/EqkkLex4RrDySvKgKNN7PXip7Q=="
},
"node_modules/@next/swc-darwin-arm64": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.12.tgz",
"integrity": "sha512-crHJ9UoinXeFbHYNok6VZqjKnd8rTd7K3Z2zpyzF1ch7vVNKmhjv/V7EHxep3ILoN8JB9AdRn/EtVVyG9AkCXw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.22.tgz",
"integrity": "sha512-HUaLiehovgnqY4TMBZJ3pDaOsTE1spIXeR10pWgdQVPYqDGQmHJBj3h3V6yC0uuo/RoY2GC0YBFRkOX3dI9WVQ==",
"cpu": [
"arm64"
],
@@ -951,9 +951,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.12.tgz",
"integrity": "sha512-JbEaGbWq18BuNBO+lCtKfxl563Uw9oy2TodnN2ioX00u7V1uzrsSUcg3Ep9ce+P0Z9es+JmsvL2/rLphz+Frcw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.22.tgz",
"integrity": "sha512-ApVDANousaAGrosWvxoGdLT0uvLBUC+srqOcpXuyfglA40cP2LBFaGmBjhgpxYk5z4xmunzqQvcIgXawTzo2uQ==",
"cpu": [
"x64"
],
@@ -966,9 +966,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.12.tgz",
"integrity": "sha512-qBy7OiXOqZrdp88QEl2H4fWalMGnSCrr1agT/AVDndlyw2YJQA89f3ttR/AkEIP9EkBXXeGl6cC72/EZT5r6rw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.22.tgz",
"integrity": "sha512-3O2J99Bk9aM+d4CGn9eEayJXHuH9QLx0BctvWyuUGtJ3/mH6lkfAPRI4FidmHMBQBB4UcvLMfNf8vF0NZT7iKw==",
"cpu": [
"arm64"
],
@@ -981,9 +981,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.12.tgz",
"integrity": "sha512-EfD9L7o9biaQxjwP1uWXnk3vYZi64NVcKUN83hpVkKocB7ogJfyH2r7o1pPnMtir6gHZiGCeHKagJ0yrNSLNHw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.22.tgz",
"integrity": "sha512-H/hqfRz75yy60y5Eg7DxYfbmHMjv60Dsa6IWHzpJSz4MRkZNy5eDnEW9wyts9bkxwbOVZNPHeb3NkqanP+nGPg==",
"cpu": [
"arm64"
],
@@ -996,9 +996,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.12.tgz",
"integrity": "sha512-iQ+n2pxklJew9IpE47hE/VgjmljlHqtcD5UhZVeHICTPbLyrgPehaKf2wLRNjYH75udroBNCgrSSVSVpAbNoYw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.22.tgz",
"integrity": "sha512-LckLwlCLcGR1hlI5eiJymR8zSHPsuruuwaZ3H2uudr25+Dpzo6cRFjp/3OR5UYJt8LSwlXv9mmY4oI2QynwpqQ==",
"cpu": [
"x64"
],
@@ -1011,9 +1011,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.12.tgz",
"integrity": "sha512-rFkUkNwcQ0ODn7cxvcVdpHlcOpYxMeyMfkJuzaT74xjAa5v4fxP4xDk5OoYmPi8QNLDs3UgZPMSBmpBuv9zKWA==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.22.tgz",
"integrity": "sha512-qGUutzmh0PoFU0fCSu0XYpOfT7ydBZgDfcETIeft46abPqP+dmePhwRGLhFKwZWxNWQCPprH26TjaTxM0Nv8mw==",
"cpu": [
"x64"
],
@@ -1026,9 +1026,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.12.tgz",
"integrity": "sha512-PQFYUvwtHs/u0K85SG4sAdDXYIPXpETf9mcEjWc0R4JmjgMKSDwIU/qfZdavtP6MPNiMjuKGXHCtyhR/M5zo8g==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.22.tgz",
"integrity": "sha512-K6MwucMWmIvMb9GlvT0haYsfIPxfQD8yXqxwFy4uLFMeXIb2TcVYQimxkaFZv86I7sn1NOZnpOaVk5eaxThGIw==",
"cpu": [
"arm64"
],
@@ -1041,9 +1041,9 @@
}
},
"node_modules/@next/swc-win32-ia32-msvc": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.12.tgz",
"integrity": "sha512-FAj2hMlcbeCV546eU2tEv41dcJb4NeqFlSXU/xL/0ehXywHnNpaYajOUvn3P8wru5WyQe6cTZ8fvckj/2XN4Vw==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.22.tgz",
"integrity": "sha512-5IhDDTPEbzPR31ZzqHe90LnNe7BlJUZvC4sA1thPJV6oN5WmtWjZ0bOYfNsyZx00FJt7gggNs6SrsX0UEIcIpA==",
"cpu": [
"ia32"
],
@@ -1056,9 +1056,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.12.tgz",
"integrity": "sha512-yu8QvV53sBzoIVRHsxCHqeuS8jYq6Lrmdh0briivuh+Brsp6xjg80MAozUsBTAV9KNmY08KlX0KYTWz1lbPzEg==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.22.tgz",
"integrity": "sha512-nvRaB1PyG4scn9/qNzlkwEwLzuoPH3Gjp7Q/pLuwUgOTt1oPMlnCI3A3rgkt+eZnU71emOiEv/mR201HoURPGg==",
"cpu": [
"x64"
],
@@ -1170,6 +1170,407 @@
"node": ">=8"
}
},
"node_modules/@parcel/core": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/core/-/core-2.13.2.tgz",
"integrity": "sha512-1zC5Au4z9or5XyP6ipfvJqHktuB0jD7WuxMcV1CWAZGARHKylLe+0ccl+Wx7HN5O+xAvfCDtTlKrATY8qyrIyw==",
"peer": true,
"dependencies": {
"@mischnic/json-sourcemap": "^0.1.0",
"@parcel/cache": "2.13.2",
"@parcel/diagnostic": "2.13.2",
"@parcel/events": "2.13.2",
"@parcel/feature-flags": "2.13.2",
"@parcel/fs": "2.13.2",
"@parcel/graph": "3.3.2",
"@parcel/logger": "2.13.2",
"@parcel/package-manager": "2.13.2",
"@parcel/plugin": "2.13.2",
"@parcel/profiler": "2.13.2",
"@parcel/rust": "2.13.2",
"@parcel/source-map": "^2.1.1",
"@parcel/types": "2.13.2",
"@parcel/utils": "2.13.2",
"@parcel/workers": "2.13.2",
"base-x": "^3.0.8",
"browserslist": "^4.6.6",
"clone": "^2.1.1",
"dotenv": "^16.4.5",
"dotenv-expand": "^11.0.6",
"json5": "^2.2.0",
"msgpackr": "^1.9.9",
"nullthrows": "^1.1.1",
"semver": "^7.5.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/cache": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/cache/-/cache-2.13.2.tgz",
"integrity": "sha512-Y0nWlCMWDSp1lxiPI5zCWTGD0InnVZ+IfqeyLWmROAqValYyd0QZCvnSljKJ144jWTr0jXxDveir+DVF8sAYaA==",
"peer": true,
"dependencies": {
"@parcel/fs": "2.13.2",
"@parcel/logger": "2.13.2",
"@parcel/utils": "2.13.2",
"lmdb": "2.8.5"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"peerDependencies": {
"@parcel/core": "^2.13.2"
}
},
"node_modules/@parcel/core/node_modules/@parcel/codeframe": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/codeframe/-/codeframe-2.13.2.tgz",
"integrity": "sha512-qFMiS14orb6QSQj5/J/QN+gJElUfedVAKBTNkp9QB4i8ObdLHDqHRUzFb55ZQJI3G4vsxOOWAOUXGirtLwrxGQ==",
"peer": true,
"dependencies": {
"chalk": "^4.1.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/diagnostic": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/diagnostic/-/diagnostic-2.13.2.tgz",
"integrity": "sha512-6Au0JEJ5SY2gYrY0/m0i0sTuqTvK0k2E9azhBJR+zzCREbUxLiDdLZ+vXAfLW7t/kPAcWtdNU0Bj7pnZcMiMXg==",
"peer": true,
"dependencies": {
"@mischnic/json-sourcemap": "^0.1.0",
"nullthrows": "^1.1.1"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/events": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/events/-/events-2.13.2.tgz",
"integrity": "sha512-BVB9hW1RGh/tMaDHfpa+uIgz5PMULorCnjmWr/KvrlhdUSUQoaPYfRcTDYrKhoKuNIKsWSnTGvXrxE53L5qo0w==",
"peer": true,
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/fs": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/fs/-/fs-2.13.2.tgz",
"integrity": "sha512-bdeIMuAXhMnROvqV55JWRUmjD438/T7h3r3NsFnkq+Mp4z2nuAn0STxbqDNxIgTMJHNunSDzncqRNMT7xJCe8A==",
"peer": true,
"dependencies": {
"@parcel/feature-flags": "2.13.2",
"@parcel/rust": "2.13.2",
"@parcel/types-internal": "2.13.2",
"@parcel/utils": "2.13.2",
"@parcel/watcher": "^2.0.7",
"@parcel/workers": "2.13.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"peerDependencies": {
"@parcel/core": "^2.13.2"
}
},
"node_modules/@parcel/core/node_modules/@parcel/logger": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/logger/-/logger-2.13.2.tgz",
"integrity": "sha512-SFVABAMqaT9jIDn4maPgaQQauPDz8fpoKUGEuLF44Q0aQFbBUy7vX7KYs/EvYSWZo4VyJcUDHvIInBlepA0/ZQ==",
"peer": true,
"dependencies": {
"@parcel/diagnostic": "2.13.2",
"@parcel/events": "2.13.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/markdown-ansi": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/markdown-ansi/-/markdown-ansi-2.13.2.tgz",
"integrity": "sha512-MIEoetfT/snk1GqWzBI3AhifV257i2xke9dvyQl14PPiMl+TlVhwnbQyA09WJBvDor+MuxZypHL7xoFdW8ff3A==",
"peer": true,
"dependencies": {
"chalk": "^4.1.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/node-resolver-core": {
"version": "3.4.2",
"resolved": "https://registry.npmjs.org/@parcel/node-resolver-core/-/node-resolver-core-3.4.2.tgz",
"integrity": "sha512-SwnKLcZRG1VdB5JeM/Ax5VMWWh2QfXufmMQCKKx0/Kk41nUpie+aIZKj3LH6Z/fJsnKig/vXpeWoxGhmG523qg==",
"peer": true,
"dependencies": {
"@mischnic/json-sourcemap": "^0.1.0",
"@parcel/diagnostic": "2.13.2",
"@parcel/fs": "2.13.2",
"@parcel/rust": "2.13.2",
"@parcel/utils": "2.13.2",
"nullthrows": "^1.1.1",
"semver": "^7.5.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/package-manager": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/package-manager/-/package-manager-2.13.2.tgz",
"integrity": "sha512-6HjfbdJUjHyNKzYB7GSYnOCtLwqCGW7yT95GlnnTKyFffvXYsqvBSyepMuPRlbX0mFUm4S9l2DH3OVZrk108AA==",
"peer": true,
"dependencies": {
"@parcel/diagnostic": "2.13.2",
"@parcel/fs": "2.13.2",
"@parcel/logger": "2.13.2",
"@parcel/node-resolver-core": "3.4.2",
"@parcel/types": "2.13.2",
"@parcel/utils": "2.13.2",
"@parcel/workers": "2.13.2",
"@swc/core": "^1.7.26",
"semver": "^7.5.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"peerDependencies": {
"@parcel/core": "^2.13.2"
}
},
"node_modules/@parcel/core/node_modules/@parcel/plugin": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/plugin/-/plugin-2.13.2.tgz",
"integrity": "sha512-Q+RIENS1B185yLPhrGdzBK1oJrZmh/RXrYMnzJs78Tog8SpihjeNBNR6z4PT85o2F+Gy2y1S9A26fpiGq161qQ==",
"peer": true,
"dependencies": {
"@parcel/types": "2.13.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/profiler": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/profiler/-/profiler-2.13.2.tgz",
"integrity": "sha512-fur6Oq2HkX6AiM8rtqmDvldH5JWz0sqXA1ylz8cE3XOiDZIuvCulZmQ+hH+4odaNH6QocI1MwfV+GDh3HlQoCA==",
"peer": true,
"dependencies": {
"@parcel/diagnostic": "2.13.2",
"@parcel/events": "2.13.2",
"@parcel/types-internal": "2.13.2",
"chrome-trace-event": "^1.0.2"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/rust": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/rust/-/rust-2.13.2.tgz",
"integrity": "sha512-XFIewSwxkrDYOnnSP/XZ1LDLdXTs7L9CjQUWtl46Vir5Pq/rinemwLJeKGIwKLHy7fhUZQjYxquH6fBL+AY8DA==",
"peer": true,
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/types": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/types/-/types-2.13.2.tgz",
"integrity": "sha512-6ixqjk2pjKELn4sQ/jdvpbCVTeH6xXQTdotkN8Wzk68F2K2MtSPIRAEocumlexScfffbRQplr2MdIf1JJWLogA==",
"peer": true,
"dependencies": {
"@parcel/types-internal": "2.13.2",
"@parcel/workers": "2.13.2"
}
},
"node_modules/@parcel/core/node_modules/@parcel/utils": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/utils/-/utils-2.13.2.tgz",
"integrity": "sha512-BkFtRo5xenmonwnBy+X4sVbHIRrx+ZHMPpS/6hFqyTvoUUFq2yTFQnfRGVVOOvscVUxpGom+kewnrTG3HHbZoA==",
"peer": true,
"dependencies": {
"@parcel/codeframe": "2.13.2",
"@parcel/diagnostic": "2.13.2",
"@parcel/logger": "2.13.2",
"@parcel/markdown-ansi": "2.13.2",
"@parcel/rust": "2.13.2",
"@parcel/source-map": "^2.1.1",
"chalk": "^4.1.2",
"nullthrows": "^1.1.1"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/core/node_modules/@parcel/workers": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/workers/-/workers-2.13.2.tgz",
"integrity": "sha512-P78BpH0yTT9KK09wgK4eabtlb5OlcWAmZebOToN5UYuwWEylKt0gWZx1+d+LPQupvK84/iZ+AutDScsATjgUMw==",
"peer": true,
"dependencies": {
"@parcel/diagnostic": "2.13.2",
"@parcel/logger": "2.13.2",
"@parcel/profiler": "2.13.2",
"@parcel/types-internal": "2.13.2",
"@parcel/utils": "2.13.2",
"nullthrows": "^1.1.1"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"peerDependencies": {
"@parcel/core": "^2.13.2"
}
},
"node_modules/@parcel/core/node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"peer": true,
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@parcel/core/node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"peer": true,
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/@parcel/core/node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"peer": true,
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/@parcel/core/node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"peer": true
},
"node_modules/@parcel/core/node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"peer": true,
"engines": {
"node": ">=8"
}
},
"node_modules/@parcel/core/node_modules/semver": {
"version": "7.6.3",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
"integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
"peer": true,
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/@parcel/core/node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"peer": true,
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@parcel/diagnostic": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/@parcel/diagnostic/-/diagnostic-2.12.0.tgz",
@@ -1198,6 +1599,19 @@
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/feature-flags": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/feature-flags/-/feature-flags-2.13.2.tgz",
"integrity": "sha512-cCwDAKD4Er24EkuQ+loVZXSURpM0gAGRsLJVoBtFiCSbB3nmIJJ6FLRwSBI/5OsOUExiUXDvSpfUCA5ldGTzbw==",
"peer": true,
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/fs": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/@parcel/fs/-/fs-2.12.0.tgz",
@@ -1220,6 +1634,23 @@
"@parcel/core": "^2.12.0"
}
},
"node_modules/@parcel/graph": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/@parcel/graph/-/graph-3.3.2.tgz",
"integrity": "sha512-aAysQLRr8SOonSHWqdKHMJzfcrDFXKK8IYZEurlOzosiSgZXrAK7q8b8JcaJ4r84/jlvQYNYneNZeFQxKjHXkA==",
"peer": true,
"dependencies": {
"@parcel/feature-flags": "2.13.2",
"nullthrows": "^1.1.1"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/logger": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/@parcel/logger/-/logger-2.12.0.tgz",
@@ -1569,6 +2000,35 @@
"utility-types": "^3.10.0"
}
},
"node_modules/@parcel/types-internal": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/types-internal/-/types-internal-2.13.2.tgz",
"integrity": "sha512-j0zb3WNM8O/+d8CArll7/4w4AyBED3Jbo32/unz89EPVN0VklmgBrRCAI5QXDKuJAGdAZSL5/a8bNYbwl7/Wxw==",
"peer": true,
"dependencies": {
"@parcel/diagnostic": "2.13.2",
"@parcel/feature-flags": "2.13.2",
"@parcel/source-map": "^2.1.1",
"utility-types": "^3.10.0"
}
},
"node_modules/@parcel/types-internal/node_modules/@parcel/diagnostic": {
"version": "2.13.2",
"resolved": "https://registry.npmjs.org/@parcel/diagnostic/-/diagnostic-2.13.2.tgz",
"integrity": "sha512-6Au0JEJ5SY2gYrY0/m0i0sTuqTvK0k2E9azhBJR+zzCREbUxLiDdLZ+vXAfLW7t/kPAcWtdNU0Bj7pnZcMiMXg==",
"peer": true,
"dependencies": {
"@mischnic/json-sourcemap": "^0.1.0",
"nullthrows": "^1.1.1"
},
"engines": {
"node": ">= 16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
}
},
"node_modules/@parcel/utils": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/@parcel/utils/-/utils-2.12.0.tgz",
@@ -2279,13 +2739,13 @@
}
},
"node_modules/@swc/core": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core/-/core-1.4.5.tgz",
"integrity": "sha512-4/JGkG4b1Z/QwCGgx+Ub46MlzrsZvBk5JSkxm9PcZ4bSX81c+4Y94Xm3iLp5Ka8NxzS5rD4mJSpcYuN3Tw0ceg==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core/-/core-1.10.1.tgz",
"integrity": "sha512-rQ4dS6GAdmtzKiCRt3LFVxl37FaY1cgL9kSUTnhQ2xc3fmHOd7jdJK/V4pSZMG1ruGTd0bsi34O2R0Olg9Zo/w==",
"hasInstallScript": true,
"dependencies": {
"@swc/counter": "^0.1.2",
"@swc/types": "^0.1.5"
"@swc/counter": "^0.1.3",
"@swc/types": "^0.1.17"
},
"engines": {
"node": ">=10"
@@ -2295,19 +2755,19 @@
"url": "https://opencollective.com/swc"
},
"optionalDependencies": {
"@swc/core-darwin-arm64": "1.4.5",
"@swc/core-darwin-x64": "1.4.5",
"@swc/core-linux-arm-gnueabihf": "1.4.5",
"@swc/core-linux-arm64-gnu": "1.4.5",
"@swc/core-linux-arm64-musl": "1.4.5",
"@swc/core-linux-x64-gnu": "1.4.5",
"@swc/core-linux-x64-musl": "1.4.5",
"@swc/core-win32-arm64-msvc": "1.4.5",
"@swc/core-win32-ia32-msvc": "1.4.5",
"@swc/core-win32-x64-msvc": "1.4.5"
"@swc/core-darwin-arm64": "1.10.1",
"@swc/core-darwin-x64": "1.10.1",
"@swc/core-linux-arm-gnueabihf": "1.10.1",
"@swc/core-linux-arm64-gnu": "1.10.1",
"@swc/core-linux-arm64-musl": "1.10.1",
"@swc/core-linux-x64-gnu": "1.10.1",
"@swc/core-linux-x64-musl": "1.10.1",
"@swc/core-win32-arm64-msvc": "1.10.1",
"@swc/core-win32-ia32-msvc": "1.10.1",
"@swc/core-win32-x64-msvc": "1.10.1"
},
"peerDependencies": {
"@swc/helpers": "^0.5.0"
"@swc/helpers": "*"
},
"peerDependenciesMeta": {
"@swc/helpers": {
@@ -2316,9 +2776,9 @@
}
},
"node_modules/@swc/core-darwin-arm64": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.4.5.tgz",
"integrity": "sha512-toMSkbByHNfGXESyY1aiq5L3KutgijrNWB/THgdHIA1aIbwtrgMdFQfxpSE+INuuvWYi/Fxarv86EnU7ewbI0Q==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.10.1.tgz",
"integrity": "sha512-NyELPp8EsVZtxH/mEqvzSyWpfPJ1lugpTQcSlMduZLj1EASLO4sC8wt8hmL1aizRlsbjCX+r0PyL+l0xQ64/6Q==",
"cpu": [
"arm64"
],
@@ -2331,9 +2791,9 @@
}
},
"node_modules/@swc/core-darwin-x64": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.4.5.tgz",
"integrity": "sha512-LN8cbnmb4Gav8UcbBc+L/DEthmzCWZz22rQr6fIEHMN+f0d71fuKnV0ca0hoKbpZn33dlzUmXQE53HRjlRUQbw==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.10.1.tgz",
"integrity": "sha512-L4BNt1fdQ5ZZhAk5qoDfUnXRabDOXKnXBxMDJ+PWLSxOGBbWE6aJTnu4zbGjJvtot0KM46m2LPAPY8ttknqaZA==",
"cpu": [
"x64"
],
@@ -2346,9 +2806,9 @@
}
},
"node_modules/@swc/core-linux-arm-gnueabihf": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.4.5.tgz",
"integrity": "sha512-suRFkhBWmOQxlM4frpos1uqjmHfaEI8FuJ0LL5+yRE7IunNDeQJBKujGZt6taeuxo1KqC0N0Ajr8IluN2wrKpA==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.10.1.tgz",
"integrity": "sha512-Y1u9OqCHgvVp2tYQAJ7hcU9qO5brDMIrA5R31rwWQIAKDkJKtv3IlTHF0hrbWk1wPR0ZdngkQSJZple7G+Grvw==",
"cpu": [
"arm"
],
@@ -2361,9 +2821,9 @@
}
},
"node_modules/@swc/core-linux-arm64-gnu": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.4.5.tgz",
"integrity": "sha512-mLKxasQArDGmR6k9c0tkPVUdoo8VfUecocMG1Mx9NYvpidJNaZ3xq9nYM77v7uq1fQqrs/59DM1fJTNRWvv/UQ==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.10.1.tgz",
"integrity": "sha512-tNQHO/UKdtnqjc7o04iRXng1wTUXPgVd8Y6LI4qIbHVoVPwksZydISjMcilKNLKIwOoUQAkxyJ16SlOAeADzhQ==",
"cpu": [
"arm64"
],
@@ -2376,9 +2836,9 @@
}
},
"node_modules/@swc/core-linux-arm64-musl": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.4.5.tgz",
"integrity": "sha512-pgKuyRP7S29U/HMDTx+x8dFcklWxwB9cHFNCNWSE6bS4vHR93jc4quwPX9OEQX5CVHxm+c8+xof043I4OGkAXw==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.10.1.tgz",
"integrity": "sha512-x0L2Pd9weQ6n8dI1z1Isq00VHFvpBClwQJvrt3NHzmR+1wCT/gcYl1tp9P5xHh3ldM8Cn4UjWCw+7PaUgg8FcQ==",
"cpu": [
"arm64"
],
@@ -2391,9 +2851,9 @@
}
},
"node_modules/@swc/core-linux-x64-gnu": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.4.5.tgz",
"integrity": "sha512-srR+YN86Oerzoghd0DPCzTbTp08feeJPSr9kkNdmtQWENOa4l/9cJV3+XY6vviw0sEjezPmYnc3SwRxJRaxvEw==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.10.1.tgz",
"integrity": "sha512-yyYEwQcObV3AUsC79rSzN9z6kiWxKAVJ6Ntwq2N9YoZqSPYph+4/Am5fM1xEQYf/kb99csj0FgOelomJSobxQA==",
"cpu": [
"x64"
],
@@ -2406,9 +2866,9 @@
}
},
"node_modules/@swc/core-linux-x64-musl": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.4.5.tgz",
"integrity": "sha512-aSf41LZtDeG5VXI4RCnzcu0UInPyNm3ip8Kw+sCK+sSqW9o7DgBkyqqbip3RZq84fNUHBQQQQdKXetltsyRRqw==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.10.1.tgz",
"integrity": "sha512-tcaS43Ydd7Fk7sW5ROpaf2Kq1zR+sI5K0RM+0qYLYYurvsJruj3GhBCaiN3gkzd8m/8wkqNqtVklWaQYSDsyqA==",
"cpu": [
"x64"
],
@@ -2421,9 +2881,9 @@
}
},
"node_modules/@swc/core-win32-arm64-msvc": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.4.5.tgz",
"integrity": "sha512-vU3k8JwRUlTkJMfJQY9E4VvLrsIFOpfhnvbuXB84Amo1cJsz+bYQcC6RSvY7qpaDzDKFdUGbJco4uZTRoRf7Mg==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.10.1.tgz",
"integrity": "sha512-D3Qo1voA7AkbOzQ2UGuKNHfYGKL6eejN8VWOoQYtGHHQi1p5KK/Q7V1ku55oxXBsj79Ny5FRMqiRJpVGad7bjQ==",
"cpu": [
"arm64"
],
@@ -2436,9 +2896,9 @@
}
},
"node_modules/@swc/core-win32-ia32-msvc": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.4.5.tgz",
"integrity": "sha512-856YRh3frRK2XbrSjDOFBgoAqWJLNRkaEtfGzXfeEoyJlOz0BFsSJHxKlHAFkxRfHe2li9DJRUQFTEhXn4OUWw==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.10.1.tgz",
"integrity": "sha512-WalYdFoU3454Og+sDKHM1MrjvxUGwA2oralknXkXL8S0I/8RkWZOB++p3pLaGbTvOO++T+6znFbQdR8KRaa7DA==",
"cpu": [
"ia32"
],
@@ -2451,9 +2911,9 @@
}
},
"node_modules/@swc/core-win32-x64-msvc": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.4.5.tgz",
"integrity": "sha512-j1+kV7jmWY1+NbXAvxAEW165781yLXVZKLcoXIZKmw18EatqMF6w8acg1gDG8C+Iw5aWLkRZVS4pijSh7+DtCQ==",
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.10.1.tgz",
"integrity": "sha512-JWobfQDbTnoqaIwPKQ3DVSywihVXlQMbDuwik/dDWlj33A8oEHcjPOGs4OqcA3RHv24i+lfCQpM3Mn4FAMfacA==",
"cpu": [
"x64"
],
@@ -2480,9 +2940,12 @@
}
},
"node_modules/@swc/types": {
"version": "0.1.5",
"resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.5.tgz",
"integrity": "sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw=="
"version": "0.1.17",
"resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.17.tgz",
"integrity": "sha512-V5gRru+aD8YVyCOMAjMpWR1Ui577DD5KSJsHP8RAxopAH22jFz6GZd/qxqjO6MJHQhcsjvjOFXyDhyLQUnMveQ==",
"dependencies": {
"@swc/counter": "^0.1.3"
}
},
"node_modules/@theguild/remark-mermaid": {
"version": "0.0.5",
@@ -2723,6 +3186,15 @@
"url": "https://github.com/sponsors/wooorm"
}
},
"node_modules/base-x": {
"version": "3.0.10",
"resolved": "https://registry.npmjs.org/base-x/-/base-x-3.0.10.tgz",
"integrity": "sha512-7d0s06rR9rYaIWHkpfLIFICM/tkSVdoPC9qYAQRpxn9DdKNWNsKC0uk++akckyLq16Tx2WIinnZ6WRriAt6njQ==",
"peer": true,
"dependencies": {
"safe-buffer": "^5.0.1"
}
},
"node_modules/boolbase": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
@@ -2937,6 +3409,15 @@
"node": ">=4"
}
},
"node_modules/clone": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz",
"integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==",
"peer": true,
"engines": {
"node": ">=0.8"
}
},
"node_modules/clsx": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz",
@@ -3575,11 +4056,10 @@
"node": ">=0.3.1"
}
},
"node_modules/docsgpt": {
"version": "0.4.7",
"resolved": "https://registry.npmjs.org/docsgpt/-/docsgpt-0.4.7.tgz",
"integrity": "sha512-4YZzLZo6ybudFrJVUQflDFeWzFiTATRWB9myrGSpLigyuMMzax1ZAY2xFallZLuEG9VVm0mOgkx3ssWHLrXWkQ==",
"license": "Apache-2.0",
"node_modules/docsgpt-react": {
"version": "0.4.9",
"resolved": "https://registry.npmjs.org/docsgpt-react/-/docsgpt-react-0.4.9.tgz",
"integrity": "sha512-mGGbd4IGVHrQVVdgoej991Vpl/hYkTuKz5Ax95hvqSbWDZELZnEx2/AZajAII5AayUZKWYaEFRluewUiGJVSbA==",
"dependencies": {
"@babel/plugin-transform-flow-strip-types": "^7.23.3",
"@parcel/resolver-glob": "^2.12.0",
@@ -3665,6 +4145,33 @@
"url": "https://github.com/fb55/domutils?sponsor=1"
}
},
"node_modules/dotenv": {
"version": "16.4.7",
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz",
"integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==",
"peer": true,
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://dotenvx.com"
}
},
"node_modules/dotenv-expand": {
"version": "11.0.7",
"resolved": "https://registry.npmjs.org/dotenv-expand/-/dotenv-expand-11.0.7.tgz",
"integrity": "sha512-zIHwmZPRshsCdpMDyVsqGmgyP0yT8GAgXUnkdAoJisxvf33k7yO6OuoKmcTGuXPWSsm8Oh88nZicRLA9Y0rUeA==",
"peer": true,
"dependencies": {
"dotenv": "^16.4.5"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://dotenvx.com"
}
},
"node_modules/electron-to-chromium": {
"version": "1.4.693",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.693.tgz",
@@ -4678,9 +5185,9 @@
"integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w=="
},
"node_modules/katex": {
"version": "0.16.10",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.10.tgz",
"integrity": "sha512-ZiqaC04tp2O5utMsl2TEZTXxa6WSC4yo0fv5ML++D3QZv/vx2Mct0mTlRx3O+uUkjfuAgOkzsCmq5MiUEsDDdA==",
"version": "0.16.21",
"resolved": "https://registry.npmjs.org/katex/-/katex-0.16.21.tgz",
"integrity": "sha512-XvqR7FgOHtWupfMiigNzmh+MgUVmDGU2kXZm899ZkPfcuoPuFxyHmXsgATDpFZDAXCI8tvinaVcDo8PIIJSo4A==",
"funding": [
"https://opencollective.com/katex",
"https://github.com/sponsors/katex"
@@ -6234,9 +6741,9 @@
}
},
"node_modules/nanoid": {
"version": "3.3.7",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
"integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz",
"integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==",
"funding": [
{
"type": "github",
@@ -6251,11 +6758,11 @@
}
},
"node_modules/next": {
"version": "14.2.12",
"resolved": "https://registry.npmjs.org/next/-/next-14.2.12.tgz",
"integrity": "sha512-cDOtUSIeoOvt1skKNihdExWMTybx3exnvbFbb9ecZDIxlvIbREQzt9A5Km3Zn3PfU+IFjyYGsHS+lN9VInAGKA==",
"version": "14.2.22",
"resolved": "https://registry.npmjs.org/next/-/next-14.2.22.tgz",
"integrity": "sha512-Ps2caobQ9hlEhscLPiPm3J3SYhfwfpMqzsoCMZGWxt9jBRK9hoBZj2A37i8joKhsyth2EuVKDVJCTF5/H4iEDw==",
"dependencies": {
"@next/env": "14.2.12",
"@next/env": "14.2.22",
"@swc/helpers": "0.5.5",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001579",
@@ -6270,15 +6777,15 @@
"node": ">=18.17.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "14.2.12",
"@next/swc-darwin-x64": "14.2.12",
"@next/swc-linux-arm64-gnu": "14.2.12",
"@next/swc-linux-arm64-musl": "14.2.12",
"@next/swc-linux-x64-gnu": "14.2.12",
"@next/swc-linux-x64-musl": "14.2.12",
"@next/swc-win32-arm64-msvc": "14.2.12",
"@next/swc-win32-ia32-msvc": "14.2.12",
"@next/swc-win32-x64-msvc": "14.2.12"
"@next/swc-darwin-arm64": "14.2.22",
"@next/swc-darwin-x64": "14.2.22",
"@next/swc-linux-arm64-gnu": "14.2.22",
"@next/swc-linux-arm64-musl": "14.2.22",
"@next/swc-linux-x64-gnu": "14.2.22",
"@next/swc-linux-x64-musl": "14.2.22",
"@next/swc-win32-arm64-msvc": "14.2.22",
"@next/swc-win32-ia32-msvc": "14.2.22",
"@next/swc-win32-x64-msvc": "14.2.22"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
@@ -9598,6 +10105,26 @@
"node": ">=6"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"peer": true
},
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
@@ -9942,6 +10469,19 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/typescript": {
"version": "5.7.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz",
"integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/uc.micro": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz",

View File

@@ -7,8 +7,8 @@
"license": "MIT",
"dependencies": {
"@vercel/analytics": "^1.1.1",
"docsgpt": "^0.4.7",
"next": "^14.2.12",
"docsgpt-react": "^0.4.9",
"next": "^14.2.22",
"nextra": "^2.13.2",
"nextra-theme-docs": "^2.13.2",
"react": "^18.2.0",

View File

@@ -1,350 +0,0 @@
# API Endpoints Documentation
*Currently, the application provides the following main API endpoints:*
### 1. /api/answer
**Description:**
This endpoint is used to request answers to user-provided questions.
**Request:**
**Method**: `POST`
**Headers**: Content-Type should be set to `application/json; charset=utf-8`
**Request Body**: JSON object with the following fields:
* `question` — The user's question.
* `history` — (Optional) Previous conversation history.
* `api_key`— Your API key.
* `embeddings_key` — Your embeddings key.
* `active_docs` — The location of active documentation.
Here is a JavaScript Fetch Request example:
```js
// answer (POST http://127.0.0.1:5000/api/answer)
fetch("http://127.0.0.1:5000/api/answer", {
"method": "POST",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
"body": JSON.stringify({"question":"Hi","history":null,"api_key":"OPENAI_API_KEY","embeddings_key":"OPENAI_API_KEY",
"active_docs": "javascript/.project/ES2015/openai_text-embedding-ada-002/"})
})
.then((res) => res.text())
.then(console.log.bind(console))
```
**Response**
In response, you will get a JSON document containing the `answer`, `query` and `result`:
```json
{
"answer": "Hi there! How can I help you?\n",
"query": "Hi",
"result": "Hi there! How can I help you?\nSOURCES:"
}
```
### 2. /api/docs_check
**Description:**
This endpoint makes sure the documentation is loaded on the server (run it every time the user switches between libraries/documentation sets).
**Request:**
**Method**: `POST`
**Headers**: Content-Type should be set to `application/json; charset=utf-8`
**Request Body**: JSON object with the field:
* `docs` — The location of the documentation:
```js
// docs_check (POST http://127.0.0.1:5000/api/docs_check)
fetch("http://127.0.0.1:5000/api/docs_check", {
"method": "POST",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
"body": JSON.stringify({"docs":"javascript/.project/ES2015/openai_text-embedding-ada-002/"})
})
.then((res) => res.text())
.then(console.log.bind(console))
```
**Response:**
In response, you will get a JSON document like this one indicating whether the documentation exists or not:
```json
{
"status": "exists"
}
```
### 3. /api/combine
**Description:**
This endpoint provides information about available vectors and their locations with a simple GET request.
**Request:**
**Method**: `GET`
**Response:**
Response will include:
* `date`
* `description`
* `docLink`
* `fullName`
* `language`
* `location` (local or docshub)
* `model`
* `name`
* `version`
Example of JSON in Docshub and local:
<img width="295" alt="image" src="https://user-images.githubusercontent.com/15183589/224714085-f09f51a4-7a9a-4efb-bd39-798029bb4273.png">
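Here is a JavaScript Fetch Request example in the same style as the other endpoints (a sketch; the host and port follow the examples above):
```js
// combine (GET http://127.0.0.1:5000/api/combine)
fetch("http://127.0.0.1:5000/api/combine", {
  "method": "GET",
  "headers": {
    "Content-Type": "application/json; charset=utf-8"
  },
})
.then((res) => res.json())
.then(console.log.bind(console))
```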
### 4. /api/upload
**Description:**
This endpoint is used to upload a file that needs to be trained. The response is a JSON object with a task ID, which can be used to check the task's progress.
**Request:**
**Method**: `POST`
**Request Body**: A multipart/form-data form with file upload and additional fields, including `user` and `name`.
HTML example:
```html
<form action="/api/upload" method="post" enctype="multipart/form-data" class="mt-2">
<input type="file" name="file" class="py-4" id="file-upload">
<input type="text" name="user" value="local" hidden>
<input type="text" name="name" placeholder="Name:">
<button type="submit" class="py-2 px-4 text-white bg-purple-30 rounded-md hover:bg-purple-30 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-purple-30">
Upload
</button>
</form>
```
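A JavaScript Fetch sketch of the same upload (here `fileInput` refers to the file input from the form above, and the `name` value is illustrative; the browser sets the multipart boundary itself):
```js
// upload (POST http://127.0.0.1:5000/api/upload)
const fileInput = document.getElementById("file-upload"); // the <input type="file"> from the form above
const formData = new FormData();
formData.append("file", fileInput.files[0]);
formData.append("user", "local");
formData.append("name", "my-docs"); // illustrative job name

fetch("http://127.0.0.1:5000/api/upload", {
  "method": "POST",
  "body": formData // no Content-Type header: the browser adds the multipart/form-data boundary
})
.then((res) => res.json())
.then(console.log.bind(console))
```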
**Response:**
JSON response with a status and a task ID that can be used to check the task's progress.
### 5. /api/task_status
**Description:**
This endpoint is used to get the status of a task (`task_id`) from `/api/upload`.
**Request:**
**Method**: `GET`
**Query Parameter**: `task_id` (task ID to check)
**Sample JavaScript Fetch Request:**
```js
// Task status (Get http://127.0.0.1:5000/api/task_status)
fetch("http://localhost:5001/api/task_status?task_id=YOUR_TASK_ID", {
"method": "GET",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
})
.then((res) => res.text())
.then(console.log.bind(console))
```
**Response:**
There are two types of responses:
1. While the task is still running, the 'current' value will show progress from 0 to 100.
```json
{
"result": {
"current": 1
},
"status": "PROGRESS"
}
```
2. When the task is completed:
```json
{
"result": {
"directory": "temp",
"filename": "install.rst",
"formats": [
".rst",
".md",
".pdf"
],
"name_job": "somename",
"user": "local"
},
"status": "SUCCESS"
}
```
### 6. /api/delete_old
**Description:**
This endpoint is used to delete old Vector Stores.
**Request:**
**Method**: `GET`
**Query Parameter**: `task_id`
**Sample JavaScript Fetch Request:**
```js
// delete_old (GET http://127.0.0.1:5000/api/delete_old)
fetch("http://localhost:5001/api/delete_old?task_id=YOUR_TASK_ID", {
"method": "GET",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
})
.then((res) => res.text())
.then(console.log.bind(console))
```
**Response:**
JSON response indicating the status of the operation:
```json
{ "status": "ok" }
```
### 7. /api/get_api_keys
**Description:**
The endpoint retrieves a list of API keys for the user.
**Request:**
**Method**: `GET`
**Sample JavaScript Fetch Request:**
```js
// get_api_keys (GET http://127.0.0.1:5000/api/get_api_keys)
fetch("http://localhost:5001/api/get_api_keys", {
"method": "GET",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
})
.then((res) => res.text())
.then(console.log.bind(console))
```
**Response:**
JSON response with a list of created API keys:
```json
[
{
"id": "string",
"name": "string",
"key": "string",
"source": "string"
},
...
]
```
### 8. /api/create_api_key
**Description:**
Create a new API key for the user.
**Request:**
**Method**: `POST`
**Headers**: Content-Type should be set to `application/json; charset=utf-8`
**Request Body**: JSON object with the following fields:
* `name` — A name for the API key.
* `source` — The source documents that will be used.
* `prompt_id` — The prompt ID.
* `chunks` — The number of chunks used to process an answer.
Here is a JavaScript Fetch Request example:
```js
// create_api_key (POST http://127.0.0.1:5000/api/create_api_key)
fetch("http://127.0.0.1:5000/api/create_api_key", {
"method": "POST",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
"body": JSON.stringify({"name":"Example Key Name",
"source":"Example Source",
"prompt_id":"creative",
"chunks":"2"})
})
.then((res) => res.json())
.then(console.log.bind(console))
```
**Response**
In response, you will get a JSON document containing the `id` and `key`:
```json
{
"id": "string",
"key": "string"
}
```
### 9. /api/delete_api_key
**Description:**
Delete an API key for the user.
**Request:**
**Method**: `POST`
**Headers**: Content-Type should be set to `application/json; charset=utf-8`
**Request Body**: JSON object with the field:
* `id` — The unique identifier of the API key to be deleted.
Here is a JavaScript Fetch Request example:
```js
// delete_api_key (POST http://127.0.0.1:5000/api/delete_api_key)
fetch("http://127.0.0.1:5000/api/delete_api_key", {
"method": "POST",
"headers": {
"Content-Type": "application/json; charset=utf-8"
},
"body": JSON.stringify({"id":"API_KEY_ID"})
})
.then((res) => res.json())
.then(console.log.bind(console))
```
**Response:**
In response, you will get a JSON document indicating the status of the operation:
```json
{
"status": "ok"
}
```

View File

@@ -1,10 +0,0 @@
{
"API-docs": {
"title": "🗂️️ API-docs",
"href": "/API/API-docs"
},
"api-key-guide": {
"title": "🔐 API Keys guide",
"href": "/API/api-key-guide"
}
}

View File

@@ -1,3 +1,9 @@
---
title: Hosting DocsGPT on Amazon Lightsail
description:
display: hidden
---
# Self-hosting DocsGPT on Amazon Lightsail
Here's a step-by-step guide on how to set up an Amazon Lightsail instance to host DocsGPT.
@@ -73,7 +79,7 @@ To save the file, press CTRL+X, then Y, and then ENTER.
Next, set the correct IP for the Backend by opening the docker-compose.yml file:
`nano docker-compose.yml`
`nano deployment/docker-compose.yaml`
Then change line 7 from `VITE_API_HOST=http://localhost:7091` to `VITE_API_HOST=http://<your instance public IP>:7091`.
@@ -84,7 +90,7 @@ This will allow the frontend to connect to the backend.
You're almost there! Now that all the necessary bits and pieces have been installed, it is time to run the application. To do so, use the following command:
`sudo docker-compose up -d`
`sudo docker compose -f deployment/docker-compose.yaml up -d`
Launching it for the first time will take a few minutes to download all the necessary dependencies and build.
@@ -101,10 +107,4 @@ Repeat the process for port `7091`.
#### Access your instance
Your instance is now available at your Public IP Address on port 5173. Enjoy using DocsGPT!
## Other Deployment Options
- [Deploy DocsGPT on Civo Compute Cloud](https://dev.to/rutamhere/deploying-docsgpt-on-civo-compute-c)
- [Deploy DocsGPT on DigitalOcean Droplet](https://dev.to/rutamhere/deploying-docsgpt-on-digitalocean-droplet-50ea)
- [Deploy DocsGPT on Kamatera Performance Cloud](https://dev.to/rutamhere/deploying-docsgpt-on-kamatera-performance-cloud-1bj)
Your instance is now available at your Public IP Address on port 5173. Enjoy using DocsGPT!

View File

@@ -0,0 +1,163 @@
---
title: Setting Up a Development Environment
description: Guide to setting up a development environment for DocsGPT, including backend and frontend setup.
---
# Setting Up a Development Environment
This guide will walk you through setting up a development environment for DocsGPT. This setup allows you to modify and test the application's backend and frontend components.
## 1. Spin Up MongoDB and Redis
For development purposes, you can quickly start MongoDB and Redis containers, which are the primary database and caching systems used by DocsGPT. We provide a dedicated Docker Compose file, `docker-compose-dev.yaml`, located in the `deployment` directory, that includes only these essential services.
You can find the `docker-compose-dev.yaml` file [here](https://github.com/arc53/DocsGPT/blob/main/deployment/docker-compose-dev.yaml).
**Steps to start MongoDB and Redis:**
1. Navigate to the root directory of your DocsGPT repository in your terminal.
2. Run the following commands to build and start the containers defined in `docker-compose-dev.yaml`:
```bash
docker compose -f deployment/docker-compose-dev.yaml build
docker compose -f deployment/docker-compose-dev.yaml up -d
```
These commands will start MongoDB and Redis in detached mode, running in the background.
## 2. Run the Backend
To run the DocsGPT backend locally, you'll need to set up a Python environment and install the necessary dependencies.
**Prerequisites:**
* **Python 3.12:** Ensure you have Python 3.12 installed on your system. You can check your Python version by running `python --version` or `python3 --version` in your terminal.
**Steps to run the backend:**
1. **Configure Environment Variables:**
DocsGPT backend settings are configured using environment variables. You can set these either in a `.env` file or directly in the `settings.py` file. For a comprehensive overview of all settings, please refer to the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings).
* **Option 1: Using a `.env` file (Recommended):**
* If you haven't already, create a file named `.env` in the **root directory** of your DocsGPT project.
* Modify the `.env` file to adjust settings as needed. You can find a comprehensive list of configurable options in [`application/core/settings.py`](application/core/settings.py).
* **Option 2: Exporting Environment Variables:**
* Alternatively, you can export environment variables directly in your terminal. However, using a `.env` file is generally more organized for development.
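   For reference, a minimal `.env` for local development might look like the sketch below. The values are illustrative and assume MongoDB and Redis running locally (for example via the dev compose file from step 1); adjust them to your setup and chosen LLM provider:
   ```
   LLM_NAME=docsgpt
   VITE_API_STREAMING=true
   MONGO_URI=mongodb://localhost:27017/docsgpt
   CELERY_BROKER_URL=redis://localhost:6379/0
   CELERY_RESULT_BACKEND=redis://localhost:6379/1
   ```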
2. **Create a Python Virtual Environment (Optional but Recommended):**
Using a virtual environment isolates project dependencies and avoids conflicts with system-wide Python packages.
* **macOS and Linux:**
```bash
python -m venv venv
. venv/bin/activate
```
* **Windows:**
```bash
python -m venv venv
venv/Scripts/activate
```
3. **Download Embedding Model:**
The backend requires an embedding model. Download the `mpnet-base-v2` model and place it in the `model/` directory within the project root. You can use the following script:
```bash
wget https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip
unzip mpnet-base-v2.zip -d model
rm mpnet-base-v2.zip
```
Alternatively, you can manually download the zip file from [here](https://d3dg1063dc54p9.cloudfront.net/models/embeddings/mpnet-base-v2.zip), unzip it, and place the extracted folder in `model/`.
4. **Install Backend Dependencies:**
Navigate to the root of your DocsGPT repository and install the required Python packages:
```bash
pip install -r application/requirements.txt
```
5. **Run the Flask App:**
Start the Flask backend application:
```bash
flask --app application/app.py run --host=0.0.0.0 --port=7091
```
This command will launch the backend server, making it accessible on `http://localhost:7091`.
6. **Start the Celery Worker:**
Open a new terminal window (and activate your virtual environment if you used one). Start the Celery worker to handle background tasks:
```bash
celery -A application.app.celery worker -l INFO
```
This command will start the Celery worker, which processes tasks such as document parsing and vector embedding.
**Running in Debugger (VSCode):**
For easier debugging, you can launch the Flask app and Celery worker directly from VSCode's debugger.
* Press <kbd>Shift</kbd> + <kbd>Cmd</kbd> + <kbd>D</kbd> (macOS) or <kbd>Ctrl</kbd> + <kbd>Shift</kbd> + <kbd>D</kbd> (Windows/Linux) to open the Run and Debug view.
* You should see configurations named "Flask" and "Celery". Select the desired configuration and click the "Start Debugging" button (green play icon).
## 3. Start the Frontend
To run the DocsGPT frontend locally, you'll need Node.js and npm (Node Package Manager).
**Prerequisites:**
* **Node.js version 16 or higher:** Ensure you have Node.js version 16 or greater installed. You can check your Node.js version by running `node -v` in your terminal. npm is usually bundled with Node.js.
**Steps to start the frontend:**
1. **Navigate to the Frontend Directory:**
In your terminal, change the current directory to the `frontend` folder within your DocsGPT repository:
```bash
cd frontend
```
2. **Install Global Packages (If Needed):**
If you don't have `husky` and `vite` installed globally, you can install them:
```bash
npm install husky -g
npm install vite -g
```
You can skip this step if you already have these packages installed or prefer to use local installations (though global installation simplifies running the commands in this guide).
3. **Install Frontend Dependencies:**
Install the project's frontend dependencies using npm:
```bash
npm install --include=dev
```
This command reads the `package.json` file in the `frontend` directory and installs all listed dependencies, including development dependencies.
4. **Run the Frontend App:**
Start the frontend development server:
```bash
npm run dev
```
This command will start the Vite development server. The frontend application will typically be accessible at [http://localhost:5173/](http://localhost:5173/). The terminal will display the exact URL where the frontend is running.
With both the backend and frontend running, you should now have a fully functional DocsGPT development environment. You can access the application in your browser at [http://localhost:5173/](http://localhost:5173/) and start developing!

View File

@@ -0,0 +1,135 @@
---
title: Docker Deployment of DocsGPT
description: Deploy DocsGPT using Docker and Docker Compose for easy setup and management.
---
# Docker Deployment of DocsGPT
Docker is the recommended method for deploying DocsGPT, providing a consistent and isolated environment for the application to run. This guide will walk you through deploying DocsGPT using Docker and Docker Compose.
## Prerequisites
* **Docker Engine:** You need to have Docker Engine installed on your system.
* **macOS:** [Docker Desktop for Mac](https://docs.docker.com/desktop/install/mac-install/)
* **Linux:** [Docker Engine Installation Guide](https://docs.docker.com/engine/install/) (follow instructions for your specific distribution)
* **Windows:** [Docker Desktop for Windows](https://docs.docker.com/desktop/install/windows-install/) (requires WSL 2 backend, see notes below)
* **Docker Compose:** Docker Compose is usually included with Docker Desktop. If you are using Docker Engine separately, ensure you have Docker Compose V2 installed.
**Important Note for Windows Users:** Docker Desktop on Windows generally requires the WSL 2 backend to function correctly, especially when using features like host networking, which DocsGPT's Docker Compose setup relies on. Ensure WSL 2 is enabled and configured in Docker Desktop settings.
## Quickest Setup: Using DocsGPT Public API
The fastest way to try out DocsGPT is by using the public API endpoint. This requires minimal configuration and no local LLM setup.
1. **Clone the DocsGPT Repository (if you haven't already):**
```bash
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
```
2. **Create a `.env` file:**
In the root directory of your DocsGPT repository, create a file named `.env`.
3. **Add Public API Configuration to `.env`:**
Open the `.env` file and add the following lines:
```
LLM_NAME=docsgpt
VITE_API_STREAMING=true
```
This minimal configuration tells DocsGPT to use the public API. For more advanced settings and other LLM options, refer to the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings).
4. **Launch DocsGPT with Docker Compose:**
Navigate to the root directory of the DocsGPT repository in your terminal and run:
```bash
docker compose -f deployment/docker-compose.yaml up -d
```
The `-d` flag runs Docker Compose in detached mode (in the background).
5. **Access DocsGPT in your browser:**
Once the containers are running, open your web browser and go to [http://localhost:5173/](http://localhost:5173/).
6. **Stopping DocsGPT:**
To stop the application, navigate to the same directory in your terminal and run:
```bash
docker compose -f deployment/docker-compose.yaml down
```
## Optional Ollama Setup (Local Models)
DocsGPT provides optional Docker Compose files to easily integrate with [Ollama](https://ollama.com/) for running local models. These files add an official Ollama container to your Docker Compose setup. These files are located in the `deployment/optional/` directory.
There are two Ollama optional files:
* **`docker-compose.optional.ollama-cpu.yaml`**: For running Ollama on CPU.
* **`docker-compose.optional.ollama-gpu.yaml`**: For running Ollama on GPU (requires Docker to be configured for GPU usage).
### Launching with Ollama and Pulling a Model
1. **Clone the DocsGPT Repository and Create `.env` (as described above).**
2. **Launch DocsGPT with Ollama Docker Compose:**
Choose the appropriate Ollama Compose file (CPU or GPU) and launch DocsGPT:
**CPU:**
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml up -d
```
**GPU:**
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml up -d
```
3. **Pull the Ollama Model:**
**Crucially, after launching with Ollama, you need to pull the desired model into the Ollama container.** Find the `MODEL_NAME` you configured in your `.env` file (e.g., `llama3.2:1b`). Then execute the following command to pull the model *inside* the running Ollama container:
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml exec -it ollama ollama pull <MODEL_NAME>
```
or (for GPU):
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml exec -it ollama ollama pull <MODEL_NAME>
```
Replace `<MODEL_NAME>` with the actual model name from your `.env` file.
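    For reference, the local-deployment example in the [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings) uses `.env` entries along these lines for Ollama (the model name is whatever you pull):
    ```
    LLM_NAME=openai                                      # OpenAI-compatible API format for local engines
    API_KEY=None
    MODEL_NAME=llama3.2:1b
    OPENAI_BASE_URL=http://host.docker.internal:11434/v1 # default Ollama API URL from inside Docker
    ```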
4. **Access DocsGPT in your browser:**
Once the model is pulled and containers are running, open your web browser and go to [http://localhost:5173/](http://localhost:5173/).
5. **Stopping Ollama Setup:**
To stop a DocsGPT setup launched with Ollama optional files, use `docker compose down` and include all the compose files used during the `up` command:
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml down
```
or
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-gpu.yaml down
```
**Important for GPU Usage:**
* **NVIDIA Container Toolkit (for NVIDIA GPUs):** If you are using NVIDIA GPUs, you need to have the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) installed and configured on your system for Docker to access your GPU.
* **Docker GPU Configuration:** Ensure Docker is configured to utilize your GPU. Refer to the [Ollama Docker Hub page](https://hub.docker.com/r/ollama/ollama) and Docker documentation for GPU setup instructions specific to your GPU type (NVIDIA, AMD, Intel).
## Restarting After Configuration Changes
Whenever you modify the `.env` file or any Docker Compose files, you need to restart the Docker containers for the changes to be applied. Use the same `docker compose down` and `docker compose up -d` commands you used to launch DocsGPT, ensuring you include all relevant `-f` flags for optional files if you are using them.
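For example, if you launched with the CPU Ollama overlay, the restart looks like this:
```bash
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml down
docker compose -f deployment/docker-compose.yaml -f deployment/optional/docker-compose.optional.ollama-cpu.yaml up -d
```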
## Further Configuration
This guide covers the basic Docker deployment of DocsGPT. For detailed information on configuring various aspects of DocsGPT, such as LLM providers, models, vector stores, and more, please refer to the comprehensive [DocsGPT Settings Guide](/Deploying/DocsGPT-Settings).

View File

@@ -0,0 +1,107 @@
---
title: DocsGPT Settings
description: Configure your DocsGPT application by understanding the basic settings.
---
# DocsGPT Settings
DocsGPT is highly configurable, allowing you to tailor it to your specific needs and preferences. You can control various aspects of the application, from choosing the Large Language Model (LLM) provider to selecting embedding models and vector stores.
This document will guide you through the basic settings you can configure in DocsGPT. These settings determine how DocsGPT interacts with LLMs and processes your data.
## Configuration Methods
There are two primary ways to configure DocsGPT settings:
### 1. Configuration via `.env` file (Recommended)
The easiest and recommended way to configure basic settings is by using a `.env` file. This file should be located in the **root directory** of your DocsGPT project (the same directory where `setup.sh` is located).
**Example `.env` file structure:**
```
LLM_NAME=openai
API_KEY=YOUR_OPENAI_API_KEY
MODEL_NAME=gpt-4o
```
### 2. Configuration via `settings.py` file (Advanced)
For more advanced configurations or if you prefer to manage settings directly in code, you can modify the `settings.py` file. This file is located in the `application/core` directory of your DocsGPT project.
While modifying `settings.py` offers more flexibility, it's generally recommended to use the `.env` file for basic settings and reserve `settings.py` for more complex adjustments or when you need to configure settings programmatically.
**Location of `settings.py`:** `application/core/settings.py`
## Basic Settings Explained
Here are some of the most fundamental settings you'll likely want to configure:
- **`LLM_NAME`**: This setting determines which Large Language Model (LLM) provider DocsGPT will use. It tells DocsGPT which API to interact with.
- **Common values:**
- `docsgpt`: Use the DocsGPT Public API Endpoint (simple and free, as offered in `setup.sh` option 1).
- `openai`: Use OpenAI's API (requires an API key).
- `google`: Use Google's Vertex AI or Gemini models.
- `anthropic`: Use Anthropic's Claude models.
- `groq`: Use Groq's models.
- `huggingface`: Use HuggingFace Inference API.
- `azure_openai`: Use Azure OpenAI Service.
- `openai` (when using local inference engines like Ollama, Llama.cpp, TGI, etc.): This signals DocsGPT to use an OpenAI-compatible API format, even if the actual LLM is running locally.
- **`MODEL_NAME`**: Specifies the specific model to use from the chosen LLM provider. The available models depend on the `LLM_NAME` you've selected.
- **Examples:**
- For `LLM_NAME=openai`: `gpt-4o`
- For `LLM_NAME=google`: `gemini-2.0-flash`
- For local models (e.g., Ollama): `llama3.2:1b` (or any model name available in your setup).
- **`EMBEDDINGS_NAME`**: This setting defines which embedding model DocsGPT will use to generate vector embeddings for your documents. Embeddings are numerical representations of text that allow DocsGPT to understand the semantic meaning of your documents for efficient search and retrieval.
- **Default value:** `huggingface_sentence-transformers/all-mpnet-base-v2` (a good general-purpose embedding model).
- **Other options:** You can explore other embedding models from Hugging Face Sentence Transformers or other providers if needed.
- **`API_KEY`**: Required for most cloud-based LLM providers. This is your authentication key to access the LLM provider's API. You'll need to obtain this key from your chosen provider's platform.
- **`OPENAI_BASE_URL`**: Specifically used when `LLM_NAME` is set to `openai` but you are connecting to a local inference engine (like Ollama, Llama.cpp, etc.) that exposes an OpenAI-compatible API. This setting tells DocsGPT where to find your local LLM server.
## Configuration Examples
Let's look at some concrete examples of how to configure these settings in your `.env` file.
### Example for Cloud API Provider (OpenAI)
To use OpenAI's `gpt-4o` model, you would configure your `.env` file like this:
```
LLM_NAME=openai
API_KEY=YOUR_OPENAI_API_KEY # Replace with your actual OpenAI API key
MODEL_NAME=gpt-4o
```
Make sure to replace `YOUR_OPENAI_API_KEY` with your actual OpenAI API key.
### Example for Local Deployment
To use a local Ollama server with the `llama3.2:1b` model, you would configure your `.env` file like this:
```
LLM_NAME=openai # Using OpenAI compatible API format for local models
API_KEY=None # API Key is not needed for local Ollama
MODEL_NAME=llama3.2:1b
OPENAI_BASE_URL=http://host.docker.internal:11434/v1 # Default Ollama API URL within Docker
EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2 # You can also run embeddings locally if needed
```
In this case, even though you are using Ollama locally, `LLM_NAME` is set to `openai` because Ollama (and many other local inference engines) are designed to be API-compatible with OpenAI. `OPENAI_BASE_URL` points DocsGPT to the local Ollama server.
## Exploring More Settings
These are just the basic settings to get you started. The `settings.py` file contains many more advanced options that you can explore to further customize DocsGPT, such as:
- Vector store configuration (`VECTOR_STORE`, Qdrant, Milvus, LanceDB settings)
- Retriever settings (`RETRIEVERS_ENABLED`)
- Cache settings (`CACHE_REDIS_URL`)
- And many more!
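Purely as an illustration — the variable names come from the list above, but the values below are assumptions you should check against `settings.py`:
```
VECTOR_STORE=qdrant                        # assumption: whichever vector store you actually run
CACHE_REDIS_URL=redis://localhost:6379/2   # assumption: a local Redis instance
```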
For a complete list of available settings and their descriptions, refer to the `settings.py` file in `application/core`. Remember to restart your Docker containers after making changes to your `.env` file or `settings.py` for the changes to take effect.

View File

@@ -0,0 +1,33 @@
import { DeploymentCards } from '../../components/DeploymentCards';
# Deployment Guides
<DeploymentCards
items={[
{
title: 'Amazon Lightsail',
link: 'https://docs.docsgpt.cloud/Deploying/Amazon-Lightsail',
description: 'Self-hosting DocsGPT on Amazon Lightsail'
},
{
title: 'Railway',
link: 'https://docs.docsgpt.cloud/Deploying/Railway',
description: 'Hosting DocsGPT on Railway'
},
{
title: 'Civo Compute Cloud',
link: 'https://dev.to/rutamhere/deploying-docsgpt-on-civo-compute-c',
description: 'Step-by-step guide for Civo deployment'
},
{
title: 'DigitalOcean Droplet',
link: 'https://dev.to/rutamhere/deploying-docsgpt-on-digitalocean-droplet-50ea',
description: 'Guide for DigitalOcean deployment'
},
{
title: 'Kamatera Cloud',
link: 'https://dev.to/rutamhere/deploying-docsgpt-on-kamatera-performance-cloud-1bj',
description: 'Kamatera deployment tutorial'
}
]}
/>

View File

@@ -1,4 +1,10 @@
# Self-hosting DocsGPT on Kubernetes
---
title: Deploying DocsGPT on Kubernetes
description: Learn how to self-host DocsGPT on a Kubernetes cluster for scalable and robust deployments.
---
# Self-hosting DocsGPT on Kubernetes
This guide will walk you through deploying DocsGPT on Kubernetes.
@@ -11,7 +17,7 @@ Ensure you have the following installed before proceeding:
## Folder Structure
The `k8s` folder contains the necessary deployment and service configuration files:
The `deployment/k8s` folder contains the necessary deployment and service configuration files:
- `deployments/`
- `services/`
@@ -23,7 +29,7 @@ The `k8s` folder contains the necessary deployment and service configuration fil
```sh
git clone https://github.com/arc53/DocsGPT.git
cd docsgpt/k8s
cd docsgpt/deployment/k8s
```
2. **Configure Secrets (optional)**

View File

@@ -1,69 +0,0 @@
## Launching Web App
**Note**: Make sure you have Docker installed
**On macOS or Linux:**
Just run the following command:
```bash
./setup.sh
```
This command will install all the necessary dependencies and provide you with an option to use our LLM API, download a local model, or use OpenAI.
If you prefer to follow manual steps, refer to this guide:
1. Open and download this repository with
```bash
git clone https://github.com/arc53/DocsGPT.git
```
2. Create a `.env` file in your root directory and set your `API_KEY` with your [OpenAI API key](https://platform.openai.com/account/api-keys). (optional in case you want to use OpenAI)
3. Run the following commands:
```bash
docker-compose build && docker-compose up
```
4. Navigate to http://localhost:5173/.
To stop, simply press **Ctrl + C**.
**For WINDOWS:**
To run the setup on Windows, you have two options: using the Windows Subsystem for Linux (WSL) or using Git Bash or Command Prompt.
**Option 1: Using Windows Subsystem for Linux (WSL):**
1. Install WSL if you haven't already. You can follow the official Microsoft documentation for installation: (https://learn.microsoft.com/en-us/windows/wsl/install).
2. After setting up WSL, open the WSL terminal.
3. Clone the repository and create the `.env` file:
```bash
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
echo "API_KEY=Yourkey" > .env
echo "VITE_API_STREAMING=true" >> .env
```
4. Run the following command to start the setup with Docker Compose:
```bash
./run-with-docker-compose.sh
```
5. Open your web browser and navigate to http://localhost:5173/.
6. To stop the setup, just press **Ctrl + C** in the WSL terminal.
**Option 2: Using Git Bash or Command Prompt (CMD):**
1. Install Git for Windows if you haven't already. Download it from the official website: (https://gitforwindows.org/).
2. Open Git Bash or Command Prompt.
3. Clone the repository and create the `.env` file:
```bash
git clone https://github.com/arc53/DocsGPT.git
cd DocsGPT
echo "API_KEY=Yourkey" > .env
echo "VITE_API_STREAMING=true" >> .env
```
4. Run the following command to start the setup with Docker Compose:
```bash
./run-with-docker-compose.sh
```
5. Open your web browser and navigate to http://localhost:5173/.
6. To stop the setup, just press **Ctrl + C** in the Git Bash or Command Prompt terminal.
These steps should help you set up and run the project on Windows using either WSL or Git Bash/Command Prompt.
**Important:** Ensure that Docker is installed and properly configured on your Windows system for these steps to work.

View File

@@ -1,3 +1,7 @@
---
title: Hosting DocsGPT on Railway
description: Learn how to deploy your own DocsGPT instance on Railway with this step-by-step tutorial
---
# Self-hosting DocsGPT on Railway
@@ -97,11 +101,11 @@ To save the file, press CTRL+X, then Y, and then ENTER.
Next, set the correct IP for the Backend by opening the docker-compose.yml file:
Next, set the correct IP for the Backend by opening the docker-compose.yaml file:
`nano docker-compose.yml`
`nano deployment/docker-compose.yaml`
@@ -123,7 +127,7 @@ You're almost there! Now that all the necessary bits and pieces have been instal
`sudo docker-compose up -d`
`sudo docker compose -f deployment/docker-compose.yaml up -d`

View File

@@ -1,18 +1,32 @@
{
"DocsGPT-Settings": {
"title": "⚙️ App Configuration",
"href": "/Deploying/DocsGPT-Settings"
},
"Docker-Deploying": {
"title": "🛳️ Docker Setup",
"href": "/Deploying/Docker-Deploying"
},
"Development-Environment": {
"title": "🛠Development Environment",
"href": "/Deploying/Development-Environment"
},
"Kubernetes-Deploying": {
"title": "☸️ Deploying on Kubernetes",
"href": "/Deploying/Kubernetes-Deploying"
},
"Hosting-the-app": {
"title": "☁️ Hosting DocsGPT",
"href": "/Deploying/Hosting-the-app"
},
"Quickstart": {
"title": "Quickstart",
"href": "/Deploying/Quickstart"
"Amazon-Lightsail": {
"title": "Hosting DocsGPT on Amazon Lightsail",
"href": "/Deploying/Amazon-Lightsail",
"display": "hidden"
},
"Railway-Deploying": {
"title": "🚂Deploying on Railway",
"href": "/Deploying/Railway-Deploying"
},
"Kubernetes-Deploying": {
"title": "☸Deploying on Kubernetes",
"href": "/Deploying/Kubernetes-Deploying"
"Railway": {
"title": "Hosting DocsGPT on Railway",
"href": "/Deploying/Railway",
"display": "hidden"
}
}

View File

@@ -1,8 +1,12 @@
---
title: Comprehensive Guide to Setting Up the Chatwoot Extension with DocsGPT
description: This step-by-step guide walks you through the process of setting up the Chatwoot extension with DocsGPT, enabling seamless integration for automated responses and enhanced customer support. Learn how to launch DocsGPT, retrieve your Chatwoot access token, configure the .env file, and start the extension.
---
## Chatwoot Extension Setup Guide
### Step 1: Prepare and Start DocsGPT
- **Launch DocsGPT**: Follow the instructions in our [DocsGPT Wiki](https://github.com/arc53/DocsGPT/wiki) to start DocsGPT. Make sure to load your documentation.
- **Launch DocsGPT**: Follow the instructions in our [Quickstart](/quickstart) to start DocsGPT. Make sure to load your documentation.
### Step 2: Get Access Token from Chatwoot

Some files were not shown because too many files have changed in this diff Show More