# docker-compose.yml from https://github.com/dalekurt/local-llm-stack.git (mirror synced 2026-01-28)
volumes:
  # Most service data is bind-mounted from host paths under ./data in each
  # service definition; of the named volumes declared below, only
  # postgres_storage is actually referenced.
  n8n_storage: {}
  postgres_storage: {}
  qdrant_storage: {}
  openwebui_storage: {}
  flowise_storage: {}
  anythingllm_storage: {}
  pipelines_storage: {}

networks:
  ai-network:
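# Shared n8n configuration. The n8n-import and n8n services further down merge
# this block in with the YAML merge key `<<: *service-n8n` and then layer their
# own container_name, ports, volumes, and depends_on on top of it.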
x-n8n: &service-n8n
  image: n8nio/n8n:latest
  networks: [ 'ai-network' ]
  environment:
    - DB_TYPE=postgresdb
    - DB_POSTGRESDB_HOST=postgres
    - DB_POSTGRESDB_USER=${POSTGRES_USER}
    - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
    - N8N_DIAGNOSTICS_ENABLED=${N8N_DIAGNOSTICS_ENABLED}
    - N8N_PERSONALIZATION_ENABLED=${N8N_PERSONALIZATION_ENABLED}
    - N8N_ENCRYPTION_KEY
    - N8N_USER_MANAGEMENT_JWT_SECRET
  links:
    - postgres
services:
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: ollama
  #   networks: [ 'ai-network' ]
  #   restart: unless-stopped
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama_storage:/root/.ollama
  #
  # Note: when enabling the containerized Ollama above, also declare the
  # ollama_storage volume under the top-level `volumes:` key and update .env:
  #   OLLAMA_BASE_PATH=http://ollama:11434
  #   EMBEDDING_BASE_PATH=http://ollama:11434
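  # For reference, a minimal .env sketch for the two setups (illustrative
  # values, assumed rather than shipped with this stack):
  #   Host Ollama (the wiring used in this file):
  #     OLLAMA_BASE_PATH=http://host.docker.internal:11434
  #     EMBEDDING_BASE_PATH=http://host.docker.internal:11434
  #   Containerized Ollama (service commented out above):
  #     OLLAMA_BASE_PATH=http://ollama:11434
  #     EMBEDDING_BASE_PATH=http://ollama:11434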
  flowise:
    image: flowiseai/flowise
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: flowise
    environment:
      # Flowise listens on a fixed port inside the container; only the host
      # port is configurable, via FLOWISE_PORT (default 3001).
      - PORT=3010
    ports:
      - "${FLOWISE_PORT:-3001}:3010"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./data/flowise:/root/.flowise
  anythingllm:
    image: mintplexlabs/anythingllm
    container_name: anythingllm
    networks: [ 'ai-network' ]
    restart: unless-stopped
    ports:
      - "3002:3001"
    cap_add:
      - SYS_ADMIN
    environment:
      - STORAGE_DIR=${STORAGE_DIR}
      - JWT_SECRET=${JWT_SECRET}
      - LLM_PROVIDER=${LLM_PROVIDER}
      - OLLAMA_BASE_PATH=http://host.docker.internal:11434
      - OLLAMA_MODEL_PREF=${OLLAMA_MODEL_PREF}
      - OLLAMA_MODEL_TOKEN_LIMIT=${OLLAMA_MODEL_TOKEN_LIMIT}
      - EMBEDDING_ENGINE=${EMBEDDING_ENGINE}
      - EMBEDDING_BASE_PATH=http://host.docker.internal:11434
      - EMBEDDING_MODEL_PREF=${EMBEDDING_MODEL_PREF}
      - EMBEDDING_MODEL_MAX_CHUNK_LENGTH=${EMBEDDING_MODEL_MAX_CHUNK_LENGTH}
      - VECTOR_DB=${VECTOR_DB}
      - QDRANT_ENDPOINT=${QDRANT_ENDPOINT}
      - QDRANT_COLLECTION=${QDRANT_COLLECTION}
      - WHISPER_PROVIDER=${WHISPER_PROVIDER}
      - TTS_PROVIDER=${TTS_PROVIDER}
      - PASSWORDMINCHAR=${PASSWORDMINCHAR}
    volumes:
      - ./data/anythingllm:/app/server/storage
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      - qdrant
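  # AnythingLLM is exposed on host port 3002. Since it shares ai-network with
  # qdrant, QDRANT_ENDPOINT in .env would typically be http://qdrant:6333;
  # that value is an assumption about the intended wiring, not fixed here.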
  open-webui:
    image: ghcr.io/open-webui/open-webui:latest
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: open-webui
    ports:
      - "11500:8080"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./data/openwebui:/app/backend/data
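  # Open WebUI is served on host port 11500 (container port 8080); the
  # host-gateway entry lets it reach an Ollama instance running on the host
  # at host.docker.internal:11434.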
  pipelines:
    image: ghcr.io/open-webui/pipelines:main
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: pipelines
    volumes:
      - ./data/pipelines:/app/pipelines
    environment:
      - PIPELINES_API_KEY=0p3n-w3bu!
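  # The API key above is hard-coded; change it if this stack is reachable by
  # anyone else. In Open WebUI, this container can be added as an
  # OpenAI-compatible connection using that key (the pipelines image defaults
  # to port 9099, assuming the upstream default has not changed).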
  postgres:
    image: postgres:16-alpine
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: postgres
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER
      - POSTGRES_PASSWORD
      - POSTGRES_DB
    volumes:
      - postgres_storage:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD-SHELL', 'pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}' ]
      interval: 5s
      timeout: 5s
      retries: 10
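  # The healthcheck above gates the n8n services below: both declare
  # `condition: service_healthy` on postgres, so they start only once the
  # database accepts connections.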
  n8n-import:
    <<: *service-n8n
    container_name: n8n-import
    entrypoint: /bin/sh
    command:
      - "-c"
      - "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
    volumes:
      - ./n8n/backup:/backup
    depends_on:
      postgres:
        condition: service_healthy
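  # n8n-import is a one-shot job: it loads any credentials and workflows from
  # ./n8n/backup and exits. The main n8n service below waits for it via
  # `condition: service_completed_successfully`.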
  n8n:
    <<: *service-n8n
    container_name: n8n
    restart: unless-stopped
    ports:
      - 5678:5678
    volumes:
      - ./data/n8n:/home/node/.n8n
      - ./n8n/backup:/backup
      - ./shared:/data/shared
      - ./shared:/home/node/host_mount/shared_drive
    depends_on:
      postgres:
        condition: service_healthy
      n8n-import:
        condition: service_completed_successfully
  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
    networks: [ 'ai-network' ]
    restart: unless-stopped
    ports:
      - 6333:6333
    volumes:
      - ./data/qdrant:/qdrant/storage
  watchtower:
    image: containrrr/watchtower
    container_name: watchtower
    networks: [ 'ai-network' ]
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    command: --interval 30 --cleanup --label-enable --monitor-only --scope ai-network
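    # Flag summary: poll every 30 s, remove old images after an update
    # (--cleanup), watch only containers labelled
    # com.centurylinklabs.watchtower.enable=true (--label-enable), report
    # available updates without applying them (--monitor-only), and limit
    # watching to containers carrying a matching
    # com.centurylinklabs.watchtower.scope label. The scope value matches a
    # label, not the Docker network name.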