# Mirror of https://github.com/dalekurt/local-llm-stack.git
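# Minimal usage sketch (assumes Docker Compose v2 and a .env file next to this
# compose file providing the variables referenced below):
#   docker compose up -d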
volumes:
  n8n_storage:
  postgres_storage:
  qdrant_storage:
  open-webui:
  flowise:
  anythingllm_storage:
  # ollama_storage: # Uncomment if using containerized Ollama

networks:
  ai-network:

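# Shared n8n definition: the x-n8n extension field below declares a YAML anchor
# (&service-n8n) that the n8n and n8n-import services merge in via `<<: *service-n8n`.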
x-n8n: &service-n8n
  image: n8nio/n8n:latest
  networks: [ 'ai-network' ]
  environment:
    - DB_TYPE=postgresdb
    - DB_POSTGRESDB_HOST=postgres
    - DB_POSTGRESDB_USER=${POSTGRES_USER}
    - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
    - N8N_DIAGNOSTICS_ENABLED=${N8N_DIAGNOSTICS_ENABLED}
    - N8N_PERSONALIZATION_ENABLED=${N8N_PERSONALIZATION_ENABLED}
    - N8N_ENCRYPTION_KEY
    - N8N_USER_MANAGEMENT_JWT_SECRET
  links:
    - postgres

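# Example .env entries consumed by the anchor above (placeholder values only;
# adjust for your setup):
#   POSTGRES_USER=postgres
#   POSTGRES_PASSWORD=change-me
#   POSTGRES_DB=n8n
#   N8N_ENCRYPTION_KEY=<long random string>
#   N8N_USER_MANAGEMENT_JWT_SECRET=<long random string>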
services:
  # Ollama service - commented out by default since Ollama is installed natively
  # Uncomment this service if you want to run Ollama in a container
  # ollama:
  #   image: ollama/ollama:latest
  #   container_name: ollama
  #   networks: [ 'ai-network' ]
  #   restart: unless-stopped
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama_storage:/root/.ollama
  #   # If using containerized Ollama, update OLLAMA_BASE_PATH in .env to:
  #   # OLLAMA_BASE_PATH=http://ollama:11434
  #   # EMBEDDING_BASE_PATH=http://ollama:11434

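  # With the default native-Ollama setup, OLLAMA_BASE_PATH and EMBEDDING_BASE_PATH in
  # .env presumably point at the host instead, e.g. http://host.docker.internal:11434
  # (an assumption consistent with the extra_hosts entries below, not set by this file).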
  flowise:
    image: flowiseai/flowise
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: flowise
    environment:
      - PORT=${FLOWISE_PORT:-3001}
    ports:
      - ${FLOWISE_PORT:-3001}:3001
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ~/.flowise:/root/.flowise
    entrypoint: /bin/sh -c "sleep 3; flowise start"

  anythingllm:
    image: mintplexlabs/anythingllm
    container_name: anythingllm
    networks: [ 'ai-network' ]
    restart: unless-stopped
    ports:
      - "3002:3001"
    cap_add:
      - SYS_ADMIN
    environment:
      - STORAGE_DIR=${STORAGE_DIR}
      - JWT_SECRET=${JWT_SECRET}
      - LLM_PROVIDER=${LLM_PROVIDER}
      - OLLAMA_BASE_PATH=${OLLAMA_BASE_PATH}
      - OLLAMA_MODEL_PREF=${OLLAMA_MODEL_PREF}
      - OLLAMA_MODEL_TOKEN_LIMIT=${OLLAMA_MODEL_TOKEN_LIMIT}
      - EMBEDDING_ENGINE=${EMBEDDING_ENGINE}
      - EMBEDDING_BASE_PATH=${EMBEDDING_BASE_PATH}
      - EMBEDDING_MODEL_PREF=${EMBEDDING_MODEL_PREF}
      - EMBEDDING_MODEL_MAX_CHUNK_LENGTH=${EMBEDDING_MODEL_MAX_CHUNK_LENGTH}
      - VECTOR_DB=${VECTOR_DB}
      - QDRANT_ENDPOINT=${QDRANT_ENDPOINT}
      - QDRANT_COLLECTION=${QDRANT_COLLECTION}
      - WHISPER_PROVIDER=${WHISPER_PROVIDER}
      - TTS_PROVIDER=${TTS_PROVIDER}
      - PASSWORDMINCHAR=${PASSWORDMINCHAR}
    volumes:
      - anythingllm_storage:/app/server/storage
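    # Note: STORAGE_DIR in .env presumably matches the container-side path of the
    # mount above (/app/server/storage); an assumption, not set by this file.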
    # When using native Ollama, add host.docker.internal to allow the container to access the host
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      - qdrant

  open-webui:
    image: ghcr.io/open-webui/open-webui:latest
    networks: [ 'ai-network' ]
    restart: unless-stopped
    container_name: open-webui
    ports:
      - "11500:8080"
    # When using native Ollama, this allows the container to access the host
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - open-webui:/app/backend/data
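    # If the UI does not find a native Ollama automatically, OLLAMA_BASE_URL can be set
    # here (e.g. http://host.docker.internal:11434); an assumption based on upstream
    # Open WebUI docs, not part of the original file.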

  postgres:
    image: postgres:16-alpine
    networks: [ 'ai-network' ]
    restart: unless-stopped
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER
      - POSTGRES_PASSWORD
      - POSTGRES_DB
    volumes:
      - postgres_storage:/var/lib/postgresql/data
    healthcheck:
      test: [ 'CMD-SHELL', 'pg_isready -h localhost -U ${POSTGRES_USER} -d ${POSTGRES_DB}' ]
      interval: 5s
      timeout: 5s
      retries: 10

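  # One-shot import job: restores credentials and workflows from ./n8n/backup, then
  # exits; the n8n service below waits for it via service_completed_successfully.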
  n8n-import:
    <<: *service-n8n
    container_name: n8n-import
    entrypoint: /bin/sh
    command:
      - "-c"
      - "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
    volumes:
      - ./n8n/backup:/backup
    depends_on:
      postgres:
        condition: service_healthy

  n8n:
    <<: *service-n8n
    container_name: n8n
    restart: unless-stopped
    ports:
      - 5678:5678
    volumes:
      - n8n_storage:/home/node/.n8n
      - ./n8n/backup:/backup
      - ./shared:/data/shared
      - ./shared:/home/node/host_mount/shared_drive
    depends_on:
      postgres:
        condition: service_healthy
      n8n-import:
        condition: service_completed_successfully

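  # Qdrant's HTTP API listens on 6333; from inside ai-network, QDRANT_ENDPOINT in .env
  # would presumably be http://qdrant:6333 (an assumption, not set by this file).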
  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
    networks: [ 'ai-network' ]
    restart: unless-stopped
    ports:
      - 6333:6333
    volumes:
      - qdrant_storage:/qdrant/storage