mirror of
https://github.com/kossakovsky/n8n-install.git
synced 2026-03-07 22:33:11 +00:00
Refactor docker-compose.yml for improved service management
- Removed the ollama service configurations to streamline the setup. - Enhanced n8n service environment variables for better configuration and performance. - Added new services for Grafana, Prometheus, and Node Exporter to support monitoring. - Updated existing services to ensure proper dependencies and health checks. - Improved volume management and entrypoint scripts for n8n import functionality.
This commit is contained in:
@@ -1,6 +1,5 @@
|
||||
volumes:
|
||||
n8n_storage:
|
||||
ollama_storage:
|
||||
qdrant_storage:
|
||||
open-webui:
|
||||
flowise:
|
||||
@@ -11,45 +10,31 @@ volumes:
|
||||
langfuse_clickhouse_data:
|
||||
langfuse_clickhouse_logs:
|
||||
langfuse_minio_data:
|
||||
grafana:
|
||||
prometheus_data:
|
||||
|
||||
x-n8n: &service-n8n
|
||||
image: n8nio/n8n:latest
|
||||
environment:
|
||||
- DB_TYPE=postgresdb
|
||||
- DB_POSTGRESDB_HOST=db
|
||||
- DB_POSTGRESDB_USER=postgres
|
||||
- DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
|
||||
- DB_POSTGRESDB_DATABASE=postgres
|
||||
- N8N_DIAGNOSTICS_ENABLED=false
|
||||
- N8N_PERSONALIZATION_ENABLED=false
|
||||
- N8N_ENCRYPTION_KEY
|
||||
- N8N_USER_MANAGEMENT_JWT_SECRET
|
||||
- WEBHOOK_URL=${N8N_HOSTNAME:+https://}${N8N_HOSTNAME:-http://localhost:5678}
|
||||
|
||||
x-ollama: &service-ollama
|
||||
image: ollama/ollama:latest
|
||||
container_name: ollama
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 11434:11434
|
||||
environment:
|
||||
- OLLAMA_CONTEXT_LENGTH=8192
|
||||
volumes:
|
||||
- ollama_storage:/root/.ollama
|
||||
|
||||
x-init-ollama: &init-ollama
|
||||
image: ollama/ollama:latest
|
||||
container_name: ollama-pull-llama
|
||||
volumes:
|
||||
- ollama_storage:/root/.ollama
|
||||
entrypoint: /bin/sh
|
||||
command:
|
||||
- "-c"
|
||||
- "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull qwen2.5:7b-instruct-q4_K_M; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
|
||||
# For a larger context length version of the model, run these commands:
|
||||
# echo "FROM qwen2.5:7b-instruct-q4_K_M\n\nPARAMETER num_ctx 8192" > Modelfile
|
||||
# ollama create qwen2.5:7b-8k -f ./Modelfile
|
||||
# Change the name of the LLM and num_ctx as you see fit.
|
||||
environment: &service-n8n-env
|
||||
DB_TYPE: postgresdb
|
||||
DB_POSTGRESDB_HOST: db
|
||||
DB_POSTGRESDB_USER: postgres
|
||||
DB_POSTGRESDB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_POSTGRESDB_DATABASE: postgres
|
||||
N8N_DIAGNOSTICS_ENABLED: false
|
||||
N8N_PERSONALIZATION_ENABLED: false
|
||||
N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
|
||||
N8N_USER_MANAGEMENT_JWT_SECRET: ${N8N_USER_MANAGEMENT_JWT_SECRET}
|
||||
WEBHOOK_URL: ${N8N_HOSTNAME:+https://}${N8N_HOSTNAME:-http://localhost:5678}
|
||||
N8N_METRICS: true
|
||||
NODE_ENV: production
|
||||
EXECUTIONS_MODE: queue
|
||||
N8N_RUNNERS_ENABLED: true
|
||||
QUEUE_HEALTH_CHECK_ACTIVE: true
|
||||
QUEUE_BULL_REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
QUEUE_BULL_REDIS_PORT: ${REDIS_PORT:-6379}
|
||||
N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS: true
|
||||
N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE: true
|
||||
|
||||
services:
|
||||
flowise:
|
||||
@@ -57,21 +42,19 @@ services:
|
||||
restart: unless-stopped
|
||||
container_name: flowise
|
||||
environment:
|
||||
- PORT=3001
|
||||
ports:
|
||||
- 3001:3001
|
||||
- PORT=3001
|
||||
- FLOWISE_USERNAME
|
||||
- FLOWISE_PASSWORD
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
- "host.docker.internal:host-gateway"
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
- ~/.flowise:/root/.flowise
|
||||
entrypoint: /bin/sh -c "sleep 3; flowise start"
|
||||
|
||||
open-webui:
|
||||
image: ghcr.io/open-webui/open-webui:main
|
||||
restart: unless-stopped
|
||||
container_name: open-webui
|
||||
ports:
|
||||
- "3000:8080"
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
volumes:
|
||||
@@ -80,19 +63,22 @@ services:
|
||||
n8n-import:
|
||||
<<: *service-n8n
|
||||
container_name: n8n-import
|
||||
environment:
|
||||
<<: *service-n8n-env
|
||||
RUN_N8N_IMPORT: ${RUN_N8N_IMPORT:-false}
|
||||
entrypoint: /bin/sh
|
||||
command:
|
||||
- "-c"
|
||||
- "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
|
||||
command: /scripts/n8n_import_script.sh
|
||||
volumes:
|
||||
- ./n8n/backup:/backup
|
||||
- ./n8n/backup:/backup
|
||||
- ./n8n/n8n_import_script.sh:/scripts/n8n_import_script.sh:ro
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
|
||||
n8n:
|
||||
<<: *service-n8n
|
||||
container_name: n8n
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 5678:5678
|
||||
volumes:
|
||||
- n8n_storage:/home/node/.n8n
|
||||
- ./n8n/backup:/backup
|
||||
@@ -101,33 +87,54 @@ services:
|
||||
n8n-import:
|
||||
condition: service_completed_successfully
|
||||
|
||||
n8n-worker:
|
||||
<<: *service-n8n
|
||||
container_name: n8n-worker
|
||||
restart: unless-stopped
|
||||
command: worker
|
||||
volumes:
|
||||
- n8n_storage:/home/node/.n8n
|
||||
- ./shared:/data/shared
|
||||
depends_on:
|
||||
n8n:
|
||||
condition: service_started
|
||||
redis:
|
||||
condition: service_healthy
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
|
||||
qdrant:
|
||||
image: qdrant/qdrant
|
||||
container_name: qdrant
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 6333:6333
|
||||
volumes:
|
||||
- qdrant_storage:/qdrant/storage
|
||||
|
||||
caddy:
|
||||
container_name: caddy
|
||||
image: docker.io/library/caddy:2-alpine
|
||||
network_mode: host
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||||
- caddy-data:/data:rw
|
||||
- caddy-config:/config:rw
|
||||
environment:
|
||||
- N8N_HOSTNAME=${N8N_HOSTNAME:-":8001"}
|
||||
- WEBUI_HOSTNAME=${WEBUI_HOSTNAME:-":8002"}
|
||||
- FLOWISE_HOSTNAME=${FLOWISE_HOSTNAME:-":8003"}
|
||||
- OLLAMA_HOSTNAME=${OLLAMA_HOSTNAME:-":8004"}
|
||||
- SUPABASE_HOSTNAME=${SUPABASE_HOSTNAME:-":8005"}
|
||||
- SEARXNG_HOSTNAME=${SEARXNG_HOSTNAME:-":8006"}
|
||||
- LANGFUSE_HOSTNAME=${LANGFUSE_HOSTNAME:-":8007"}
|
||||
- N8N_HOSTNAME=${N8N_HOSTNAME}
|
||||
- WEBUI_HOSTNAME=${WEBUI_HOSTNAME}
|
||||
- FLOWISE_HOSTNAME=${FLOWISE_HOSTNAME}
|
||||
- SUPABASE_HOSTNAME=${SUPABASE_HOSTNAME}
|
||||
- SEARXNG_HOSTNAME=${SEARXNG_HOSTNAME}
|
||||
- LANGFUSE_HOSTNAME=${LANGFUSE_HOSTNAME}
|
||||
- LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL:-internal}
|
||||
- PROMETHEUS_HOSTNAME=${PROMETHEUS_HOSTNAME}
|
||||
- GRAFANA_HOSTNAME=${GRAFANA_HOSTNAME}
|
||||
- PROMETHEUS_USERNAME=${PROMETHEUS_USERNAME}
|
||||
- PROMETHEUS_PASSWORD_HASH=${PROMETHEUS_PASSWORD_HASH}
|
||||
- SEARXNG_USERNAME=${SEARXNG_USERNAME}
|
||||
- SEARXNG_PASSWORD_HASH=${SEARXNG_PASSWORD_HASH}
|
||||
cap_drop:
|
||||
- ALL
|
||||
cap_add:
|
||||
@@ -143,15 +150,13 @@ services:
|
||||
restart: always
|
||||
depends_on: &langfuse-depends-on
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
condition: service_healthy
|
||||
minio:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
clickhouse:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- 127.0.0.1:3030:3030
|
||||
environment: &langfuse-worker-env
|
||||
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres
|
||||
SALT: ${LANGFUSE_SALT}
|
||||
@@ -200,11 +205,9 @@ services:
|
||||
image: langfuse/langfuse:3
|
||||
restart: always
|
||||
depends_on: *langfuse-depends-on
|
||||
ports:
|
||||
- 3002:3000
|
||||
environment:
|
||||
<<: *langfuse-worker-env
|
||||
NEXTAUTH_URL: http://localhost:3002
|
||||
NEXTAUTH_URL: https://${LANGFUSE_HOSTNAME}
|
||||
NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
|
||||
LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-}
|
||||
LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-}
|
||||
@@ -227,9 +230,6 @@ services:
|
||||
volumes:
|
||||
- langfuse_clickhouse_data:/var/lib/clickhouse
|
||||
- langfuse_clickhouse_logs:/var/log/clickhouse-server
|
||||
ports:
|
||||
- 127.0.0.1:8123:8123
|
||||
- 127.0.0.1:9000:9000
|
||||
healthcheck:
|
||||
test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
|
||||
interval: 5s
|
||||
@@ -246,9 +246,6 @@ services:
|
||||
environment:
|
||||
MINIO_ROOT_USER: minio
|
||||
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
|
||||
ports:
|
||||
- 9090:9000
|
||||
- 127.0.0.1:9091:9001
|
||||
volumes:
|
||||
- langfuse_minio_data:/data
|
||||
healthcheck:
|
||||
@@ -270,16 +267,14 @@ services:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_DB: postgres
|
||||
ports:
|
||||
- 127.0.0.1:5433:5432
|
||||
volumes:
|
||||
- langfuse_postgres_data:/var/lib/postgresql/data
|
||||
- langfuse_postgres_data:/var/lib/postgresql/data
|
||||
|
||||
redis:
|
||||
container_name: redis
|
||||
image: docker.io/valkey/valkey:8-alpine
|
||||
command: valkey-server --save 30 1 --loglevel warning
|
||||
restart: unless-stopped
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- valkey-data:/data
|
||||
cap_drop:
|
||||
@@ -297,14 +292,12 @@ services:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 3s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
retries: 10
|
||||
|
||||
searxng:
|
||||
container_name: searxng
|
||||
image: docker.io/searxng/searxng:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- ./searxng:/etc/searxng:rw
|
||||
environment:
|
||||
@@ -321,46 +314,70 @@ services:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
max-file: "1"
|
||||
|
||||
ollama-cpu:
|
||||
profiles: ["cpu"]
|
||||
<<: *service-ollama
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
container_name: prometheus
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
- prometheus_data:/prometheus
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
|
||||
ollama-gpu:
|
||||
profiles: ["gpu-nvidia"]
|
||||
<<: *service-ollama
|
||||
node-exporter:
|
||||
image: prom/node-exporter:latest
|
||||
container_name: node-exporter
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- /proc:/host/proc:ro
|
||||
- /sys:/host/sys:ro
|
||||
- /:/rootfs:ro
|
||||
command:
|
||||
- "--path.procfs=/host/proc"
|
||||
- "--path.sysfs=/host/sys"
|
||||
- "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
|
||||
expose:
|
||||
- 9100
|
||||
|
||||
cadvisor:
|
||||
image: gcr.io/cadvisor/cadvisor:latest
|
||||
container_name: cadvisor
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- /:/rootfs:ro
|
||||
- /var/run:/var/run:rw
|
||||
- /sys:/sys:ro
|
||||
- /var/lib/docker/:/var/lib/docker:ro
|
||||
expose:
|
||||
- 8080
|
||||
|
||||
grafana:
|
||||
image: grafana/grafana:latest
|
||||
container_name: grafana
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
|
||||
- GF_PROVISIONING_PATH=/etc/grafana/provisioning
|
||||
volumes:
|
||||
- grafana:/var/lib/grafana
|
||||
- ./grafana/provisioning:/etc/grafana/provisioning
|
||||
- ./grafana/dashboards:/var/lib/grafana/dashboards # Standard path often used, let's use Grafana's managed dashboards dir
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
depends_on:
|
||||
- prometheus
|
||||
|
||||
crawl4ai:
|
||||
image: unclecode/crawl4ai:latest # Use official image
|
||||
container_name: crawl4ai
|
||||
restart: unless-stopped
|
||||
shm_size: 1g # Recommended for browser operations
|
||||
env_file:
|
||||
- .env
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
|
||||
ollama-gpu-amd:
|
||||
profiles: ["gpu-amd"]
|
||||
<<: *service-ollama
|
||||
image: ollama/ollama:rocm
|
||||
devices:
|
||||
- "/dev/kfd"
|
||||
- "/dev/dri"
|
||||
|
||||
ollama-pull-llama-cpu:
|
||||
profiles: ["cpu"]
|
||||
<<: *init-ollama
|
||||
depends_on:
|
||||
- ollama-cpu
|
||||
|
||||
ollama-pull-llama-gpu:
|
||||
profiles: ["gpu-nvidia"]
|
||||
<<: *init-ollama
|
||||
depends_on:
|
||||
- ollama-gpu
|
||||
|
||||
ollama-pull-llama-gpu-amd:
|
||||
profiles: [gpu-amd]
|
||||
<<: *init-ollama
|
||||
image: ollama/ollama:rocm
|
||||
depends_on:
|
||||
- ollama-gpu-amd
|
||||
limits:
|
||||
cpus: "1.0"
|
||||
memory: 4G # Increased based on documentation recommendation
|
||||
|
||||
Reference in New Issue
Block a user