Mirror of https://github.com/kossakovsky/n8n-install.git, synced 2026-03-07 22:33:11 +00:00
- Removed the ollama service configurations to streamline the setup. - Enhanced n8n service environment variables for better configuration and performance. - Added new services for Grafana, Prometheus, and Node Exporter to support monitoring. - Updated existing services to ensure proper dependencies and health checks. - Improved volume management and entrypoint scripts for n8n import functionality.
384 lines
12 KiB
YAML
# Named volumes for persistent service data (bare keys use default driver).
volumes:
  n8n_storage:
  qdrant_storage:
  open-webui:
  flowise:
  caddy-data:
  caddy-config:
  valkey-data:
  langfuse_postgres_data:
  langfuse_clickhouse_data:
  langfuse_clickhouse_logs:
  langfuse_minio_data:
  grafana:
  prometheus_data:
# Shared base for every n8n container (main, worker, import). Services merge
# this via `<<: *service-n8n` and the environment via `<<: *service-n8n-env`.
x-n8n: &service-n8n
  image: n8nio/n8n:latest
  environment: &service-n8n-env
    DB_TYPE: postgresdb
    # NOTE(review): the Postgres service in this file is named `postgres` and
    # no `db` service/alias is visible anywhere, so the original host `db`
    # could never resolve on the default Compose network — pointing at the
    # service name instead. Confirm no external alias existed.
    DB_POSTGRESDB_HOST: postgres
    DB_POSTGRESDB_USER: postgres
    DB_POSTGRESDB_PASSWORD: ${POSTGRES_PASSWORD}
    DB_POSTGRESDB_DATABASE: postgres
    # Boolean-looking values are quoted: in mapping form Compose requires
    # string env values; an unquoted YAML bool is rejected by some versions.
    N8N_DIAGNOSTICS_ENABLED: "false"
    N8N_PERSONALIZATION_ENABLED: "false"
    N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
    N8N_USER_MANAGEMENT_JWT_SECRET: ${N8N_USER_MANAGEMENT_JWT_SECRET}
    # If N8N_HOSTNAME is set -> https://<hostname>; otherwise the whole value
    # falls back to http://localhost:5678.
    WEBHOOK_URL: ${N8N_HOSTNAME:+https://}${N8N_HOSTNAME:-http://localhost:5678}
    N8N_METRICS: "true"
    NODE_ENV: production
    # Queue mode: the `n8n` service schedules, `n8n-worker` executes via Redis.
    EXECUTIONS_MODE: queue
    N8N_RUNNERS_ENABLED: "true"
    QUEUE_HEALTH_CHECK_ACTIVE: "true"
    QUEUE_BULL_REDIS_HOST: ${REDIS_HOST:-redis}
    QUEUE_BULL_REDIS_PORT: ${REDIS_PORT:-6379}
    N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS: "true"
    N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE: "true"
services:
  # Flowise low-code LLM flow builder; exposed via Caddy on FLOWISE_HOSTNAME.
  flowise:
    image: flowiseai/flowise
    restart: unless-stopped
    container_name: flowise
    environment:
      - PORT=3001
      # Passed through from the host environment / .env (no value here).
      - FLOWISE_USERNAME
      - FLOWISE_PASSWORD
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ~/.flowise:/root/.flowise
    # Short delay lets dependencies settle before Flowise starts.
    entrypoint: /bin/sh -c "sleep 3; flowise start"
open-webui:
|
|
image: ghcr.io/open-webui/open-webui:main
|
|
restart: unless-stopped
|
|
container_name: open-webui
|
|
extra_hosts:
|
|
- "host.docker.internal:host-gateway"
|
|
volumes:
|
|
- open-webui:/app/backend/data
|
|
|
|
n8n-import:
|
|
<<: *service-n8n
|
|
container_name: n8n-import
|
|
environment:
|
|
<<: *service-n8n-env
|
|
RUN_N8N_IMPORT: ${RUN_N8N_IMPORT:-false}
|
|
entrypoint: /bin/sh
|
|
command: /scripts/n8n_import_script.sh
|
|
volumes:
|
|
- ./n8n/backup:/backup
|
|
- ./n8n/n8n_import_script.sh:/scripts/n8n_import_script.sh:ro
|
|
depends_on:
|
|
postgres:
|
|
condition: service_healthy
|
|
|
|
n8n:
|
|
<<: *service-n8n
|
|
container_name: n8n
|
|
restart: unless-stopped
|
|
volumes:
|
|
- n8n_storage:/home/node/.n8n
|
|
- ./n8n/backup:/backup
|
|
- ./shared:/data/shared
|
|
depends_on:
|
|
n8n-import:
|
|
condition: service_completed_successfully
|
|
|
|
n8n-worker:
|
|
<<: *service-n8n
|
|
container_name: n8n-worker
|
|
restart: unless-stopped
|
|
command: worker
|
|
volumes:
|
|
- n8n_storage:/home/node/.n8n
|
|
- ./shared:/data/shared
|
|
depends_on:
|
|
n8n:
|
|
condition: service_started
|
|
redis:
|
|
condition: service_healthy
|
|
postgres:
|
|
condition: service_healthy
|
|
|
|
qdrant:
|
|
image: qdrant/qdrant
|
|
container_name: qdrant
|
|
restart: unless-stopped
|
|
volumes:
|
|
- qdrant_storage:/qdrant/storage
|
|
|
|
caddy:
|
|
container_name: caddy
|
|
image: docker.io/library/caddy:2-alpine
|
|
ports:
|
|
- "80:80"
|
|
- "443:443"
|
|
restart: unless-stopped
|
|
volumes:
|
|
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
|
- caddy-data:/data:rw
|
|
- caddy-config:/config:rw
|
|
environment:
|
|
- N8N_HOSTNAME=${N8N_HOSTNAME}
|
|
- WEBUI_HOSTNAME=${WEBUI_HOSTNAME}
|
|
- FLOWISE_HOSTNAME=${FLOWISE_HOSTNAME}
|
|
- SUPABASE_HOSTNAME=${SUPABASE_HOSTNAME}
|
|
- SEARXNG_HOSTNAME=${SEARXNG_HOSTNAME}
|
|
- LANGFUSE_HOSTNAME=${LANGFUSE_HOSTNAME}
|
|
- LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL:-internal}
|
|
- PROMETHEUS_HOSTNAME=${PROMETHEUS_HOSTNAME}
|
|
- GRAFANA_HOSTNAME=${GRAFANA_HOSTNAME}
|
|
- PROMETHEUS_USERNAME=${PROMETHEUS_USERNAME}
|
|
- PROMETHEUS_PASSWORD_HASH=${PROMETHEUS_PASSWORD_HASH}
|
|
- SEARXNG_USERNAME=${SEARXNG_USERNAME}
|
|
- SEARXNG_PASSWORD_HASH=${SEARXNG_PASSWORD_HASH}
|
|
cap_drop:
|
|
- ALL
|
|
cap_add:
|
|
- NET_BIND_SERVICE
|
|
logging:
|
|
driver: "json-file"
|
|
options:
|
|
max-size: "1m"
|
|
max-file: "1"
|
|
|
|
langfuse-worker:
|
|
image: langfuse/langfuse-worker:3
|
|
restart: always
|
|
depends_on: &langfuse-depends-on
|
|
postgres:
|
|
condition: service_healthy
|
|
minio:
|
|
condition: service_healthy
|
|
redis:
|
|
condition: service_healthy
|
|
clickhouse:
|
|
condition: service_healthy
|
|
environment: &langfuse-worker-env
|
|
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres
|
|
SALT: ${LANGFUSE_SALT}
|
|
ENCRYPTION_KEY: ${ENCRYPTION_KEY}
|
|
TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-true}
|
|
LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-true}
|
|
CLICKHOUSE_MIGRATION_URL: ${CLICKHOUSE_MIGRATION_URL:-clickhouse://clickhouse:9000}
|
|
CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
|
|
CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
|
|
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
|
|
CLICKHOUSE_CLUSTER_ENABLED: ${CLICKHOUSE_CLUSTER_ENABLED:-false}
|
|
LANGFUSE_S3_EVENT_UPLOAD_BUCKET: ${LANGFUSE_S3_EVENT_UPLOAD_BUCKET:-langfuse}
|
|
LANGFUSE_S3_EVENT_UPLOAD_REGION: ${LANGFUSE_S3_EVENT_UPLOAD_REGION:-auto}
|
|
LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID:-minio}
|
|
LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
|
|
LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: ${LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT:-http://minio:9000}
|
|
LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE:-true}
|
|
LANGFUSE_S3_EVENT_UPLOAD_PREFIX: ${LANGFUSE_S3_EVENT_UPLOAD_PREFIX:-events/}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: ${LANGFUSE_S3_MEDIA_UPLOAD_BUCKET:-langfuse}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_REGION: ${LANGFUSE_S3_MEDIA_UPLOAD_REGION:-auto}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID:-minio}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: ${LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT:-http://localhost:9090}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE:-true}
|
|
LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: ${LANGFUSE_S3_MEDIA_UPLOAD_PREFIX:-media/}
|
|
LANGFUSE_S3_BATCH_EXPORT_ENABLED: ${LANGFUSE_S3_BATCH_EXPORT_ENABLED:-false}
|
|
LANGFUSE_S3_BATCH_EXPORT_BUCKET: ${LANGFUSE_S3_BATCH_EXPORT_BUCKET:-langfuse}
|
|
LANGFUSE_S3_BATCH_EXPORT_PREFIX: ${LANGFUSE_S3_BATCH_EXPORT_PREFIX:-exports/}
|
|
LANGFUSE_S3_BATCH_EXPORT_REGION: ${LANGFUSE_S3_BATCH_EXPORT_REGION:-auto}
|
|
LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_ENDPOINT:-http://minio:9000}
|
|
LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT:-http://localhost:9090}
|
|
LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: ${LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID:-minio}
|
|
LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
|
|
LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: ${LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE:-true}
|
|
LANGFUSE_INGESTION_QUEUE_DELAY_MS: ${LANGFUSE_INGESTION_QUEUE_DELAY_MS:-}
|
|
LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: ${LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS:-}
|
|
REDIS_HOST: ${REDIS_HOST:-redis}
|
|
REDIS_PORT: ${REDIS_PORT:-6379}
|
|
REDIS_AUTH: ${REDIS_AUTH:-LOCALONLYREDIS}
|
|
REDIS_TLS_ENABLED: ${REDIS_TLS_ENABLED:-false}
|
|
REDIS_TLS_CA: ${REDIS_TLS_CA:-/certs/ca.crt}
|
|
REDIS_TLS_CERT: ${REDIS_TLS_CERT:-/certs/redis.crt}
|
|
REDIS_TLS_KEY: ${REDIS_TLS_KEY:-/certs/redis.key}
|
|
|
|
langfuse-web:
|
|
image: langfuse/langfuse:3
|
|
restart: always
|
|
depends_on: *langfuse-depends-on
|
|
environment:
|
|
<<: *langfuse-worker-env
|
|
NEXTAUTH_URL: https://{$LANGFUSE_HOSTNAME}
|
|
NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
|
|
LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-}
|
|
LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-}
|
|
LANGFUSE_INIT_PROJECT_ID: ${LANGFUSE_INIT_PROJECT_ID:-}
|
|
LANGFUSE_INIT_PROJECT_NAME: ${LANGFUSE_INIT_PROJECT_NAME:-}
|
|
LANGFUSE_INIT_PROJECT_PUBLIC_KEY: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY:-}
|
|
LANGFUSE_INIT_PROJECT_SECRET_KEY: ${LANGFUSE_INIT_PROJECT_SECRET_KEY:-}
|
|
LANGFUSE_INIT_USER_EMAIL: ${LANGFUSE_INIT_USER_EMAIL:-}
|
|
LANGFUSE_INIT_USER_NAME: ${LANGFUSE_INIT_USER_NAME:-}
|
|
LANGFUSE_INIT_USER_PASSWORD: ${LANGFUSE_INIT_USER_PASSWORD:-}
|
|
|
|
clickhouse:
|
|
image: clickhouse/clickhouse-server
|
|
restart: always
|
|
user: "101:101"
|
|
environment:
|
|
CLICKHOUSE_DB: default
|
|
CLICKHOUSE_USER: clickhouse
|
|
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
|
|
volumes:
|
|
- langfuse_clickhouse_data:/var/lib/clickhouse
|
|
- langfuse_clickhouse_logs:/var/log/clickhouse-server
|
|
healthcheck:
|
|
test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
|
|
interval: 5s
|
|
timeout: 5s
|
|
retries: 10
|
|
start_period: 1s
|
|
|
|
minio:
|
|
image: minio/minio
|
|
restart: always
|
|
entrypoint: sh
|
|
# create the 'langfuse' bucket before starting the service
|
|
command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
|
|
environment:
|
|
MINIO_ROOT_USER: minio
|
|
MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
|
|
volumes:
|
|
- langfuse_minio_data:/data
|
|
healthcheck:
|
|
test: ["CMD", "mc", "ready", "local"]
|
|
interval: 1s
|
|
timeout: 5s
|
|
retries: 5
|
|
start_period: 1s
|
|
|
|
postgres:
|
|
image: postgres:${POSTGRES_VERSION:-latest}
|
|
restart: unless-stopped
|
|
healthcheck:
|
|
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
interval: 3s
|
|
timeout: 3s
|
|
retries: 10
|
|
environment:
|
|
POSTGRES_USER: postgres
|
|
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
|
POSTGRES_DB: postgres
|
|
volumes:
|
|
- langfuse_postgres_data:/var/lib/postgresql/data
|
|
|
|
redis:
|
|
container_name: redis
|
|
image: docker.io/valkey/valkey:8-alpine
|
|
command: valkey-server --save 30 1 --loglevel warning
|
|
restart: unless-stopped
|
|
volumes:
|
|
- valkey-data:/data
|
|
cap_drop:
|
|
- ALL
|
|
cap_add:
|
|
- SETGID
|
|
- SETUID
|
|
- DAC_OVERRIDE
|
|
logging:
|
|
driver: "json-file"
|
|
options:
|
|
max-size: "1m"
|
|
max-file: "1"
|
|
healthcheck:
|
|
test: ["CMD", "redis-cli", "ping"]
|
|
interval: 3s
|
|
timeout: 10s
|
|
retries: 10
|
|
|
|
searxng:
|
|
container_name: searxng
|
|
image: docker.io/searxng/searxng:latest
|
|
restart: unless-stopped
|
|
volumes:
|
|
- ./searxng:/etc/searxng:rw
|
|
environment:
|
|
- SEARXNG_BASE_URL=https://${SEARXNG_HOSTNAME:-localhost}/
|
|
- UWSGI_WORKERS=${SEARXNG_UWSGI_WORKERS:-4}
|
|
- UWSGI_THREADS=${SEARXNG_UWSGI_THREADS:-4}
|
|
cap_drop:
|
|
- ALL
|
|
cap_add:
|
|
- CHOWN
|
|
- SETGID
|
|
- SETUID
|
|
logging:
|
|
driver: "json-file"
|
|
options:
|
|
max-size: "1m"
|
|
max-file: "1"
|
|
|
|
prometheus:
|
|
image: prom/prometheus:latest
|
|
container_name: prometheus
|
|
restart: unless-stopped
|
|
volumes:
|
|
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
|
- prometheus_data:/prometheus
|
|
extra_hosts:
|
|
- "host.docker.internal:host-gateway"
|
|
|
|
node-exporter:
|
|
image: prom/node-exporter:latest
|
|
container_name: node-exporter
|
|
restart: unless-stopped
|
|
volumes:
|
|
- /proc:/host/proc:ro
|
|
- /sys:/host/sys:ro
|
|
- /:/rootfs:ro
|
|
command:
|
|
- "--path.procfs=/host/proc"
|
|
- "--path.sysfs=/host/sys"
|
|
- "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
|
|
expose:
|
|
- 9100
|
|
|
|
cadvisor:
|
|
image: gcr.io/cadvisor/cadvisor:latest
|
|
container_name: cadvisor
|
|
restart: unless-stopped
|
|
volumes:
|
|
- /:/rootfs:ro
|
|
- /var/run:/var/run:rw
|
|
- /sys:/sys:ro
|
|
- /var/lib/docker/:/var/lib/docker:ro
|
|
expose:
|
|
- 8080
|
|
|
|
grafana:
|
|
image: grafana/grafana:latest
|
|
container_name: grafana
|
|
restart: unless-stopped
|
|
environment:
|
|
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD:-admin}
|
|
- GF_PROVISIONING_PATH=/etc/grafana/provisioning
|
|
volumes:
|
|
- grafana:/var/lib/grafana
|
|
- ./grafana/provisioning:/etc/grafana/provisioning
|
|
- ./grafana/dashboards:/var/lib/grafana/dashboards # Standard path often used, let's use Grafana's managed dashboards dir
|
|
extra_hosts:
|
|
- "host.docker.internal:host-gateway"
|
|
depends_on:
|
|
- prometheus
|
|
|
|
crawl4ai:
|
|
image: unclecode/crawl4ai:latest # Use official image
|
|
container_name: crawl4ai
|
|
restart: unless-stopped
|
|
shm_size: 1g # Recommended for browser operations
|
|
env_file:
|
|
- .env
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
cpus: "1.0"
|
|
memory: 4G # Increased based on documentation recommendation
|