include:
  - ./supabase/docker/docker-compose.yml

volumes:
  n8n_storage:
  ollama_storage:
  qdrant_storage:
  open-webui:
  flowise:
  caddy-data:
  caddy-config:
  valkey-data:
  langfuse_postgres_data:
  langfuse_clickhouse_data:
  langfuse_clickhouse_logs:
  langfuse_minio_data:

x-n8n: &service-n8n
  image: n8nio/n8n:latest
  environment:
    - DB_TYPE=postgresdb
    - DB_POSTGRESDB_HOST=db
    - DB_POSTGRESDB_USER=postgres
    - DB_POSTGRESDB_PASSWORD=${POSTGRES_PASSWORD}
    - DB_POSTGRESDB_DATABASE=postgres
    - N8N_DIAGNOSTICS_ENABLED=false
    - N8N_PERSONALIZATION_ENABLED=false
    - N8N_ENCRYPTION_KEY
    - N8N_USER_MANAGEMENT_JWT_SECRET
    - WEBHOOK_URL=${N8N_HOSTNAME:+https://}${N8N_HOSTNAME:-http://localhost:5678}

x-ollama: &service-ollama
  image: ollama/ollama:latest
  container_name: ollama
  restart: unless-stopped
  expose:
    - 11434/tcp
  environment:
    - OLLAMA_CONTEXT_LENGTH=8192
  volumes:
    - ollama_storage:/root/.ollama

x-init-ollama: &init-ollama
  image: ollama/ollama:latest
  container_name: ollama-pull-llama
  volumes:
    - ollama_storage:/root/.ollama
  entrypoint: /bin/sh
  command:
    - "-c"
    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull qwen2.5:7b-instruct-q4_K_M; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
  # For a larger context length version of the model, run these commands:
  #   printf "FROM qwen2.5:7b-instruct-q4_K_M\n\nPARAMETER num_ctx 8192\n" > Modelfile
  #   ollama create qwen2.5:7b-8k -f ./Modelfile
  # Change the name of the LLM and num_ctx as you see fit.

services:
  flowise:
    image: flowiseai/flowise
    restart: unless-stopped
    container_name: flowise
    expose:
      - 3001/tcp
    environment:
      - PORT=3001
      - FLOWISE_USERNAME=${FLOWISE_USERNAME}
      - FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ~/.flowise:/root/.flowise
    entrypoint: /bin/sh -c "sleep 3; flowise start"

  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    restart: unless-stopped
    container_name: open-webui
    expose:
      - 8080/tcp
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - open-webui:/app/backend/data

  n8n-import:
    <<: *service-n8n
    container_name: n8n-import
    entrypoint: /bin/sh
    command:
      - "-c"
      - "n8n import:credentials --separate --input=/backup/credentials && n8n import:workflow --separate --input=/backup/workflows"
    volumes:
      - ./n8n/backup:/backup

  n8n:
    <<: *service-n8n
    container_name: n8n
    restart: unless-stopped
    expose:
      - 5678/tcp
    volumes:
      - n8n_storage:/home/node/.n8n
      - ./n8n/backup:/backup
      - ./shared:/data/shared
    depends_on:
      n8n-import:
        condition: service_completed_successfully

  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
    restart: unless-stopped
    expose:
      - 6333/tcp
      - 6334/tcp
    volumes:
      - qdrant_storage:/qdrant/storage

  neo4j:
    image: neo4j:latest
    volumes:
      - ./neo4j/logs:/logs
      - ./neo4j/config:/config
      - ./neo4j/data:/data
      - ./neo4j/plugins:/plugins
    expose:
      - 7473/tcp
      - 7474/tcp
      - 7687/tcp
    environment:
      - NEO4J_AUTH=${NEO4J_AUTH:-neo4j/your_password}
    restart: always
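  # The Caddy service below is driven entirely by the *_HOSTNAME variables in
  # .env. A minimal sketch of what those overrides might look like for a
  # public deployment (example.com and the email are placeholders, not values
  # shipped with this file):
  #   N8N_HOSTNAME=n8n.example.com
  #   WEBUI_HOSTNAME=webui.example.com
  #   LETSENCRYPT_EMAIL=admin@example.com
  # Leaving them unset keeps the ":800x" defaults, which serve each app on a
  # local port without TLS.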
  caddy:
    container_name: caddy
    image: docker.io/library/caddy:2-alpine
    restart: unless-stopped
    ports:
      - 80:80/tcp
      - 443:443/tcp
    expose:
      - 2019/tcp
      - 443/tcp
      - 443/udp
      - 80/tcp
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy-data:/data:rw
      - caddy-config:/config:rw
    environment:
      - N8N_HOSTNAME=${N8N_HOSTNAME:-":8001"}
      - WEBUI_HOSTNAME=${WEBUI_HOSTNAME:-":8002"}
      - FLOWISE_HOSTNAME=${FLOWISE_HOSTNAME:-":8003"}
      - OLLAMA_HOSTNAME=${OLLAMA_HOSTNAME:-":8004"}
      - SUPABASE_HOSTNAME=${SUPABASE_HOSTNAME:-":8005"}
      - SEARXNG_HOSTNAME=${SEARXNG_HOSTNAME:-":8006"}
      - LANGFUSE_HOSTNAME=${LANGFUSE_HOSTNAME:-":8007"}
      - NEO4J_HOSTNAME=${NEO4J_HOSTNAME:-":8008"}
      - LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL:-internal}
    cap_drop:
      - ALL
    cap_add:
      - NET_BIND_SERVICE
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"

  langfuse-worker:
    image: langfuse/langfuse-worker:3
    restart: always
    depends_on: &langfuse-depends-on
      postgres:
        condition: service_healthy
      minio:
        condition: service_healthy
      redis:
        condition: service_healthy
      clickhouse:
        condition: service_healthy
    expose:
      - 3030/tcp
    environment: &langfuse-worker-env
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres
      SALT: ${LANGFUSE_SALT}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
      TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-true}
      LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-true}
      CLICKHOUSE_MIGRATION_URL: ${CLICKHOUSE_MIGRATION_URL:-clickhouse://clickhouse:9000}
      CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
      CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
      CLICKHOUSE_CLUSTER_ENABLED: ${CLICKHOUSE_CLUSTER_ENABLED:-false}
      LANGFUSE_S3_EVENT_UPLOAD_BUCKET: ${LANGFUSE_S3_EVENT_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_EVENT_UPLOAD_REGION: ${LANGFUSE_S3_EVENT_UPLOAD_REGION:-auto}
      LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: ${LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_EVENT_UPLOAD_PREFIX: ${LANGFUSE_S3_EVENT_UPLOAD_PREFIX:-events/}
      LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: ${LANGFUSE_S3_MEDIA_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_MEDIA_UPLOAD_REGION: ${LANGFUSE_S3_MEDIA_UPLOAD_REGION:-auto}
      LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: ${LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: ${LANGFUSE_S3_MEDIA_UPLOAD_PREFIX:-media/}
      LANGFUSE_S3_BATCH_EXPORT_ENABLED: ${LANGFUSE_S3_BATCH_EXPORT_ENABLED:-false}
      LANGFUSE_S3_BATCH_EXPORT_BUCKET: ${LANGFUSE_S3_BATCH_EXPORT_BUCKET:-langfuse}
      LANGFUSE_S3_BATCH_EXPORT_PREFIX: ${LANGFUSE_S3_BATCH_EXPORT_PREFIX:-exports/}
      LANGFUSE_S3_BATCH_EXPORT_REGION: ${LANGFUSE_S3_BATCH_EXPORT_REGION:-auto}
      LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: ${LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: ${LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE:-true}
      LANGFUSE_INGESTION_QUEUE_DELAY_MS: ${LANGFUSE_INGESTION_QUEUE_DELAY_MS:-}
      LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: ${LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS:-}
      REDIS_HOST: ${REDIS_HOST:-redis}
      REDIS_PORT: ${REDIS_PORT:-6379}
      REDIS_AUTH: ${REDIS_AUTH:-LOCALONLYREDIS}
      REDIS_TLS_ENABLED: ${REDIS_TLS_ENABLED:-false}
      REDIS_TLS_CA: ${REDIS_TLS_CA:-/certs/ca.crt}
      REDIS_TLS_CERT: ${REDIS_TLS_CERT:-/certs/redis.crt}
      REDIS_TLS_KEY: ${REDIS_TLS_KEY:-/certs/redis.key}

  langfuse-web:
    image: langfuse/langfuse:3
    restart: always
    depends_on: *langfuse-depends-on
    expose:
      - 3000/tcp
    environment:
      <<: *langfuse-worker-env
      NEXTAUTH_URL: http://localhost:3002
      NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
      LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-}
      LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-}
      LANGFUSE_INIT_PROJECT_ID: ${LANGFUSE_INIT_PROJECT_ID:-}
      LANGFUSE_INIT_PROJECT_NAME: ${LANGFUSE_INIT_PROJECT_NAME:-}
      LANGFUSE_INIT_PROJECT_PUBLIC_KEY: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY:-}
      LANGFUSE_INIT_PROJECT_SECRET_KEY: ${LANGFUSE_INIT_PROJECT_SECRET_KEY:-}
      LANGFUSE_INIT_USER_EMAIL: ${LANGFUSE_INIT_USER_EMAIL:-}
      LANGFUSE_INIT_USER_NAME: ${LANGFUSE_INIT_USER_NAME:-}
      LANGFUSE_INIT_USER_PASSWORD: ${LANGFUSE_INIT_USER_PASSWORD:-}
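  # Langfuse expects its secrets in .env; a minimal sketch of generating them
  # (assumes openssl is available on the host; variable names match the ones
  # referenced above):
  #   openssl rand -hex 32     # ENCRYPTION_KEY (must be 64 hex characters)
  #   openssl rand -base64 32  # LANGFUSE_SALT and NEXTAUTH_SECRET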
  clickhouse:
    image: clickhouse/clickhouse-server
    restart: always
    user: "101:101"
    expose:
      - 8123/tcp
      - 9000/tcp
      - 9009/tcp
    environment:
      CLICKHOUSE_DB: default
      CLICKHOUSE_USER: clickhouse
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
    volumes:
      - langfuse_clickhouse_data:/var/lib/clickhouse
      - langfuse_clickhouse_logs:/var/log/clickhouse-server
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 1s

  minio:
    image: minio/minio
    restart: always
    entrypoint: sh
    # create the 'langfuse' bucket before starting the service
    command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
    expose:
      - 9000/tcp
      - 9001/tcp
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    volumes:
      - langfuse_minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 1s
      timeout: 5s
      retries: 5
      start_period: 1s

  postgres:
    image: postgres:${POSTGRES_VERSION:-latest}
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 3s
      timeout: 3s
      retries: 10
    expose:
      - 5432/tcp
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: postgres
    volumes:
      - langfuse_postgres_data:/var/lib/postgresql/data

  redis:
    container_name: redis
    image: docker.io/valkey/valkey:8-alpine
    command: valkey-server --save 30 1 --loglevel warning
    restart: unless-stopped
    expose:
      - 6379/tcp
    volumes:
      - valkey-data:/data
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
      - DAC_OVERRIDE
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 3s
      timeout: 10s
      retries: 10

  searxng:
    container_name: searxng
    image: docker.io/searxng/searxng:latest
    restart: unless-stopped
    expose:
      - 8080/tcp
    volumes:
      - ./searxng:/etc/searxng:rw
    environment:
      - SEARXNG_BASE_URL=https://${SEARXNG_HOSTNAME:-localhost}/
      - UWSGI_WORKERS=${SEARXNG_UWSGI_WORKERS:-4}
      - UWSGI_THREADS=${SEARXNG_UWSGI_THREADS:-4}
    cap_drop:
      - ALL
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"

  ollama-cpu:
    profiles: ["cpu"]
    <<: *service-ollama

  ollama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *service-ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  ollama-gpu-amd:
    profiles: ["gpu-amd"]
    <<: *service-ollama
    image: ollama/ollama:rocm
    devices:
      - "/dev/kfd"
      - "/dev/dri"

  ollama-pull-llama-cpu:
    profiles: ["cpu"]
    <<: *init-ollama
    depends_on:
      - ollama-cpu

  ollama-pull-llama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *init-ollama
    depends_on:
      - ollama-gpu

  ollama-pull-llama-gpu-amd:
    profiles: ["gpu-amd"]
    <<: *init-ollama
    image: ollama/ollama:rocm
    depends_on:
      - ollama-gpu-amd
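# Usage sketch (assumes the Docker Compose v2 CLI). Pick exactly one Ollama
# profile per invocation; the Ollama services share a container_name, so
# enabling several profiles at once would collide:
#   docker compose --profile cpu up -d
#   docker compose --profile gpu-nvidia up -d
#   docker compose --profile gpu-amd up -d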