Files
n8n-install/docker-compose.yml
Yury Kossakovsky 26485b32c0 feat(gost): require upstream proxy for geo-bypass
gost now always requires an external upstream proxy to function.
wizard prompts for upstream proxy url when gost is selected.
if no upstream provided, gost is removed from selection.
2025-12-20 15:21:27 -07:00

1163 lines
36 KiB
YAML

# Named volumes for persistent service data (kept alphabetical).
volumes:
  caddy-config:
  caddy-data:
  comfyui_data:
  docling_cache:
  flowise:
  grafana:
  langfuse_clickhouse_data:
  langfuse_clickhouse_logs:
  langfuse_minio_data:
  langfuse_postgres_data:
  letta_data:
  libretranslate_api_keys:
  libretranslate_models:
  lightrag_data:
  lightrag_inputs:
  n8n_storage:
  ollama_storage:
  open-webui:
  paddle_cache:
  paddleocr_cache:
  paddlex_data:
  portainer_data:
  postgresus_data:
  postiz-config:
  postiz-uploads:
  prometheus_data:
  qdrant_storage:
  ragflow_data:
  ragflow_elasticsearch_data:
  ragflow_minio_data:
  ragflow_mysql_data:
  ragflow_redis_data:
  valkey-data:
  weaviate_data:
# Shared proxy configuration for services that need outbound proxy support.
# GOST_PROXY_URL presumably points at the gost service below — confirm with
# the install wizard. Empty defaults mean "no proxy" when gost is disabled.
# Both upper- and lower-case variants are set because tools disagree on which
# spelling they honor.
x-proxy-env: &proxy-env
  HTTP_PROXY: ${GOST_PROXY_URL:-}
  HTTPS_PROXY: ${GOST_PROXY_URL:-}
  http_proxy: ${GOST_PROXY_URL:-}
  https_proxy: ${GOST_PROXY_URL:-}
  NO_PROXY: ${GOST_NO_PROXY:-}
  no_proxy: ${GOST_NO_PROXY:-}
# Shared n8n base (build + environment), reused by n8n, n8n-import and the
# worker template. The environment anchor is merged separately where a
# service needs extra variables on top.
x-n8n: &service-n8n
  build:
    context: ./n8n
    dockerfile: Dockerfile.n8n
    pull: true
  environment: &service-n8n-env
    <<: *proxy-env
    # Postgres connection (the "postgres" service below)
    DB_POSTGRESDB_DATABASE: postgres
    DB_POSTGRESDB_HOST: postgres
    DB_POSTGRESDB_PASSWORD: ${POSTGRES_PASSWORD}
    DB_POSTGRESDB_USER: postgres
    DB_TYPE: postgresdb
    EXECUTIONS_MODE: ${EXECUTIONS_MODE:-queue}
    GENERIC_TIMEZONE: ${GENERIC_TIMEZONE:-America/New_York}
    LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
    LANGCHAIN_ENDPOINT: ${LANGCHAIN_ENDPOINT}
    LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
    N8N_BINARY_DATA_MODE: database
    N8N_BLOCK_ENV_ACCESS_IN_NODE: false
    N8N_BLOCK_FILE_ACCESS_TO_N8N_FILES: ${N8N_BLOCK_FILE_ACCESS_TO_N8N_FILES:-true}
    N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE: true
    N8N_DIAGNOSTICS_ENABLED: false
    N8N_EMAIL_MODE: ${N8N_EMAIL_MODE:-smtp}
    N8N_ENCRYPTION_KEY: ${N8N_ENCRYPTION_KEY}
    N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS: true
    N8N_LOG_LEVEL: ${N8N_LOG_LEVEL:-info}
    N8N_LOG_OUTPUT: ${N8N_LOG_OUTPUT:-console}
    N8N_METRICS: true
    N8N_PAYLOAD_SIZE_MAX: 256
    N8N_PERSONALIZATION_ENABLED: false
    N8N_RESTRICT_FILE_ACCESS_TO: /data/shared
    # External task runners (sidecar pattern — see x-n8n-worker-runner)
    N8N_RUNNERS_AUTH_TOKEN: ${N8N_RUNNERS_AUTH_TOKEN}
    N8N_RUNNERS_BROKER_LISTEN_ADDRESS: 0.0.0.0
    N8N_RUNNERS_ENABLED: true
    N8N_RUNNERS_MODE: external
    OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS: ${OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS:-true}
    # SMTP settings — all optional; empty defaults effectively disable mail
    N8N_SMTP_HOST: ${N8N_SMTP_HOST:-}
    N8N_SMTP_OAUTH_PRIVATE_KEY: ${N8N_SMTP_OAUTH_PRIVATE_KEY:-}
    N8N_SMTP_OAUTH_SERVICE_CLIENT: ${N8N_SMTP_OAUTH_SERVICE_CLIENT:-}
    N8N_SMTP_PASS: ${N8N_SMTP_PASS:-}
    N8N_SMTP_PORT: ${N8N_SMTP_PORT:-}
    N8N_SMTP_SENDER: ${N8N_SMTP_SENDER:-}
    N8N_SMTP_SSL: ${N8N_SMTP_SSL:-true}
    N8N_SMTP_STARTTLS: ${N8N_SMTP_STARTTLS:-true}
    N8N_SMTP_USER: ${N8N_SMTP_USER:-}
    N8N_TRUST_PROXY: true
    N8N_USER_MANAGEMENT_JWT_SECRET: ${N8N_USER_MANAGEMENT_JWT_SECRET}
    NODE_ENV: production
    QUEUE_BULL_REDIS_HOST: ${REDIS_HOST:-redis}
    QUEUE_BULL_REDIS_PORT: ${REDIS_PORT:-6379}
    QUEUE_HEALTH_CHECK_ACTIVE: true
    # With N8N_HOSTNAME set this expands to https://<host>/; otherwise to
    # http://localhost:5678/ (the :+ / :- pair is deliberate).
    WEBHOOK_URL: ${N8N_HOSTNAME:+https://}${N8N_HOSTNAME:-http://localhost:5678}/
# Shared Ollama base; the cpu/gpu-nvidia/gpu-amd services below extend it
# with hardware-specific settings.
x-ollama: &service-ollama
  image: ollama/ollama:latest
  container_name: ollama
  restart: unless-stopped
  environment:
    OLLAMA_CONTEXT_LENGTH: 8192
    OLLAMA_FLASH_ATTENTION: 1
    OLLAMA_KV_CACHE_TYPE: q8_0
    OLLAMA_MAX_LOADED_MODELS: 2
  volumes:
    - ollama_storage:/root/.ollama
# One-shot init container that pre-pulls the default LLM and embedding
# models into the shared ollama_storage volume. The sleep gives the ollama
# server a moment to start listening before the pulls begin.
x-init-ollama: &init-ollama
  image: ollama/ollama:latest
  container_name: ollama-pull-llama
  volumes:
    - ollama_storage:/root/.ollama
  entrypoint: /bin/sh
  command:
    - "-c"
    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull qwen2.5:7b-instruct-q4_K_M; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
# Worker-runner anchor for sidecar pattern (runner connects to worker via localhost)
x-n8n-worker-runner: &service-n8n-worker-runner
  build:
    context: ./n8n
    dockerfile: Dockerfile.runner
    pull: true
  environment:
    GENERIC_TIMEZONE: ${GENERIC_TIMEZONE:-America/New_York}
    # Must match N8N_RUNNERS_AUTH_TOKEN in the n8n base environment above.
    N8N_RUNNERS_AUTH_TOKEN: ${N8N_RUNNERS_AUTH_TOKEN}
    N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT: 15
    N8N_RUNNERS_MAX_CONCURRENCY: ${N8N_RUNNERS_MAX_CONCURRENCY:-5}
    # 127.0.0.1 assumes the runner shares a network namespace with its
    # worker (sidecar) — confirm against docker-compose.n8n-workers.yml.
    N8N_RUNNERS_TASK_BROKER_URI: http://127.0.0.1:5679
services:
  # Flowise low-code LLM flow builder; published via Caddy (FLOWISE_HOSTNAME).
  flowise:
    image: flowiseai/flowise
    restart: unless-stopped
    container_name: flowise
    profiles: ["flowise"]
    environment:
      <<: *proxy-env
      PORT: 3001
      FLOWISE_USERNAME: ${FLOWISE_USERNAME}
      FLOWISE_PASSWORD: ${FLOWISE_PASSWORD}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ~/.flowise:/root/.flowise
    # Short sleep before start lets dependent services settle.
    entrypoint: /bin/sh -c "sleep 3; flowise start"
  # Open WebUI chat frontend, talking to the ollama service.
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    restart: unless-stopped
    container_name: open-webui
    profiles: ["open-webui"]
    environment:
      <<: *proxy-env
      OLLAMA_BASE_URL: http://ollama:11434
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - open-webui:/app/backend/data
    healthcheck:
      test: ["CMD", "curl", "-fs", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 5
      # Generous start period: first boot downloads embedding models.
      start_period: 120s
  # One-shot job that runs the workflow/credential import script before the
  # main n8n service starts (n8n depends_on this completing successfully).
  # RUN_N8N_IMPORT is presumably consumed inside the script to make the
  # import opt-in — confirm against n8n_import_script.sh.
  n8n-import:
    <<: *service-n8n
    container_name: n8n-import
    profiles: ["n8n"]
    environment:
      <<: *service-n8n-env
      RUN_N8N_IMPORT: ${RUN_N8N_IMPORT:-false}
    entrypoint: /bin/sh
    command: /scripts/n8n_import_script.sh
    volumes:
      - ./n8n/backup:/backup
      - ./n8n/n8n_import_script.sh:/scripts/n8n_import_script.sh:ro
    depends_on:
      postgres:
        condition: service_healthy
  # Main n8n instance (UI + API on 5678, proxied by Caddy via N8N_HOSTNAME).
  n8n:
    <<: *service-n8n
    container_name: n8n
    profiles: ["n8n"]
    restart: unless-stopped
    volumes:
      - n8n_storage:/home/node/.n8n
      - ./n8n/backup:/backup
      - ./shared:/data/shared
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:5678/healthz || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
    depends_on:
      # Waits for the import job; transitively waits for healthy postgres.
      n8n-import:
        condition: service_completed_successfully
  # Template services for worker-runner pairs (used by docker-compose.n8n-workers.yml via extends)
  # These templates use profile "n8n-template" which is never activated directly
  n8n-worker-template:
    <<: *service-n8n
    profiles: ["n8n-template"]
    # Runs n8n in queue-worker mode instead of serving the UI.
    command: worker
    volumes:
      - n8n_storage:/home/node/.n8n
      - ./shared:/data/shared
    healthcheck:
      # Workers expose /healthz on 5678 when QUEUE_HEALTH_CHECK_ACTIVE is set
      # (it is, in the shared n8n environment above).
      test: ["CMD-SHELL", "wget -qO- http://localhost:5678/healthz || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
  # Task-runner sidecar template; pairs with n8n-worker-template via extends.
  n8n-runner-template:
    <<: *service-n8n-worker-runner
    profiles: ["n8n-template"]
    # Launches both the javascript and python runners.
    entrypoint: ["/bin/sh", "-c", "/usr/local/bin/task-runner-launcher javascript python"]
    healthcheck:
      test: ["CMD-SHELL", "wget -qO- http://localhost:5680/healthz || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s
  # Qdrant vector database; API protected by QDRANT_API_KEY, reachable only
  # on the internal network (expose, not ports).
  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
    profiles: ["qdrant"]
    restart: unless-stopped
    volumes:
      - qdrant_storage:/qdrant/storage
    environment:
      QDRANT__SERVICE__API_KEY: ${QDRANT_API_KEY}
    expose:
      - "6333"
  # Neo4j graph database (bolt on 7687 is published through caddy below).
  neo4j:
    image: neo4j:latest
    container_name: neo4j
    profiles: ["neo4j"]
    restart: unless-stopped
    volumes:
      - ./neo4j/logs:/logs
      - ./neo4j/config:/config
      - ./neo4j/data:/data
      - ./neo4j/plugins:/plugins
    environment:
      # NEO4J_AUTH takes the "user/password" form expected by the image.
      - NEO4J_AUTH=${NEO4J_AUTH_USERNAME}/${NEO4J_AUTH_PASSWORD}
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --no-verbose --tries=1 --spider http://localhost:7474 || exit 1",
        ]
      interval: 5s
      timeout: 3s
      retries: 5
    ulimits:
      # Neo4j wants a high open-file limit.
      nofile:
        soft: 40000
        hard: 40000
  # Caddy reverse proxy / TLS terminator. The only service publishing ports
  # to the host (80/443 plus 7687 for Neo4j bolt). All *_HOSTNAME /
  # *_USERNAME / *_PASSWORD_HASH variables below are consumed by the
  # Caddyfile and addons — keep this list in sync with them.
  caddy:
    container_name: caddy
    image: docker.io/library/caddy:2-alpine
    ports:
      - "80:80"
      - "443:443"
      - "7687:7687"
    restart: unless-stopped
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - ./caddy-addon:/etc/caddy/addons:ro
      - ./welcome:/srv/welcome:ro
      - caddy-data:/data:rw
      - caddy-config:/config:rw
    environment:
      COMFYUI_HOSTNAME: ${COMFYUI_HOSTNAME}
      COMFYUI_PASSWORD_HASH: ${COMFYUI_PASSWORD_HASH}
      COMFYUI_USERNAME: ${COMFYUI_USERNAME}
      DIFY_HOSTNAME: ${DIFY_HOSTNAME}
      DOCLING_HOSTNAME: ${DOCLING_HOSTNAME}
      DOCLING_PASSWORD_HASH: ${DOCLING_PASSWORD_HASH}
      DOCLING_USERNAME: ${DOCLING_USERNAME}
      FLOWISE_HOSTNAME: ${FLOWISE_HOSTNAME}
      GRAFANA_HOSTNAME: ${GRAFANA_HOSTNAME}
      LANGFUSE_HOSTNAME: ${LANGFUSE_HOSTNAME}
      # "internal" default switches ACME off in favor of Caddy's internal CA.
      LETSENCRYPT_EMAIL: ${LETSENCRYPT_EMAIL:-internal}
      LETTA_HOSTNAME: ${LETTA_HOSTNAME}
      LIGHTRAG_HOSTNAME: ${LIGHTRAG_HOSTNAME}
      LT_HOSTNAME: ${LT_HOSTNAME}
      LT_USERNAME: ${LT_USERNAME}
      LT_PASSWORD_HASH: ${LT_PASSWORD_HASH}
      N8N_HOSTNAME: ${N8N_HOSTNAME}
      NEO4J_HOSTNAME: ${NEO4J_HOSTNAME}
      WAHA_HOSTNAME: ${WAHA_HOSTNAME}
      PADDLEOCR_HOSTNAME: ${PADDLEOCR_HOSTNAME}
      PADDLEOCR_PASSWORD_HASH: ${PADDLEOCR_PASSWORD_HASH}
      PADDLEOCR_USERNAME: ${PADDLEOCR_USERNAME}
      PORTAINER_HOSTNAME: ${PORTAINER_HOSTNAME}
      POSTIZ_HOSTNAME: ${POSTIZ_HOSTNAME}
      POSTGRESUS_HOSTNAME: ${POSTGRESUS_HOSTNAME}
      PROMETHEUS_HOSTNAME: ${PROMETHEUS_HOSTNAME}
      PROMETHEUS_PASSWORD_HASH: ${PROMETHEUS_PASSWORD_HASH}
      PROMETHEUS_USERNAME: ${PROMETHEUS_USERNAME}
      QDRANT_HOSTNAME: ${QDRANT_HOSTNAME}
      RAGAPP_HOSTNAME: ${RAGAPP_HOSTNAME}
      RAGFLOW_HOSTNAME: ${RAGFLOW_HOSTNAME}
      RAGAPP_PASSWORD_HASH: ${RAGAPP_PASSWORD_HASH}
      RAGAPP_USERNAME: ${RAGAPP_USERNAME}
      SEARXNG_HOSTNAME: ${SEARXNG_HOSTNAME}
      SEARXNG_PASSWORD_HASH: ${SEARXNG_PASSWORD_HASH}
      SEARXNG_USERNAME: ${SEARXNG_USERNAME}
      SUPABASE_HOSTNAME: ${SUPABASE_HOSTNAME}
      WEAVIATE_HOSTNAME: ${WEAVIATE_HOSTNAME}
      WEBUI_HOSTNAME: ${WEBUI_HOSTNAME}
      WELCOME_HOSTNAME: ${WELCOME_HOSTNAME}
      WELCOME_PASSWORD_HASH: ${WELCOME_PASSWORD_HASH}
      WELCOME_USERNAME: ${WELCOME_USERNAME}
    # Drop all capabilities except binding privileged ports 80/443.
    cap_drop:
      - ALL
    cap_add:
      - NET_BIND_SERVICE
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"
  # Cloudflare Tunnel connector; authenticates with a pre-provisioned token.
  cloudflared:
    image: cloudflare/cloudflared:latest
    container_name: cloudflared
    profiles: ["cloudflare-tunnel"]
    restart: unless-stopped
    command: tunnel --no-autoupdate run
    environment:
      TUNNEL_TOKEN: ${CLOUDFLARE_TUNNEL_TOKEN}
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"
gost:
image: gogost/gost:latest
container_name: gost
profiles: ["gost"]
restart: unless-stopped
command:
- "-L"
- "http://${GOST_USERNAME}:${GOST_PASSWORD}@:8080"
- "-F"
- "${GOST_UPSTREAM_PROXY}"
healthcheck:
test: ["CMD-SHELL", "wget -q --spider http://localhost:8080 || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "1"
  # Langfuse background worker; shares its depends_on and environment
  # anchors with langfuse-web below.
  langfuse-worker:
    image: langfuse/langfuse-worker:3
    container_name: langfuse-worker
    restart: always
    profiles: ["langfuse"]
    depends_on: &langfuse-depends-on
      postgres:
        condition: service_healthy
      minio:
        condition: service_healthy
      redis:
        condition: service_healthy
      clickhouse:
        condition: service_healthy
    environment: &langfuse-worker-env
      <<: *proxy-env
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/langfuse
      SALT: ${LANGFUSE_SALT}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
      TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-true}
      LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-true}
      # ClickHouse (analytics store)
      CLICKHOUSE_MIGRATION_URL: ${CLICKHOUSE_MIGRATION_URL:-clickhouse://clickhouse:9000}
      CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
      CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
      CLICKHOUSE_CLUSTER_ENABLED: ${CLICKHOUSE_CLUSTER_ENABLED:-false}
      # S3-compatible object storage (local MinIO)
      LANGFUSE_S3_EVENT_UPLOAD_BUCKET: ${LANGFUSE_S3_EVENT_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_EVENT_UPLOAD_REGION: ${LANGFUSE_S3_EVENT_UPLOAD_REGION:-auto}
      LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: ${LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_EVENT_UPLOAD_PREFIX: ${LANGFUSE_S3_EVENT_UPLOAD_PREFIX:-events/}
      LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: ${LANGFUSE_S3_MEDIA_UPLOAD_BUCKET:-langfuse}
      LANGFUSE_S3_MEDIA_UPLOAD_REGION: ${LANGFUSE_S3_MEDIA_UPLOAD_REGION:-auto}
      LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      # Media endpoint defaults to a host-reachable address — presumably
      # because browsers use presigned URLs directly; confirm before changing.
      LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: ${LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE:-true}
      LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: ${LANGFUSE_S3_MEDIA_UPLOAD_PREFIX:-media/}
      LANGFUSE_S3_BATCH_EXPORT_ENABLED: ${LANGFUSE_S3_BATCH_EXPORT_ENABLED:-false}
      LANGFUSE_S3_BATCH_EXPORT_BUCKET: ${LANGFUSE_S3_BATCH_EXPORT_BUCKET:-langfuse}
      LANGFUSE_S3_BATCH_EXPORT_PREFIX: ${LANGFUSE_S3_BATCH_EXPORT_PREFIX:-exports/}
      LANGFUSE_S3_BATCH_EXPORT_REGION: ${LANGFUSE_S3_BATCH_EXPORT_REGION:-auto}
      LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_ENDPOINT:-http://minio:9000}
      LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT:-http://localhost:9090}
      LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: ${LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID:-minio}
      LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: ${MINIO_ROOT_PASSWORD}
      LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: ${LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE:-true}
      LANGFUSE_INGESTION_QUEUE_DELAY_MS: ${LANGFUSE_INGESTION_QUEUE_DELAY_MS:-}
      LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: ${LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS:-}
      # Redis queue
      REDIS_HOST: ${REDIS_HOST:-redis}
      REDIS_PORT: ${REDIS_PORT:-6379}
      REDIS_AUTH: ${REDIS_AUTH:-}
      REDIS_TLS_ENABLED: ${REDIS_TLS_ENABLED:-false}
      REDIS_TLS_CA: ${REDIS_TLS_CA:-/certs/ca.crt}
      REDIS_TLS_CERT: ${REDIS_TLS_CERT:-/certs/redis.crt}
      REDIS_TLS_KEY: ${REDIS_TLS_KEY:-/certs/redis.key}
  # Langfuse web UI/API; reuses the worker's environment and dependencies,
  # adding auth and first-boot bootstrap (LANGFUSE_INIT_*) settings.
  langfuse-web:
    image: langfuse/langfuse:3
    container_name: langfuse-web
    restart: always
    profiles: ["langfuse"]
    depends_on: *langfuse-depends-on
    environment:
      <<: *langfuse-worker-env
      NEXTAUTH_URL: https://${LANGFUSE_HOSTNAME}
      NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
      LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-organization_id}
      LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-Organization}
      LANGFUSE_INIT_PROJECT_ID: ${LANGFUSE_INIT_PROJECT_ID:-project_id}
      LANGFUSE_INIT_PROJECT_NAME: ${LANGFUSE_INIT_PROJECT_NAME:-Project}
      LANGFUSE_INIT_PROJECT_PUBLIC_KEY: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY:-}
      LANGFUSE_INIT_PROJECT_SECRET_KEY: ${LANGFUSE_INIT_PROJECT_SECRET_KEY:-}
      LANGFUSE_INIT_USER_EMAIL: ${LANGFUSE_INIT_USER_EMAIL:-}
      LANGFUSE_INIT_USER_NAME: ${LANGFUSE_INIT_USER_NAME:-}
      LANGFUSE_INIT_USER_PASSWORD: ${LANGFUSE_INIT_USER_PASSWORD:-}
      AUTH_DISABLE_SIGNUP: ${AUTH_DISABLE_SIGNUP:-true}
  # ClickHouse analytics store for Langfuse.
  clickhouse:
    image: clickhouse/clickhouse-server
    container_name: clickhouse
    restart: always
    profiles: ["langfuse"]
    # Run as the image's clickhouse uid/gid instead of root.
    user: "101:101"
    environment:
      CLICKHOUSE_DB: default
      CLICKHOUSE_USER: clickhouse
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
    volumes:
      - langfuse_clickhouse_data:/var/lib/clickhouse
      - langfuse_clickhouse_logs:/var/log/clickhouse-server
    healthcheck:
      test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 1s
  # MinIO S3-compatible object storage for Langfuse.
  minio:
    image: minio/minio
    container_name: minio
    restart: always
    profiles: ["langfuse"]
    entrypoint: sh
    # create the 'langfuse' bucket before starting the service
    command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD}
    volumes:
      - langfuse_minio_data:/data
    healthcheck:
      # `mc ready local` uses the client bundled in the minio image.
      test: ["CMD", "mc", "ready", "local"]
      interval: 1s
      timeout: 5s
      retries: 5
      start_period: 1s
  # Shared PostgreSQL instance used by n8n, Langfuse, Postiz, WAHA and
  # LightRAG (always on — no profile).
  postgres:
    container_name: postgres
    image: postgres:${POSTGRES_VERSION:-17}
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 3s
      timeout: 3s
      retries: 10
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: postgres
    volumes:
      # TODO: Rename to postgres_data
      - langfuse_postgres_data:/var/lib/postgresql/data
redis:
container_name: redis
image: docker.io/valkey/valkey:8-alpine
command: valkey-server --save 30 1 --loglevel warning
restart: unless-stopped
volumes:
- valkey-data:/data
cap_drop:
- ALL
cap_add:
- SETGID
- SETUID
- DAC_OVERRIDE
logging:
driver: "json-file"
options:
max-size: "1m"
max-file: "1"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 3s
timeout: 10s
retries: 10
  # SearXNG metasearch engine; config dir is writable so first run can
  # generate its settings.
  searxng:
    container_name: searxng
    image: docker.io/searxng/searxng:latest
    profiles: ["searxng"]
    restart: unless-stopped
    volumes:
      - ./searxng:/etc/searxng:rw
    environment:
      SEARXNG_BASE_URL: https://${SEARXNG_HOSTNAME:-localhost}/
      UWSGI_WORKERS: ${SEARXNG_UWSGI_WORKERS:-4}
      UWSGI_THREADS: ${SEARXNG_UWSGI_THREADS:-4}
    # cap_drop: - ALL # Temporarily commented out for first run
    cap_add:
      - CHOWN
      - SETGID
      - SETUID
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"
  # Ollama runtime variants — exactly one is selected via the cpu /
  # gpu-nvidia / gpu-amd profile (they share the "ollama" container name).
  ollama-cpu:
    profiles: ["cpu"]
    <<: *service-ollama
  ollama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *service-ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
  ollama-gpu-amd:
    profiles: ["gpu-amd"]
    <<: *service-ollama
    # ROCm build plus the AMD kernel devices.
    image: ollama/ollama:rocm
    devices:
      - "/dev/kfd"
      - "/dev/dri"
ollama-pull-llama-cpu:
profiles: ["cpu"]
<<: *init-ollama
depends_on:
- ollama-cpu
ollama-pull-llama-gpu:
profiles: ["gpu-nvidia"]
<<: *init-ollama
depends_on:
- ollama-gpu
ollama-pull-llama-gpu-amd:
profiles: [gpu-amd]
<<: *init-ollama
image: ollama/ollama:rocm
depends_on:
- ollama-gpu-amd
  # Monitoring stack: Prometheus scrapes node-exporter and cadvisor;
  # Grafana visualizes (all under the "monitoring" profile).
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    profiles: ["monitoring"]
    restart: unless-stopped
    volumes:
      - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # Host-level metrics (CPU, memory, disk) from read-only host mounts.
  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    profiles: ["monitoring"]
    restart: unless-stopped
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - "--path.procfs=/host/proc"
      - "--path.sysfs=/host/sys"
      # $$ escapes Compose interpolation so the regex keeps a literal $.
      - "--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
    expose:
      - 9100
  # Per-container metrics.
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: cadvisor
    profiles: ["monitoring"]
    restart: unless-stopped
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    expose:
      - 8080
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    profiles: ["monitoring"]
    restart: unless-stopped
    environment:
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-admin}
      GF_PROVISIONING_PATH: /etc/grafana/provisioning
    volumes:
      - grafana:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
      - ./grafana/dashboards:/var/lib/grafana/dashboards # Standard path often used, let's use Grafana's managed dashboards dir
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      - prometheus
  # Crawl4AI headless-browser crawler.
  crawl4ai:
    image: unclecode/crawl4ai:latest # Use official image
    container_name: crawl4ai
    profiles: ["crawl4ai"]
    restart: unless-stopped
    shm_size: 1g # Recommended for browser operations
    env_file:
      - .env
    environment:
      <<: *proxy-env
    deploy:
      resources:
        limits:
          cpus: "1.0"
          memory: 4G # Increased based on documentation recommendation
  # Gotenberg document-to-PDF conversion API (port 3000).
  gotenberg:
    image: gotenberg/gotenberg:8
    container_name: gotenberg
    profiles: ["gotenberg"]
    restart: unless-stopped
    environment:
      # NOTE(review): DISABLE_GOOGLE_CHROME dates from Gotenberg 6; v7/v8
      # configure Chromium via --chromium-* CLI flags — verify this env var
      # is still honored by the :8 image.
      DISABLE_GOOGLE_CHROME: false
    healthcheck:
      test: ["CMD", "wget", "-qO", "/dev/null", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
  # Letta (MemGPT) agent server; the image bundles its own Postgres, hence
  # the /var/lib/postgresql/data volume.
  letta:
    image: letta/letta:latest
    container_name: letta
    profiles: ["letta"]
    restart: unless-stopped
    volumes:
      - letta_data:/var/lib/postgresql/data
    environment:
      <<: *proxy-env
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
      OLLAMA_BASE_URL: ${OLLAMA_BASE_URL:-}
      SECURE: ${LETTA_SECURE:-true}
      LETTA_SERVER_PASSWORD: ${LETTA_SERVER_PASSWORD:-}
    extra_hosts:
      - "host.docker.internal:host-gateway"
  # Weaviate vector database with API-key auth and RBAC enabled;
  # anonymous access is off.
  weaviate:
    image: cr.weaviate.io/semitechnologies/weaviate:latest
    container_name: weaviate
    profiles: ["weaviate"]
    restart: unless-stopped
    volumes:
      - weaviate_data:/var/lib/weaviate
    environment:
      QUERY_DEFAULTS_LIMIT: 25
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: "false"
      AUTHENTICATION_APIKEY_ENABLED: "true"
      AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_API_KEY}
      AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_USERNAME}
      AUTHORIZATION_ENABLE_RBAC: "true"
      AUTHORIZATION_RBAC_ROOT_USERS: ${WEAVIATE_USERNAME}
      PERSISTENCE_DATA_PATH: "/var/lib/weaviate"
      ENABLE_API_BASED_MODULES: "true"
      CLUSTER_HOSTNAME: "node1"
      # Callers must supply their own vectors (or a module per request).
      DEFAULT_VECTORIZER_MODULE: "none"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget -q --spider http://localhost:8080/v1/.well-known/ready || exit 1",
        ]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
  # RAGapp — Agentic RAG web app; fronted by Caddy (RAGAPP_HOSTNAME).
  ragapp:
    image: ragapp/ragapp:latest
    container_name: ragapp
    profiles: ["ragapp"]
    restart: unless-stopped
    environment:
      <<: *proxy-env
  # Portainer container-management UI; needs the Docker socket.
  portainer:
    image: portainer/portainer-ce:latest
    container_name: portainer
    profiles: ["portainer"]
    restart: unless-stopped
    volumes:
      - portainer_data:/data
      - ${DOCKER_SOCKET_LOCATION:-/var/run/docker.sock}:/var/run/docker.sock
  # Postiz social-media scheduler; stores data in the shared postgres
  # (schema "postiz") and redis instances.
  postiz:
    image: ghcr.io/gitroomhq/postiz-app:latest
    container_name: postiz
    profiles: ["postiz"]
    restart: always
    environment:
      <<: *proxy-env
      BACKEND_INTERNAL_URL: http://postiz:3000
      DATABASE_URL: "postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres?schema=postiz"
      DISABLE_REGISTRATION: ${POSTIZ_DISABLE_REGISTRATION}
      # :+https:// prefixes the scheme only when the hostname is set.
      FRONTEND_URL: ${POSTIZ_HOSTNAME:+https://}${POSTIZ_HOSTNAME}
      IS_GENERAL: "true" # Required for self-hosting.
      JWT_SECRET: ${JWT_SECRET}
      MAIN_URL: ${POSTIZ_HOSTNAME:+https://}${POSTIZ_HOSTNAME}
      NEXT_PUBLIC_BACKEND_URL: ${POSTIZ_HOSTNAME:+https://}${POSTIZ_HOSTNAME}/api
      NEXT_PUBLIC_UPLOAD_DIRECTORY: "/uploads"
      REDIS_URL: "redis://redis:6379"
      STORAGE_PROVIDER: "local"
      UPLOAD_DIRECTORY: "/uploads"
      # Social Media API Settings
      X_API_KEY: ${X_API_KEY}
      X_API_SECRET: ${X_API_SECRET}
      LINKEDIN_CLIENT_ID: ${LINKEDIN_CLIENT_ID}
      LINKEDIN_CLIENT_SECRET: ${LINKEDIN_CLIENT_SECRET}
      REDDIT_CLIENT_ID: ${REDDIT_CLIENT_ID}
      REDDIT_CLIENT_SECRET: ${REDDIT_CLIENT_SECRET}
      GITHUB_CLIENT_ID: ${GITHUB_CLIENT_ID}
      GITHUB_CLIENT_SECRET: ${GITHUB_CLIENT_SECRET}
      BEEHIIVE_API_KEY: ${BEEHIIVE_API_KEY}
      BEEHIIVE_PUBLICATION_ID: ${BEEHIIVE_PUBLICATION_ID}
      THREADS_APP_ID: ${THREADS_APP_ID}
      THREADS_APP_SECRET: ${THREADS_APP_SECRET}
      FACEBOOK_APP_ID: ${FACEBOOK_APP_ID}
      FACEBOOK_APP_SECRET: ${FACEBOOK_APP_SECRET}
      YOUTUBE_CLIENT_ID: ${YOUTUBE_CLIENT_ID}
      YOUTUBE_CLIENT_SECRET: ${YOUTUBE_CLIENT_SECRET}
      TIKTOK_CLIENT_ID: ${TIKTOK_CLIENT_ID}
      TIKTOK_CLIENT_SECRET: ${TIKTOK_CLIENT_SECRET}
      PINTEREST_CLIENT_ID: ${PINTEREST_CLIENT_ID}
      PINTEREST_CLIENT_SECRET: ${PINTEREST_CLIENT_SECRET}
      DRIBBBLE_CLIENT_ID: ${DRIBBBLE_CLIENT_ID}
      DRIBBBLE_CLIENT_SECRET: ${DRIBBBLE_CLIENT_SECRET}
      DISCORD_CLIENT_ID: ${DISCORD_CLIENT_ID}
      DISCORD_CLIENT_SECRET: ${DISCORD_CLIENT_SECRET}
      DISCORD_BOT_TOKEN_ID: ${DISCORD_BOT_TOKEN_ID}
      SLACK_ID: ${SLACK_ID}
      SLACK_SECRET: ${SLACK_SECRET}
      SLACK_SIGNING_SECRET: ${SLACK_SIGNING_SECRET}
      MASTODON_URL: ${MASTODON_URL}
      MASTODON_CLIENT_ID: ${MASTODON_CLIENT_ID}
      MASTODON_CLIENT_SECRET: ${MASTODON_CLIENT_SECRET}
    volumes:
      - postiz-config:/config/
      - postiz-uploads:/uploads/
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
  # Postgresus — PostgreSQL backup/monitoring UI.
  postgresus:
    image: rostislavdugin/postgresus:latest
    container_name: postgresus
    profiles: ["postgresus"]
    restart: unless-stopped
    volumes:
      - postgresus_data:/postgresus-data
  # ComfyUI image-generation workflow UI (CPU mode via CLI_ARGS).
  comfyui:
    image: yanwk/comfyui-boot:cu124-slim
    container_name: comfyui
    profiles: ["comfyui"]
    restart: unless-stopped
    environment:
      CLI_ARGS: --listen 0.0.0.0 --cpu
    volumes:
      - comfyui_data:/home/runner
    healthcheck:
      test: ["CMD", "wget", "-qO", "/dev/null", "http://localhost:8188"]
      interval: 10s
      timeout: 5s
      retries: 5
  # LibreTranslate self-hosted translation API.
  libretranslate:
    image: libretranslate/libretranslate:latest
    container_name: libretranslate
    profiles: ["libretranslate"]
    restart: unless-stopped
    environment:
      LT_API_KEYS: ${LT_API_KEYS:-false}
      LT_BATCH_LIMIT: ${LT_BATCH_LIMIT:-}
      LT_CHAR_LIMIT: ${LT_CHAR_LIMIT:-10000}
      LT_DEBUG: ${LT_DEBUG:-false}
      LT_FRONTEND_LANGUAGE_SOURCE: ${LT_FRONTEND_LANGUAGE_SOURCE:-auto}
      LT_FRONTEND_LANGUAGE_TARGET: ${LT_FRONTEND_LANGUAGE_TARGET:-en}
      LT_FRONTEND_TIMEOUT: ${LT_FRONTEND_TIMEOUT:-2000}
      LT_HOST: ${LT_HOST:-0.0.0.0}
      LT_LOAD_ONLY: ${LT_LOAD_ONLY:-}
      LT_METRICS: ${LT_METRICS:-false}
      LT_PORT: ${LT_PORT:-5000}
      LT_REQ_LIMIT: ${LT_REQ_LIMIT:-}
      LT_SSL: ${LT_SSL:-false}
      LT_SUGGESTIONS: ${LT_SUGGESTIONS:-false}
      LT_THREADS: ${LT_THREADS:-4}
      LT_UPDATE_MODELS: ${LT_UPDATE_MODELS:-false}
    volumes:
      - libretranslate_api_keys:/app/db
      - libretranslate_models:/home/libretranslate/.local:rw
    healthcheck:
      # Uses the healthcheck script shipped inside the image.
      test: ["CMD-SHELL", "./venv/bin/python scripts/healthcheck.py"]
  # Generic Python runner: installs ./python-runner/requirements.txt (if
  # present) on every start, then runs main.py. Note: because restart is
  # unless-stopped, pip install re-runs on every container restart.
  python-runner:
    image: python:3.11-slim
    container_name: python-runner
    profiles: ["python-runner"]
    restart: unless-stopped
    working_dir: /app
    command: /bin/sh -c 'if [ -f /app/requirements.txt ]; then python -m pip install --no-cache-dir -r /app/requirements.txt; fi; python /app/main.py'
    volumes:
      - ./python-runner:/app
  # PaddleOCR serving via PaddleX. NOTE(review): paddlex and its serving
  # extras are pip-installed at every container start — slow and
  # network-dependent; a custom image would make startup deterministic.
  paddleocr:
    image: paddlepaddle/paddle:3.1.0
    container_name: paddleocr
    profiles: ["paddleocr"]
    restart: unless-stopped
    # python -m paddlex --get_pipeline_config PP-ChatOCRv4-doc --save_path /app; \
    command: /bin/sh -c "set -e; \
      python -m pip install --upgrade pip; \
      python -m pip install --no-cache-dir 'paddlex[all,ocr,ie]'; \
      python -m paddlex --install serving; \
      exec python -m paddlex --serve --pipeline '/app/ocr_config.yml' --device cpu --port 8080"
    volumes:
      - ./paddlex/ocr_config.yml:/app/ocr_config.yml:ro
      - paddleocr_cache:/root/.paddleocr
      - paddle_cache:/root/.cache/paddle
      - paddlex_data:/root/.paddlex
    healthcheck:
      test:
        ["CMD-SHELL", "wget -qO- http://localhost:8080 > /dev/null || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 5
  # WAHA WhatsApp HTTP API; sessions persist in the shared postgres,
  # runtime state in redis.
  waha:
    image: devlikeapro/waha:latest
    container_name: waha
    profiles: ["waha"]
    restart: unless-stopped
    environment:
      WAHA_ENGINE: ${WAHA_ENGINE}
      WAHA_API_KEY: ${WAHA_API_KEY}
      WAHA_DASHBOARD_USERNAME: ${WAHA_DASHBOARD_USERNAME}
      WAHA_DASHBOARD_PASSWORD: ${WAHA_DASHBOARD_PASSWORD}
      WHATSAPP_SWAGGER_USERNAME: ${WHATSAPP_SWAGGER_USERNAME}
      WHATSAPP_SWAGGER_PASSWORD: ${WHATSAPP_SWAGGER_PASSWORD}
      WAHA_DASHBOARD_ENABLED: ${WAHA_DASHBOARD_ENABLED:-true}
      WHATSAPP_SWAGGER_ENABLED: ${WHATSAPP_SWAGGER_ENABLED:-true}
      WAHA_BASE_URL: http://waha:3000
      REDIS_URL: redis://redis:6379
      WHATSAPP_SESSIONS_POSTGRESQL_URL: postgres://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres?sslmode=disable
    depends_on:
      redis:
        condition: service_healthy
      postgres:
        condition: service_healthy
  # RAGFlow main service; uses its own dedicated backing stores
  # (ragflow-elasticsearch / -mysql / -minio / -redis) below.
  ragflow:
    image: infiniflow/ragflow:latest
    container_name: ragflow
    profiles: ["ragflow"]
    restart: unless-stopped
    environment:
      <<: *proxy-env
      SVR_HTTP_PORT: 80
      REDIS_HOST: ragflow-redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${RAGFLOW_REDIS_PASSWORD}
      MYSQL_HOST: ragflow-mysql
      MYSQL_PORT: 3306
      MYSQL_USER: root
      MYSQL_PASSWORD: ${RAGFLOW_MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: rag_flow
      DOC_ENGINE: elasticsearch
      ES_HOST: ragflow-elasticsearch
      ES_PORT: 9200
      ELASTIC_PASSWORD: ${RAGFLOW_ELASTICSEARCH_PASSWORD}
      MINIO_HOST: ragflow-minio
      MINIO_PORT: 9000
      MINIO_USER: minio
      MINIO_PASSWORD: ${RAGFLOW_MINIO_ROOT_PASSWORD}
      MINIO_BUCKET: ragflow
    volumes:
      - ragflow_data:/ragflow
      - ./ragflow/nginx.conf:/etc/nginx/sites-available/default:ro
    depends_on:
      ragflow-elasticsearch:
        condition: service_healthy
      ragflow-mysql:
        condition: service_healthy
      ragflow-minio:
        condition: service_healthy
      ragflow-redis:
        condition: service_healthy
  # Dedicated MySQL for RAGFlow.
  ragflow-mysql:
    image: mysql:8
    container_name: ragflow-mysql
    profiles: ["ragflow"]
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${RAGFLOW_MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: rag_flow
    healthcheck:
      test: ["CMD-SHELL", "mysqladmin ping -h localhost || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s
    volumes:
      - ragflow_mysql_data:/var/lib/mysql
  # Dedicated MinIO for RAGFlow; the 'ragflow' bucket is created before the
  # server starts (same pattern as the langfuse minio service).
  ragflow-minio:
    image: minio/minio
    container_name: ragflow-minio
    profiles: ["ragflow"]
    restart: unless-stopped
    entrypoint: sh
    command: -c 'mkdir -p /data/ragflow && minio server --address ":9000" --console-address ":9001" /data'
    environment:
      MINIO_ROOT_USER: minio
      MINIO_ROOT_PASSWORD: ${RAGFLOW_MINIO_ROOT_PASSWORD}
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    volumes:
      - ragflow_minio_data:/data
  # Dedicated password-protected Valkey for RAGFlow.
  ragflow-redis:
    image: docker.io/valkey/valkey:8-alpine
    container_name: ragflow-redis
    profiles: ["ragflow"]
    restart: unless-stopped
    command: valkey-server --save 30 1 --loglevel warning --requirepass ${RAGFLOW_REDIS_PASSWORD}
    volumes:
      - ragflow_redis_data:/data
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
      - DAC_OVERRIDE
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "1"
    healthcheck:
      test: ["CMD", "valkey-cli", "-a", "${RAGFLOW_REDIS_PASSWORD}", "ping"]
      interval: 3s
      timeout: 10s
      retries: 10
  # Dedicated single-node Elasticsearch for RAGFlow (security on, TLS off —
  # internal network only). Settings are injected as env vars, which the ES
  # image maps into elasticsearch.yml.
  ragflow-elasticsearch:
    image: elasticsearch:8.11.3
    container_name: ragflow-elasticsearch
    profiles: ["ragflow"]
    restart: unless-stopped
    environment:
      node.name: ragflow-es01
      discovery.type: single-node
      ELASTIC_PASSWORD: ${RAGFLOW_ELASTICSEARCH_PASSWORD}
      bootstrap.memory_lock: false
      xpack.security.enabled: true
      xpack.security.http.ssl.enabled: false
      xpack.security.transport.ssl.enabled: false
      # Lowered disk watermarks for small hosts.
      cluster.routing.allocation.disk.watermark.low: 5gb
      cluster.routing.allocation.disk.watermark.high: 3gb
      cluster.routing.allocation.disk.watermark.flood_stage: 2gb
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - ragflow_elasticsearch_data:/usr/share/elasticsearch/data
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -u elastic:${RAGFLOW_ELASTICSEARCH_PASSWORD} http://localhost:9200/_cluster/health || exit 1",
        ]
      interval: 10s
      timeout: 10s
      # High retry count tolerates very slow first boots.
      retries: 120
      start_period: 30s
    deploy:
      resources:
        limits:
          memory: 8g
  # LightRAG graph-based RAG server; LLM and embeddings via ollama, optional
  # PostgreSQL/Neo4j storage backends selected through env.
  lightrag:
    image: ghcr.io/hkuds/lightrag:latest
    container_name: lightrag
    profiles: ["lightrag"]
    restart: unless-stopped
    environment:
      <<: *proxy-env
      # Server Configuration
      HOST: 0.0.0.0
      PORT: 9621
      WEBUI_TITLE: LightRAG Knowledge Graph
      WEBUI_DESCRIPTION: Graph-based RAG with Knowledge Extraction
      # Authentication (Built-in)
      AUTH_ACCOUNTS: ${LIGHTRAG_USERNAME}:${LIGHTRAG_PASSWORD}
      LIGHTRAG_API_KEY: ${LIGHTRAG_API_KEY}
      # LLM Configuration (Ollama)
      LLM_BINDING: ollama
      LLM_MODEL: qwen2.5:32b
      LLM_BINDING_HOST: http://ollama:11434
      OLLAMA_LLM_NUM_CTX: 32768
      MAX_ASYNC: 4
      # Embedding Configuration (Ollama)
      EMBEDDING_BINDING: ollama
      EMBEDDING_MODEL: bge-m3:latest
      EMBEDDING_DIM: 1024
      EMBEDDING_BINDING_HOST: http://ollama:11434
      OLLAMA_EMBEDDING_NUM_CTX: 8192
      EMBEDDING_FUNC_MAX_ASYNC: 16
      EMBEDDING_BATCH_NUM: 32
      # Query Configuration
      ENABLE_LLM_CACHE: true
      TOP_K: 60
      CHUNK_TOP_K: 20
      COSINE_THRESHOLD: 0.2
      MAX_ENTITY_TOKENS: 6000
      MAX_RELATION_TOKENS: 8000
      MAX_TOTAL_TOKENS: 30000
      # Document Processing
      ENABLE_LLM_CACHE_FOR_EXTRACT: true
      SUMMARY_LANGUAGE: English
      CHUNK_SIZE: 1200
      CHUNK_OVERLAP_SIZE: 100
      SUMMARY_MAX_TOKENS: 500
      SUMMARY_CONTEXT_SIZE: 10000
      # Storage Configuration (Flexible - uses PostgreSQL/Neo4j if available)
      LIGHTRAG_KV_STORAGE: JsonKVStorage
      LIGHTRAG_DOC_STATUS_STORAGE: JsonDocStatusStorage
      LIGHTRAG_GRAPH_STORAGE: ${LIGHTRAG_GRAPH_STORAGE:-NetworkXStorage}
      LIGHTRAG_VECTOR_STORAGE: ${LIGHTRAG_VECTOR_STORAGE:-NanoVectorDBStorage}
      # PostgreSQL Configuration (if using PostgreSQL storage)
      POSTGRES_HOST: postgres
      POSTGRES_PORT: 5432
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DATABASE: postgres
      POSTGRES_MAX_CONNECTIONS: 12
      POSTGRES_VECTOR_INDEX_TYPE: HNSW
      POSTGRES_HNSW_M: 16
      POSTGRES_HNSW_EF: 200
      # Neo4j Configuration (if using Neo4j storage)
      NEO4J_URI: bolt://neo4j:7687
      NEO4J_USERNAME: ${NEO4J_AUTH_USERNAME:-neo4j}
      NEO4J_PASSWORD: ${NEO4J_AUTH_PASSWORD}
      NEO4J_DATABASE: neo4j
      # Directories
      INPUT_DIR: /app/data/inputs
      WORKING_DIR: /app/data/rag_storage
    volumes:
      - lightrag_data:/app/data/rag_storage
      - lightrag_inputs:/app/data/inputs
    extra_hosts:
      - "host.docker.internal:host-gateway"
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget -qO- http://localhost:9621/health > /dev/null 2>&1 || exit 1",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 60s
  # Docling document-conversion service (CPU image by default; override via
  # DOCLING_IMAGE for GPU builds).
  docling:
    image: ${DOCLING_IMAGE:-ghcr.io/docling-project/docling-serve-cpu}
    container_name: docling
    profiles: ["docling"]
    restart: unless-stopped
    environment:
      <<: *proxy-env
      DOCLING_SERVE_ENABLE_UI: 1
      DOCLING_SERVE_ENABLE_REMOTE_SERVICES: ${DOCLING_SERVE_ENABLE_REMOTE_SERVICES:-true}
      DOCLING_SERVE_LOAD_MODELS_AT_BOOT: ${DOCLING_SERVE_LOAD_MODELS_AT_BOOT:-false}
      DOCLING_DEVICE: ${DOCLING_DEVICE:-cpu}
    volumes:
      - docling_cache:/opt/app-root/src/.cache
    shm_size: 1g
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget -qO- http://localhost:5001/docs > /dev/null || exit 1",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s