mirror of
https://github.com/kossakovsky/n8n-install.git
synced 2026-03-21 16:31:03 +00:00
Add Ollama service configuration to docker-compose.yml and update related files
- Introduced Ollama service with CPU and GPU profiles in docker-compose.yml, allowing users to run large language models locally.
- Added Ollama selection option in the wizard script for hardware profile configuration.
- Updated README.md to include Ollama as a new available service.
- Adjusted .env.example to include GRAFANA_HOSTNAME in the correct position.
This commit is contained in:
@@ -103,8 +103,8 @@ WEBUI_HOSTNAME=webui.yourdomain.com
|
||||
FLOWISE_HOSTNAME=flowise.yourdomain.com
|
||||
SUPABASE_HOSTNAME=supabase.yourdomain.com
|
||||
LANGFUSE_HOSTNAME=langfuse.yourdomain.com
|
||||
GRAFANA_HOSTNAME=grafana.yourdomain.com
|
||||
SEARXNG_HOSTNAME=searxng.yourdomain.com
|
||||
GRAFANA_HOSTNAME=grafana.yourdomain.com
|
||||
PROMETHEUS_HOSTNAME=prometheus.yourdomain.com
|
||||
LETTA_HOSTNAME=letta.yourdomain.com
|
||||
LETSENCRYPT_EMAIL=
|
||||
|
||||
@@ -42,6 +42,8 @@ The installer also makes the following powerful open-source tools **available fo
|
||||
|
||||
✅ [**Letta**](https://docs.letta.com/) - An open-source agent server and SDK that can be connected to various LLM API backends (OpenAI, Anthropic, Ollama, etc.), enabling you to build and manage AI agents.
|
||||
|
||||
✅ [**Ollama**](https://ollama.com/) - Run Llama 3, Mistral, Gemma, and other large language models locally.
|
||||
|
||||
✅ [**Prometheus**](https://prometheus.io/) - An open-source monitoring and alerting toolkit to keep an eye on system health.
|
||||
|
||||
✅ [**Grafana**](https://grafana.com/) - An open-source platform for visualizing monitoring data, helping you understand system performance at a glance.
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
volumes:
|
||||
n8n_storage:
|
||||
ollama_storage:
|
||||
qdrant_storage:
|
||||
open-webui:
|
||||
flowise:
|
||||
@@ -39,6 +40,29 @@ x-n8n: &service-n8n
|
||||
NODE_FUNCTION_ALLOW_BUILTIN: "*"
|
||||
NODE_FUNCTION_ALLOW_EXTERNAL: cheerio,axios,moment,lodash
|
||||
|
||||
x-ollama: &service-ollama
|
||||
image: ollama/ollama:latest
|
||||
container_name: ollama
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- OLLAMA_CONTEXT_LENGTH=8192
|
||||
volumes:
|
||||
- ollama_storage:/root/.ollama
|
||||
|
||||
x-init-ollama: &init-ollama
|
||||
image: ollama/ollama:latest
|
||||
container_name: ollama-pull-llama
|
||||
volumes:
|
||||
- ollama_storage:/root/.ollama
|
||||
entrypoint: /bin/sh
|
||||
command:
|
||||
- "-c"
|
||||
- "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull qwen2.5:7b-instruct-q4_K_M; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
|
||||
# For a larger context length version of the model, run these commands:
|
||||
# echo "FROM qwen2.5:7b-instruct-q4_K_M\n\nPARAMETER num_ctx 8192" > Modelfile
|
||||
# ollama create qwen2.5:7b-8k -f ./Modelfile
|
||||
# Change the name of the LLM and num_ctx as you see fit.
|
||||
|
||||
services:
|
||||
flowise:
|
||||
image: flowiseai/flowise
|
||||
@@ -334,6 +358,48 @@ services:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
|
||||
ollama-cpu:
|
||||
profiles: ["cpu"]
|
||||
<<: *service-ollama
|
||||
|
||||
ollama-gpu:
|
||||
profiles: ["gpu-nvidia"]
|
||||
<<: *service-ollama
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
|
||||
ollama-gpu-amd:
|
||||
profiles: ["gpu-amd"]
|
||||
<<: *service-ollama
|
||||
image: ollama/ollama:rocm
|
||||
devices:
|
||||
- "/dev/kfd"
|
||||
- "/dev/dri"
|
||||
|
||||
ollama-pull-llama-cpu:
|
||||
profiles: ["cpu"]
|
||||
<<: *init-ollama
|
||||
depends_on:
|
||||
- ollama-cpu
|
||||
|
||||
ollama-pull-llama-gpu:
|
||||
profiles: ["gpu-nvidia"]
|
||||
<<: *init-ollama
|
||||
depends_on:
|
||||
- ollama-gpu
|
||||
|
||||
ollama-pull-llama-gpu-amd:
|
||||
profiles: [gpu-amd]
|
||||
<<: *init-ollama
|
||||
image: ollama/ollama:rocm
|
||||
depends_on:
|
||||
- ollama-gpu-amd
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
container_name: prometheus
|
||||
|
||||
@@ -48,6 +48,7 @@ services=(
|
||||
"searxng" "SearXNG (Private Metasearch Engine)" "OFF"
|
||||
"crawl4ai" "Crawl4ai (Web Crawler for AI)" "OFF"
|
||||
"letta" "Letta (Agent Server & SDK)" "OFF"
|
||||
"ollama" "Ollama (Local LLM Runner - select hardware in next step)" "OFF"
|
||||
)
|
||||
|
||||
# Use whiptail to display the checklist
|
||||
@@ -83,10 +84,47 @@ fi
|
||||
|
||||
# Process selected services
|
||||
selected_profiles=()
|
||||
ollama_selected=0
|
||||
ollama_profile=""
|
||||
|
||||
if [ -n "$CHOICES" ]; then
|
||||
# Whiptail returns a string like "tag1" "tag2" "tag3"
|
||||
# We need to remove quotes and convert to an array
|
||||
eval "selected_profiles=($CHOICES)"
|
||||
temp_choices=()
|
||||
eval "temp_choices=($CHOICES)"
|
||||
|
||||
for choice in "${temp_choices[@]}"; do
|
||||
if [ "$choice" == "ollama" ]; then
|
||||
ollama_selected=1
|
||||
else
|
||||
selected_profiles+=("$choice")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# If Ollama was selected, prompt for the hardware profile
|
||||
if [ $ollama_selected -eq 1 ]; then
|
||||
ollama_hardware_options=(
|
||||
"cpu" "CPU (Recommended for most users)" "ON"
|
||||
"gpu-nvidia" "NVIDIA GPU (Requires NVIDIA drivers & CUDA)" "OFF"
|
||||
"gpu-amd" "AMD GPU (Requires ROCm drivers)" "OFF"
|
||||
)
|
||||
CHOSEN_OLLAMA_PROFILE=$(whiptail --title "Ollama Hardware Profile" --radiolist \
|
||||
"Choose the hardware profile for Ollama. This will be added to your Docker Compose profiles." 15 78 3 \
|
||||
"${ollama_hardware_options[@]}" \
|
||||
3>&1 1>&2 2>&3)
|
||||
|
||||
ollama_exitstatus=$?
|
||||
if [ $ollama_exitstatus -eq 0 ] && [ -n "$CHOSEN_OLLAMA_PROFILE" ]; then
|
||||
selected_profiles+=("$CHOSEN_OLLAMA_PROFILE")
|
||||
ollama_profile="$CHOSEN_OLLAMA_PROFILE" # Store for user message
|
||||
echo "INFO: Ollama hardware profile selected: $CHOSEN_OLLAMA_PROFILE"
|
||||
else
|
||||
echo "INFO: Ollama hardware profile selection cancelled or no choice made. Ollama will not be configured with a specific hardware profile."
|
||||
# ollama_selected remains 1, but no specific profile is added.
|
||||
# This means "ollama" won't be in COMPOSE_PROFILES unless a hardware profile is chosen.
|
||||
ollama_selected=0 # Mark as not fully selected if profile choice is cancelled
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "--------------------------------------------------------------------"
|
||||
@@ -98,7 +136,16 @@ else
|
||||
# Join the array into a comma-separated string
|
||||
COMPOSE_PROFILES_VALUE=$(IFS=,; echo "${selected_profiles[*]}")
|
||||
for profile in "${selected_profiles[@]}"; do
|
||||
echo " - $profile"
|
||||
# Check if the current profile is one of the Ollama hardware profiles
|
||||
if [ "$profile" == "cpu" ] || [ "$profile" == "gpu-nvidia" ] || [ "$profile" == "gpu-amd" ]; then
|
||||
if [ "$profile" == "$ollama_profile" ]; then # Make sure this is the ollama profile we just selected
|
||||
echo " - Ollama ($profile profile)"
|
||||
else # It could be another service that happens to be named "cpu" if we add one later
|
||||
echo " - $profile"
|
||||
fi
|
||||
else
|
||||
echo " - $profile"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo "--------------------------------------------------------------------"
|
||||
|
||||
Reference in New Issue
Block a user