diff --git a/README.md b/README.md
index 6823d09..9c2766d 100644
--- a/README.md
+++ b/README.md
@@ -78,8 +78,24 @@ cd self-hosted-ai-starter-kit
docker compose up
```
-After you followed the quick start set-up below, change the Ollama credentials
-by using `http://host.docker.internal:11434/` as the host.
+#### For Mac users running Ollama locally
+
+If you're running Ollama locally on your Mac (not in Docker), you need to set the OLLAMA_HOST environment variable
+in the n8n service configuration. Update the x-n8n section in your Docker Compose file as follows:
+
+```yaml
+x-n8n: &service-n8n
+ # ... other configurations ...
+ environment:
+ # ... other environment variables ...
+ - OLLAMA_HOST=host.docker.internal:11434
+```
+
+Additionally, after you see "Editor is now accessible via: http://localhost:5678/":
+
+1. Head to http://localhost:5678/home/credentials
+2. Click on "Local Ollama service"
+3. Change the base URL to "http://host.docker.internal:11434/"
#### For everyone else
diff --git a/docker-compose.yml b/docker-compose.yml
index 0bb3207..6aae070 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -19,6 +19,7 @@ x-n8n: &service-n8n
- N8N_PERSONALIZATION_ENABLED=false
- N8N_ENCRYPTION_KEY
- N8N_USER_MANAGEMENT_JWT_SECRET
+ - OLLAMA_HOST=ollama:11434
links:
- postgres
@@ -41,7 +42,7 @@ x-init-ollama: &init-ollama
entrypoint: /bin/sh
command:
- "-c"
- - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull llama3.2"
+ - "sleep 3; ollama pull llama3.2"
services:
postgres: