diff --git a/.env b/.env
deleted file mode 100644
index 49be06f..0000000
--- a/.env
+++ /dev/null
@@ -1,7 +0,0 @@
-POSTGRES_USER=root
-POSTGRES_PASSWORD=password
-POSTGRES_DB=n8n
-
-N8N_ENCRYPTION_KEY=super-secret-key
-N8N_USER_MANAGEMENT_JWT_SECRET=even-more-secret
-N8N_DEFAULT_BINARY_DATA_MODE=filesystem
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..293d39c
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,13 @@
+POSTGRES_USER=root
+POSTGRES_PASSWORD=password
+POSTGRES_DB=n8n
+
+N8N_ENCRYPTION_KEY=super-secret-key
+N8N_USER_MANAGEMENT_JWT_SECRET=even-more-secret
+N8N_DEFAULT_BINARY_DATA_MODE=filesystem
+
+# For Mac users running OLLAMA locally
+# See https://github.com/n8n-io/self-hosted-ai-starter-kit?tab=readme-ov-file#for-mac--apple-silicon-users
+# OLLAMA_HOST=host.docker.internal:11434
+
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4c49bd7
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.env
diff --git a/README.md b/README.md
index c90c1b4..830023e 100644
--- a/README.md
+++ b/README.md
@@ -42,15 +42,17 @@ Engineering world, handles large amounts of data safely.
```bash
git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
cd self-hosted-ai-starter-kit
+cp .env.example .env # you should update secrets and passwords inside
```
### Running n8n using Docker Compose
#### For Nvidia GPU users
-```
+```bash
git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
cd self-hosted-ai-starter-kit
+cp .env.example .env # you should update secrets and passwords inside
docker compose --profile gpu-nvidia up
```
@@ -60,9 +62,10 @@ docker compose --profile gpu-nvidia up
### For AMD GPU users on Linux
-```
+```bash
git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
cd self-hosted-ai-starter-kit
+cp .env.example .env # you should update secrets and passwords inside
docker compose --profile gpu-amd up
```
@@ -80,36 +83,30 @@ If you want to run Ollama on your mac, check the
[Ollama homepage](https://ollama.com/)
for installation instructions, and run the starter kit as follows:
-```
+```bash
git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
cd self-hosted-ai-starter-kit
+cp .env.example .env # you should update secrets and passwords inside
docker compose up
```
##### For Mac users running OLLAMA locally
If you're running OLLAMA locally on your Mac (not in Docker), you need to modify the OLLAMA_HOST environment variable
-in the n8n service configuration. Update the x-n8n section in your Docker Compose file as follows:
-```yaml
-x-n8n: &service-n8n
- # ... other configurations ...
- environment:
- # ... other environment variables ...
- - OLLAMA_HOST=host.docker.internal:11434
-```
+1. Set OLLAMA_HOST to `host.docker.internal:11434` in your .env file.
+2. Additionally, after you see "Editor is now accessible via: <http://localhost:5678/>":
-Additionally, after you see "Editor is now accessible via: <http://localhost:5678/>":
-
-1. Head to <http://localhost:5678/home/credentials>
-2. Click on "Local Ollama service"
-3. Change the base URL to "http://host.docker.internal:11434/"
+   1. Head to <http://localhost:5678/home/credentials>
+ 2. Click on "Local Ollama service"
+ 3. Change the base URL to "http://host.docker.internal:11434/"
#### For everyone else
-```
+```bash
git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
cd self-hosted-ai-starter-kit
+cp .env.example .env # you should update secrets and passwords inside
docker compose --profile cpu up
```
@@ -154,7 +151,7 @@ docker compose create && docker compose --profile gpu-nvidia up
* ### For Mac / Apple Silicon users
-```
+```bash
docker compose pull
docker compose create && docker compose up
```
diff --git a/docker-compose.yml b/docker-compose.yml
index b41f833..82aa1a2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -19,9 +19,10 @@ x-n8n: &service-n8n
- N8N_PERSONALIZATION_ENABLED=false
- N8N_ENCRYPTION_KEY
- N8N_USER_MANAGEMENT_JWT_SECRET
- - OLLAMA_HOST=ollama:11434
+ - OLLAMA_HOST=${OLLAMA_HOST:-ollama:11434}
env_file:
- - .env
+ - path: .env
+ required: true
x-ollama: &service-ollama
image: ollama/ollama:latest