mirror of https://github.com/n8n-io/self-hosted-ai-starter-kit.git
synced 2025-11-29 00:23:13 +00:00

Add Ollama init container (#1)

Changed files: README.md (11 changed lines), docker-compose.yml, and the bundled demo workflow JSON.
README.md

@@ -9,9 +9,14 @@ This repo helps quickly bootstrap an n8n demo environment using docker-compose.
 ### Setup
 
 - Clone this repo
 - **Optionally** edit the credentials in the `.env` file
-- If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
-- Otherwise to run inference services on your CPU, run `docker compose --profile cpu up`
-- Wait a couple of minutes for all the containers to become healthy.
+- Start the containers:
+  - If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
+  - Otherwise to run inference services on your CPU, run `docker compose --profile cpu up`
+- Wait a couple of minutes for all the containers to become healthy
+- Open http://localhost:5678 in your browser and fill in the details
+- Open the included workflow: http://localhost:5678/workflow/srOnR8PAY3u4RSwb
+- Wait until Ollama has downloaded the `llama3.1` model (you can check the
+  docker console)
 
 ### Included service endpoints
 
 - [n8n](http://localhost:5678/)
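Besides the docker console, the download can also be checked from the host. A minimal sketch, assuming the ollama service publishes port 11434 to the host; the container name comes from the x-init-ollama block below:

# Follow the init container's logs to watch the pull progress
docker logs -f ollama-pull-llama

# List the models Ollama has available locally
curl http://localhost:11434/api/tags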
docker-compose.yml

@@ -32,6 +32,17 @@ x-ollama: &service-ollama
   volumes:
     - ollama_storage:/root/.ollama
 
+x-init-ollama: &init-ollama
+  image: ollama/ollama:latest
+  networks: ['demo']
+  container_name: ollama-pull-llama
+  volumes:
+    - ollama_storage:/root/.ollama
+  entrypoint: /bin/sh
+  command:
+    - "-c"
+    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull llama3.1"
+
 services:
   postgres:
     image: postgres:16-alpine
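The fixed `sleep 3` only papers over the race between the ollama server starting and the pull beginning. A more robust variant (not part of this commit, shown only as a sketch) polls the server until it answers before pulling:

x-init-ollama: &init-ollama
  image: ollama/ollama:latest
  networks: ['demo']
  container_name: ollama-pull-llama
  volumes:
    - ollama_storage:/root/.ollama
  entrypoint: /bin/sh
  command:
    - "-c"
    # Retry until the server at ollama:11434 responds, then pull the model
    - "export OLLAMA_HOST=ollama:11434; until ollama list >/dev/null 2>&1; do sleep 1; done; ollama pull llama3.1"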
@@ -101,3 +112,15 @@ services:
             - driver: nvidia
               count: 1
               capabilities: [gpu]
+
+  ollama-pull-llama-cpu:
+    profiles: ["cpu"]
+    <<: *init-ollama
+    depends_on:
+      - ollama-cpu
+
+  ollama-pull-llama-gpu:
+    profiles: ["gpu-nvidia"]
+    <<: *init-ollama
+    depends_on:
+      - ollama-gpu
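Each pull service reuses the shared definition through a YAML merge key: `<<: *init-ollama` splices every key of the anchored mapping into the service, and any key set locally (here `profiles` and `depends_on`) overrides the merged one. A minimal sketch of the mechanics, with hypothetical names:

# toy example, not from this repo
x-base: &base
  image: alpine:3
  command: ["echo", "from base"]

services:
  a:
    <<: *base                    # a gets image and command from &base
  b:
    <<: *base
    command: ["echo", "from b"]  # local key overrides the merged one

Because the cpu and gpu-nvidia profiles are not started together, the two pull services can share the same container_name without colliding.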
And the included demo workflow JSON:

@@ -30,6 +30,7 @@
   },
   {
     "parameters": {
+      "model": "llama3.1:latest",
       "options": {}
     },
     "id": "3dee878b-d748-4829-ac0a-cfd6705d31e5",
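The workflow pins the model as `llama3.1:latest`, which matches what the init container fetches: `ollama pull llama3.1` resolves the bare name to the `latest` tag. For example:

# These refer to the same model; a bare name implies the :latest tag
ollama pull llama3.1
ollama run llama3.1:latest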