diff --git a/README.md b/README.md
index 4c69cd1..6a8ba62 100644
--- a/README.md
+++ b/README.md
@@ -9,9 +9,14 @@ This repo helps quickly bootstrap an n8n demo environment using docker-compose.
 ### Setup
 - Clone this repo
 - **Optionally** edit the credentials in the `.env` file
-- If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
-- Otherwise to run inference services on your CPU, run `docker compose --profile cpu up`
-- Wait a couple of minutes for all the containers to become healthy.
+- Start the containers:
+  - If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
+  - Otherwise, to run inference services on your CPU, run `docker compose --profile cpu up`
+- Wait a couple of minutes for all the containers to become healthy
+- Open http://localhost:5678 in your browser and fill in the details
+- Open the included workflow: http://localhost:5678/workflow/srOnR8PAY3u4RSwb
+- Wait until Ollama has downloaded the `llama3.1` model (you can check the
+  docker console)
 
 ### Included service endpoints
 - [n8n](http://localhost:5678/)
diff --git a/docker-compose.yml b/docker-compose.yml
index 07803e4..383d574 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -32,6 +32,17 @@ x-ollama: &service-ollama
   volumes:
     - ollama_storage:/root/.ollama
 
+x-init-ollama: &init-ollama
+  image: ollama/ollama:latest
+  networks: ['demo']
+  container_name: ollama-pull-llama
+  volumes:
+    - ollama_storage:/root/.ollama
+  entrypoint: /bin/sh
+  command:
+    - "-c"
+    - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull llama3.1"
+
 services:
   postgres:
     image: postgres:16-alpine
@@ -101,3 +112,15 @@
             - driver: nvidia
               count: 1
               capabilities: [gpu]
+
+  ollama-pull-llama-cpu:
+    profiles: ["cpu"]
+    <<: *init-ollama
+    depends_on:
+      - ollama-cpu
+
+  ollama-pull-llama-gpu:
+    profiles: ["gpu-nvidia"]
+    <<: *init-ollama
+    depends_on:
+      - ollama-gpu
diff --git a/n8n/backup/workflows/srOnR8PAY3u4RSwb.json b/n8n/backup/workflows/srOnR8PAY3u4RSwb.json
index 26fc79d..5a8e5b3 100644
--- a/n8n/backup/workflows/srOnR8PAY3u4RSwb.json
+++ b/n8n/backup/workflows/srOnR8PAY3u4RSwb.json
@@ -30,6 +30,7 @@
     },
     {
       "parameters": {
+        "model": "llama3.1:latest",
         "options": {}
       },
       "id": "3dee878b-d748-4829-ac0a-cfd6705d31e5",