mirror of
https://github.com/n8n-io/self-hosted-ai-starter-kit.git
synced 2025-11-29 00:23:13 +00:00
support CPU-only setup as well
This commit is contained in:
@@ -9,7 +9,9 @@ This repo helps quickly bootstrap an n8n demo environment using docker-compose.
### Setup

- Clone this repo
- **Optionally** edit the credentials in the `.env` file
- Run `docker compose up -d`, and wait a couple of minutes for all the containers to become healthy.
- If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
- Otherwise to run inference services on your CPU, run `docker compose --profile cpu up`
- Wait a couple of minutes for all the containers to become healthy.

### Included service endpoints

- [n8n](http://localhost:5678/)
@@ -9,6 +9,37 @@ volumes:
networks:
  n8n:

x-ollama: &service-ollama
  image: ollama/ollama:latest
  container_name: ollama
  networks: ['n8n']
  restart: unless-stopped
  ports:
    - 11434:11434
  volumes:
    - ollama_storage:/root/.ollama

x-infinity: &service-infinity
  image: michaelf34/infinity
  container_name: infinity
  networks: ['n8n']
  restart: unless-stopped
  environment:
    - SENTENCE_TRANSFORMERS_HOME=/infinity
    - MODEL_ID=BAAI/bge-small-en-v1.5
  ports:
    - 7997:7997
  volumes:
    - infinity_storage:/infinity

x-gpu-support: &deploy-gpu
  resources:
    reservations:
      devices:
        - driver: nvidia
          count: 1
          capabilities: [gpu]

services:
  postgres:
    image: postgres:16-alpine
@@ -55,23 +86,6 @@ services:
      postgres:
        condition: service_healthy

  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    networks: ['n8n']
    restart: unless-stopped
    ports:
      - 11434:11434
    volumes:
      - ollama_storage:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  qdrant:
    image: qdrant/qdrant
    container_name: qdrant
@@ -82,22 +96,22 @@ services:
    volumes:
      - qdrant_storage:/qdrant/storage

  infinity:
    image: michaelf34/infinity
    container_name: infinity
    networks: ['n8n']
    restart: unless-stopped
    environment:
      - SENTENCE_TRANSFORMERS_HOME=/infinity
      - MODEL_ID=BAAI/bge-small-en-v1.5
    ports:
      - 7997:7997
    volumes:
      - infinity_storage:/infinity

  ollama-cpu:
    profiles: ["cpu"]
    <<: *service-ollama

  infinity-cpu:
    profiles: ["cpu"]
    <<: *service-infinity

  ollama-gpu:
    profiles: ["gpu-nvidia"]
    <<: *service-ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
      <<: *deploy-gpu

  infinity-gpu:
    profiles: ["gpu-nvidia"]
    <<: *service-infinity
    deploy:
      <<: *deploy-gpu
Reference in New Issue
Block a user