support CPU only setup as well

This commit is contained in:
कारतोफ्फेलस्क्रिप्ट™
2024-02-23 17:51:33 +01:00
parent 0c8e983591
commit ab4f98debb
2 changed files with 52 additions and 36 deletions

View File

@@ -9,7 +9,9 @@ This repo helps quickly bootstrap an n8n demo environment using docker-compose.
### Setup ### Setup
- Clone this repo - Clone this repo
- **Optionally** edit the credentials in the `.env` file - **Optionally** edit the credentials in the `.env` file
- Run `docker compose up -d`, and wait a couple of minutes for all the containers to become healthy. - If you have an Nvidia GPU, run `docker compose --profile gpu-nvidia up`
- Otherwise, to run inference services on your CPU, run `docker compose --profile cpu up`
- Wait a couple of minutes for all the containers to become healthy.
### Included service endpoints ### Included service endpoints
- [n8n](http://localhost:5678/) - [n8n](http://localhost:5678/)

View File

@@ -9,6 +9,37 @@ volumes:
networks: networks:
n8n: n8n:
x-ollama: &service-ollama
image: ollama/ollama:latest
container_name: ollama
networks: ['n8n']
restart: unless-stopped
ports:
- "11434:11434"
volumes:
- ollama_storage:/root/.ollama
x-infinity: &service-infinity
image: michaelf34/infinity
container_name: infinity
networks: ['n8n']
restart: unless-stopped
environment:
- SENTENCE_TRANSFORMERS_HOME=/infinity
- MODEL_ID=BAAI/bge-small-en-v1.5
ports:
- "7997:7997"
volumes:
- infinity_storage:/infinity
x-gpu-support: &deploy-gpu
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
services: services:
postgres: postgres:
image: postgres:16-alpine image: postgres:16-alpine
@@ -55,23 +86,6 @@ services:
postgres: postgres:
condition: service_healthy condition: service_healthy
ollama:
image: ollama/ollama:latest
container_name: ollama
networks: ['n8n']
restart: unless-stopped
ports:
- 11434:11434
volumes:
- ollama_storage:/root/.ollama
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
qdrant: qdrant:
image: qdrant/qdrant image: qdrant/qdrant
container_name: qdrant container_name: qdrant
@@ -82,22 +96,22 @@ services:
volumes: volumes:
- qdrant_storage:/qdrant/storage - qdrant_storage:/qdrant/storage
infinity: ollama-cpu:
image: michaelf34/infinity profiles: ["cpu"]
container_name: infinity <<: *service-ollama
networks: ['n8n']
restart: unless-stopped infinity-cpu:
environment: profiles: ["cpu"]
- SENTENCE_TRANSFORMERS_HOME=/infinity <<: *service-infinity
- MODEL_ID=BAAI/bge-small-en-v1.5
ports: ollama-gpu:
- 7997:7997 profiles: ["gpu-nvidia"]
volumes: <<: *service-ollama
- infinity_storage:/infinity
deploy: deploy:
resources: <<: *deploy-gpu
reservations:
devices: infinity-gpu:
- driver: nvidia profiles: ["gpu-nvidia"]
count: 1 <<: *service-infinity
capabilities: [gpu] deploy:
<<: *deploy-gpu