Small doc fixes

This commit is contained in:
Cole Medin
2025-06-11 09:20:44 -05:00
parent 02f7b94a03
commit 11667bcf4b
3 changed files with 9 additions and 7 deletions

View File

@@ -1,8 +1,11 @@
# Change the name of this file to .env after updating it!
############
# [required]
# n8n credentials - you set this to whatever you want, just make it a long and secure string for both!
# [required]
# n8n credentials - use the command `openssl rand -hex 32` to generate both
# openssl is available by default on Linux/Mac
# For Windows, you can use the 'Git Bash' terminal installed with git
# Or run the command: python -c "import secrets; print(secrets.token_hex(32))"
############
N8N_ENCRYPTION_KEY=super-secret-key

View File

@@ -235,7 +235,7 @@ to the IP address of your cloud instance.
**NOTE**: If you are using a cloud machine without the "docker compose" command available by default, such as an Ubuntu GPU instance on DigitalOcean, run these commands before running start_services.py:
- DOCKER_COMPOSE_VERSION=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)
- DOCKER_COMPOSE_VERSION=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\\" -f4)
- sudo curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-x86_64" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose
- sudo mkdir -p /usr/local/lib/docker/cli-plugins

View File

@@ -37,6 +37,9 @@ x-ollama: &service-ollama
- 11434/tcp
environment:
- OLLAMA_CONTEXT_LENGTH=8192
- OLLAMA_FLASH_ATTENTION=1
- OLLAMA_KV_CACHE_TYPE=q8_0
- OLLAMA_MAX_LOADED_MODELS=2
volumes:
- ollama_storage:/root/.ollama
@@ -49,10 +52,6 @@ x-init-ollama: &init-ollama
command:
- "-c"
- "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull qwen2.5:7b-instruct-q4_K_M; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"
# For a larger context length version of the model, run these commands:
# echo "FROM qwen2.5:7b-instruct-q4_K_M\n\nPARAMETER num_ctx 8096" > Modelfile
# ollama create qwen2.5:7b-8k -f ./Modelfile
# Change the name of the LLM and num_ctx as you see fit.
services:
flowise: