refactor: consolidate shared utilities and add script documentation

- move common functions to utils.sh: init_paths, read_env_var, write_env_var,
  is_profile_active, load_env, gen_password, gen_hex, gen_base64, generate_bcrypt_hash
- add documentation headers to all installation scripts
- replace duplicate code with shared utility calls
- consolidate bcrypt hash generation loop in 03_generate_secrets.sh
- add DEBIAN_FRONTEND save/restore helpers for whiptail scripts
- standardize path initialization across all scripts
This commit is contained in:
Yury Kossakovsky
2025-12-12 09:58:12 -07:00
parent e297ff27ef
commit e0018f2b2d
16 changed files with 663 additions and 607 deletions

View File

@@ -1,9 +1,22 @@
#!/bin/bash
# =============================================================================
# 01_system_preparation.sh - System preparation and security hardening
# =============================================================================
# Prepares an Ubuntu/Debian system for running Docker services:
# - Updates system packages and installs essential CLI tools
# - Configures UFW firewall (allows SSH, HTTP, HTTPS; denies other incoming)
# - Enables Fail2Ban for SSH brute-force protection
# - Sets up automatic security updates via unattended-upgrades
# - Configures vm.max_map_count for Elasticsearch (required by RAGFlow)
#
# Required: Must be run as root (sudo)
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
export DEBIAN_FRONTEND=noninteractive

View File

@@ -1,4 +1,16 @@
#!/bin/bash
# =============================================================================
# 02_install_docker.sh - Docker and Docker Compose installation
# =============================================================================
# Installs Docker Engine and Docker Compose plugin from the official repository:
# - Skips installation if Docker is already present
# - Adds Docker's official GPG key and APT repository
# - Installs docker-ce, docker-ce-cli, containerd.io, and compose plugin
# - Adds the invoking user to the 'docker' group
# - Includes retry logic for apt commands (handles lock contention)
#
# Required: Must be run as root (sudo) on Ubuntu
# =============================================================================
set -e
@@ -11,41 +23,41 @@ export DEBIAN_FRONTEND=noninteractive
APT_OPTIONS="-o Dpkg::Options::=--force-confold -o Dpkg::Options::=--force-confdef -y"
log_info "Preparing Docker installation..."
# Configuration for apt retry logic
APT_RETRY_COUNT=10
APT_RETRY_WAIT=10
# Function to run apt commands with retries for lock acquisition
run_apt_with_retry() {
local cmd_str="$*" # Capture command as a string for logging
local retries=10
local wait_time=10 # seconds
local cmd_str="$*"
for ((i=1; i<=retries; i++)); do
# Check for dpkg locks using fuser. Redirect stderr to /dev/null
for ((i=1; i<=APT_RETRY_COUNT; i++)); do
# Check for dpkg locks using fuser
if fuser /var/lib/dpkg/lock >/dev/null 2>&1 || fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; then
sleep $wait_time
sleep $APT_RETRY_WAIT
continue
fi
# Check for apt locks using fuser
if fuser /var/lib/apt/lists/lock >/dev/null 2>&1 || fuser /var/cache/apt/archives/lock >/dev/null 2>&1; then
sleep $wait_time
continue
sleep $APT_RETRY_WAIT
continue
fi
# No lock detected, attempt the command
# Use eval to correctly handle arguments with spaces/quotes passed as a single string
if eval apt-get "$@"; then
return 0 # Success
# No lock detected, attempt the command (safe argument passing without eval)
if apt-get "$@"; then
return 0
else
local exit_code=$?
if [ $i -lt $retries ]; then
sleep $wait_time
if [ $i -lt $APT_RETRY_COUNT ]; then
sleep $APT_RETRY_WAIT
else
# Attempt to remove locks if they exist and seem stale? Maybe too risky.
return $exit_code # Failed after retries
return $exit_code
fi
fi
done
log_message "Failed to acquire lock or run command after $retries attempts: apt-get $cmd_str"
return 1 # Failed after retries
log_error "Failed to acquire lock or run command after $APT_RETRY_COUNT attempts: apt-get $cmd_str"
return 1
}

View File

@@ -1,22 +1,43 @@
#!/bin/bash
# =============================================================================
# 03_generate_secrets.sh - Secret and configuration generator
# =============================================================================
# Generates secure passwords, JWT secrets, API keys, and encryption keys for
# all services. Creates the .env file from .env.example template.
#
# Features:
# - Generates cryptographically secure random values (passwords, secrets, keys)
# - Creates bcrypt hashes for Caddy basic auth using `caddy hash-password`
# - Preserves existing user-provided values in .env on re-run
# - Supports --update flag to add new variables without regenerating existing
# - Prompts for domain name and Let's Encrypt email
#
# Secret types: password (alphanum), secret (base64), hex, api_key, jwt
#
# Usage: bash scripts/03_generate_secrets.sh [--update]
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Setup cleanup for temporary files
TEMP_FILES=()
# Remove every temporary file registered in the global TEMP_FILES array.
# Intended to run from the EXIT trap; ignores already-deleted files.
cleanup_temp_files() {
  local tmp
  for tmp in "${TEMP_FILES[@]}"; do
    rm -f "$tmp" 2>/dev/null
  done
}
trap cleanup_temp_files EXIT
# Check for openssl
if ! command -v openssl &> /dev/null; then
log_error "openssl could not be found. Please ensure it is installed and available in your PATH." >&2
exit 1
fi
require_command "openssl" "Please ensure openssl is installed and available in your PATH."
# --- Configuration ---
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
TEMPLATE_FILE="$PROJECT_ROOT/.env.example"
OUTPUT_FILE="$PROJECT_ROOT/.env"
DOMAIN_PLACEHOLDER="yourdomain.com"
# Variables to generate: varName="type:length"
# Types: password (alphanum), secret (base64), hex, base64, alphanum
@@ -102,10 +123,7 @@ curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | tee /
apt install -y caddy
# Check for caddy
if ! command -v caddy &> /dev/null; then
log_error "caddy could not be found. Please ensure it is installed and available in your PATH." >&2
exit 1
fi
require_command "caddy" "Caddy installation failed. Please check the installation logs above."
require_whiptail
# Prompt for the domain name
@@ -113,7 +131,7 @@ DOMAIN="" # Initialize DOMAIN variable
# Try to get domain from existing .env file first
# Check if USER_DOMAIN_NAME is set in existing_env_vars and is not empty
if [[ -v existing_env_vars[USER_DOMAIN_NAME] && -n "${existing_env_vars[USER_DOMAIN_NAME]}" ]]; then
if [[ ${existing_env_vars[USER_DOMAIN_NAME]+_} && -n "${existing_env_vars[USER_DOMAIN_NAME]}" ]]; then
DOMAIN="${existing_env_vars[USER_DOMAIN_NAME]}"
# Ensure this value is carried over to generated_values for writing and template processing
# If it came from existing_env_vars, it might already be there, but this ensures it.
@@ -175,33 +193,7 @@ fi
log_info "Generating secrets and creating .env file..."
# --- Helper Functions ---
# Usage: gen_random <length> <characters>
# Emits exactly <length> random characters drawn from the tr set <characters>.
gen_random() {
  local length="$1"
  local characters="$2"
  # Read the random stream directly: `head /dev/urandom` takes only the first
  # 10 newline-delimited "lines" of random bytes, which is not guaranteed to
  # contain <length> characters matching the set — the old form could emit a
  # short result. LC_ALL=C keeps tr byte-oriented regardless of locale.
  LC_ALL=C tr -dc "$characters" < /dev/urandom | head -c "$length"
}
# Usage: gen_password <length>
# Convenience wrapper: an alphanumeric password of the requested length.
gen_password() {
  local len="$1"
  gen_random "$len" 'A-Za-z0-9'
}
# Usage: gen_hex <length> (length = number of hex characters)
# Emits <length> random lowercase hex characters via openssl.
gen_hex() {
  local len="$1"
  # Two hex characters per byte, rounded up; head trims any odd-length excess.
  local nbytes=$(( (len + 1) / 2 ))
  openssl rand -hex "$nbytes" | head -c "$len"
}
# Usage: gen_base64 <length> (length = number of base64 characters)
# Emits <length> characters of base64-encoded random data via openssl.
gen_base64() {
  local len="$1"
  # base64 yields 4 output chars per 3 input bytes; round the byte count up
  # (ceil division) so the encoded output is at least <length> chars long,
  # then truncate to the exact requested length.
  local nbytes=$(( (len * 3 + 3) / 4 ))
  openssl rand -base64 "$nbytes" | head -c "$len"
}
# Note: gen_random, gen_password, gen_hex, gen_base64 are now in utils.sh
# Function to update or add a variable to the .env file
# Usage: _update_or_add_env_var "VAR_NAME" "var_value"
@@ -227,26 +219,12 @@ _update_or_add_env_var() {
# trap - EXIT # Remove specific trap for this temp file if desired, or let main script's trap handle it.
}
# Function to generate a hash using Caddy
# Usage: local HASH=$(_generate_and_get_hash "$plain_password")
# Outputs a bcrypt hash of $1 on stdout, or an empty string when $1 is empty
# or `caddy hash-password` fails — callers must treat "" as failure.
_generate_and_get_hash() {
local plain_password="$1"
local new_hash=""
if [[ -n "$plain_password" ]]; then
# NOTE(review): under `set -e` a plain assignment whose command substitution
# fails can abort before the $? check below runs — presumably this is masked
# because callers invoke the function inside $(...); confirm.
new_hash=$(caddy hash-password --algorithm bcrypt --plaintext "$plain_password" 2>/dev/null)
# $? reflects the exit status of the command substitution just above.
if [[ $? -ne 0 || -z "$new_hash" ]]; then
# Optionally, log a warning here if logging was re-enabled
# echo "Warning: Failed to hash password for use with $1 (placeholder)" >&2
new_hash="" # Ensure it's empty on failure
fi
fi
echo "$new_hash"
}
# Note: generate_bcrypt_hash() is now in utils.sh
# --- Main Logic ---
if [ ! -f "$TEMPLATE_FILE" ]; then
log_error "Template file not found at $TEMPLATE_FILE" >&2
log_error "Template file not found at $TEMPLATE_FILE"
exit 1
fi
@@ -278,8 +256,7 @@ generated_values["WELCOME_USERNAME"]="$USER_EMAIL" # Set Welcome page username f
# Create a temporary file for processing
TMP_ENV_FILE=$(mktemp)
# Ensure temp file is cleaned up on exit
trap 'rm -f "$TMP_ENV_FILE"' EXIT
TEMP_FILES+=("$TMP_ENV_FILE")
# Track whether our custom variables were found in the template
declare -A found_vars
@@ -320,7 +297,7 @@ while IFS= read -r line || [[ -n "$line" ]]; do
# Check if this is one of our user-input derived variables that might not have a value yet
# (e.g. OPENAI_API_KEY if user left it blank). These are handled by `found_vars` later if needed.
# Or, if variable needs generation AND is not already populated (or is empty) in generated_values
elif [[ -v VARS_TO_GENERATE["$varName"] && -z "${generated_values[$varName]}" ]]; then
elif [[ ${VARS_TO_GENERATE[$varName]+_} && -z "${generated_values[$varName]}" ]]; then
IFS=':' read -r type length <<< "${VARS_TO_GENERATE[$varName]}"
newValue=""
case "$type" in
@@ -355,7 +332,7 @@ while IFS= read -r line || [[ -n "$line" ]]; do
is_user_input_var=1
# Mark as found if it's in template, value taken from generated_values if already set or blank
found_vars["$varName"]=1
if [[ -v generated_values[$varName] ]]; then # if it was set (even to empty by user)
if [[ ${generated_values[$varName]+_} ]]; then # if it was set (even to empty by user)
processed_line="${varName}=\"${generated_values[$varName]}\""
else # Not set in generated_values, keep template's default if any, or make it empty
if [[ "$currentValue" =~ ^\$\{.*\} || -z "$currentValue" ]]; then # if template is ${VAR} or empty
@@ -432,7 +409,7 @@ fi
# Add any custom variables that weren't found in the template
for var in "FLOWISE_USERNAME" "DASHBOARD_USERNAME" "LETSENCRYPT_EMAIL" "RUN_N8N_IMPORT" "OPENAI_API_KEY" "PROMETHEUS_USERNAME" "SEARXNG_USERNAME" "LANGFUSE_INIT_USER_EMAIL" "N8N_WORKER_COUNT" "WEAVIATE_USERNAME" "NEO4J_AUTH_USERNAME" "COMFYUI_USERNAME" "RAGAPP_USERNAME" "PADDLEOCR_USERNAME" "LT_USERNAME" "LIGHTRAG_USERNAME" "WAHA_DASHBOARD_USERNAME" "WELCOME_USERNAME" "WHATSAPP_SWAGGER_USERNAME" "DOCLING_USERNAME"; do
if [[ ${found_vars["$var"]} -eq 0 && -v generated_values["$var"] ]]; then
if [[ ${found_vars["$var"]} -eq 0 && ${generated_values[$var]+_} ]]; then
# Before appending, check if it's already in TMP_ENV_FILE to avoid duplicates
if ! grep -q -E "^${var}=" "$TMP_ENV_FILE"; then
echo "${var}=\"${generated_values[$var]}\"" >> "$TMP_ENV_FILE" # Ensure quoting
@@ -530,119 +507,34 @@ fi
_update_or_add_env_var "WAHA_API_KEY_PLAIN" "${generated_values[WAHA_API_KEY_PLAIN]}"
_update_or_add_env_var "WAHA_API_KEY" "${generated_values[WAHA_API_KEY]}"
# Hash passwords using caddy with bcrypt
PROMETHEUS_PLAIN_PASS="${generated_values["PROMETHEUS_PASSWORD"]}"
SEARXNG_PLAIN_PASS="${generated_values["SEARXNG_PASSWORD"]}"
# Hash passwords using caddy with bcrypt (consolidated loop)
SERVICES_NEEDING_HASH=("PROMETHEUS" "SEARXNG" "COMFYUI" "PADDLEOCR" "RAGAPP" "LT" "DOCLING" "WELCOME")
# --- PROMETHEUS ---
# Try to get existing hash from memory (populated from .env if it was there)
FINAL_PROMETHEUS_HASH="${generated_values[PROMETHEUS_PASSWORD_HASH]}"
for service in "${SERVICES_NEEDING_HASH[@]}"; do
password_var="${service}_PASSWORD"
hash_var="${service}_PASSWORD_HASH"
# If no hash in memory, but we have a plain password, generate a new hash
if [[ -z "$FINAL_PROMETHEUS_HASH" && -n "$PROMETHEUS_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$PROMETHEUS_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_PROMETHEUS_HASH="$NEW_HASH"
generated_values["PROMETHEUS_PASSWORD_HASH"]="$NEW_HASH" # Update memory for consistency
plain_pass="${generated_values[$password_var]}"
existing_hash="${generated_values[$hash_var]}"
# If no hash exists but we have a plain password, generate new hash
if [[ -z "$existing_hash" && -n "$plain_pass" ]]; then
new_hash=$(generate_bcrypt_hash "$plain_pass")
if [[ -n "$new_hash" ]]; then
existing_hash="$new_hash"
generated_values["$hash_var"]="$new_hash"
fi
fi
fi
# Update the .env file with the final determined hash (could be empty if no plain pass or hash failed)
_update_or_add_env_var "PROMETHEUS_PASSWORD_HASH" "$FINAL_PROMETHEUS_HASH"
# --- SEARXNG ---
FINAL_SEARXNG_HASH="${generated_values[SEARXNG_PASSWORD_HASH]}"
_update_or_add_env_var "$hash_var" "$existing_hash"
done
if [[ -z "$FINAL_SEARXNG_HASH" && -n "$SEARXNG_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$SEARXNG_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_SEARXNG_HASH="$NEW_HASH"
generated_values["SEARXNG_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "SEARXNG_PASSWORD_HASH" "$FINAL_SEARXNG_HASH"
# --- COMFYUI ---
COMFYUI_PLAIN_PASS="${generated_values["COMFYUI_PASSWORD"]}"
FINAL_COMFYUI_HASH="${generated_values[COMFYUI_PASSWORD_HASH]}"
if [[ -z "$FINAL_COMFYUI_HASH" && -n "$COMFYUI_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$COMFYUI_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_COMFYUI_HASH="$NEW_HASH"
generated_values["COMFYUI_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "COMFYUI_PASSWORD_HASH" "$FINAL_COMFYUI_HASH"
# --- PADDLEOCR ---
PADDLEOCR_PLAIN_PASS="${generated_values["PADDLEOCR_PASSWORD"]}"
FINAL_PADDLEOCR_HASH="${generated_values[PADDLEOCR_PASSWORD_HASH]}"
if [[ -z "$FINAL_PADDLEOCR_HASH" && -n "$PADDLEOCR_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$PADDLEOCR_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_PADDLEOCR_HASH="$NEW_HASH"
generated_values["PADDLEOCR_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "PADDLEOCR_PASSWORD_HASH" "$FINAL_PADDLEOCR_HASH"
# --- RAGAPP ---
RAGAPP_PLAIN_PASS="${generated_values["RAGAPP_PASSWORD"]}"
FINAL_RAGAPP_HASH="${generated_values[RAGAPP_PASSWORD_HASH]}"
if [[ -z "$FINAL_RAGAPP_HASH" && -n "$RAGAPP_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$RAGAPP_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_RAGAPP_HASH="$NEW_HASH"
generated_values["RAGAPP_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "RAGAPP_PASSWORD_HASH" "$FINAL_RAGAPP_HASH"
# --- LIBRETRANSLATE ---
LT_PLAIN_PASS="${generated_values["LT_PASSWORD"]}"
FINAL_LT_HASH="${generated_values[LT_PASSWORD_HASH]}"
if [[ -z "$FINAL_LT_HASH" && -n "$LT_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$LT_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_LT_HASH="$NEW_HASH"
generated_values["LT_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "LT_PASSWORD_HASH" "$FINAL_LT_HASH"
# --- DOCLING ---
DOCLING_PLAIN_PASS="${generated_values["DOCLING_PASSWORD"]}"
FINAL_DOCLING_HASH="${generated_values[DOCLING_PASSWORD_HASH]}"
if [[ -z "$FINAL_DOCLING_HASH" && -n "$DOCLING_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$DOCLING_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_DOCLING_HASH="$NEW_HASH"
generated_values["DOCLING_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "DOCLING_PASSWORD_HASH" "$FINAL_DOCLING_HASH"
# --- WELCOME PAGE ---
WELCOME_PLAIN_PASS="${generated_values["WELCOME_PASSWORD"]}"
FINAL_WELCOME_HASH="${generated_values[WELCOME_PASSWORD_HASH]}"
if [[ -z "$FINAL_WELCOME_HASH" && -n "$WELCOME_PLAIN_PASS" ]]; then
NEW_HASH=$(_generate_and_get_hash "$WELCOME_PLAIN_PASS")
if [[ -n "$NEW_HASH" ]]; then
FINAL_WELCOME_HASH="$NEW_HASH"
generated_values["WELCOME_PASSWORD_HASH"]="$NEW_HASH"
fi
fi
_update_or_add_env_var "WELCOME_PASSWORD_HASH" "$FINAL_WELCOME_HASH"
if [ $? -eq 0 ]; then # This $? reflects the status of the last mv command from the last _update_or_add_env_var call.
# For now, assuming if we reached here and mv was fine, primary operations were okay.
echo ".env file generated successfully in the project root ($OUTPUT_FILE)."
else
log_error "Failed to generate .env file." >&2
rm -f "$OUTPUT_FILE" # Clean up potentially broken output file
exit 1
fi
log_success ".env file generated successfully in the project root ($OUTPUT_FILE)."
# Uninstall caddy
apt remove -y caddy
exit 0
# Cleanup any .bak files
cleanup_bak_files "$PROJECT_ROOT"
exit 0

View File

@@ -1,39 +1,31 @@
#!/bin/bash
# =============================================================================
# 04_wizard.sh - Interactive service selection wizard
# =============================================================================
# Guides the user through selecting which services to install using whiptail.
#
# Features:
# - Quick Start mode: pre-configured set (n8n + monitoring + backups)
# - Custom mode: multi-screen selection grouped by category
# - Core Services (n8n, Flowise, Dify, etc.)
# - AI & ML Services (Ollama with CPU/GPU, ComfyUI, etc.)
# - Databases & Vector Stores (Qdrant, Weaviate, Neo4j, etc.)
# - Infrastructure & Monitoring (Grafana, Prometheus, Portainer, etc.)
# - Preserves previously selected services on re-run
# - Updates COMPOSE_PROFILES in .env file
#
# Usage: bash scripts/04_wizard.sh
# =============================================================================
# Script to guide user through service selection
# Source utility functions, if any, assuming it's in the same directory
# and .env is in the parent directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ENV_FILE="$PROJECT_ROOT/.env"
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# UTILS_SCRIPT="$SCRIPT_DIR/utils.sh" # Uncomment if utils.sh contains relevant functions
# Verify whiptail is available
require_whiptail
# if [ -f "$UTILS_SCRIPT" ]; then
# source "$UTILS_SCRIPT"
# fi
# Function to check if whiptail is installed
# Exits the script with status 1 (after install guidance) when it is missing.
check_whiptail() {
  if command -v whiptail &> /dev/null; then
    return 0
  fi
  log_error "'whiptail' is not installed."
  log_info "This tool is required for the interactive service selection."
  log_info "On Debian/Ubuntu, you can install it using: sudo apt-get install whiptail"
  log_info "Please install whiptail and try again."
  exit 1
}
# Call the check
check_whiptail
# Store original DEBIAN_FRONTEND and set to dialog for whiptail
ORIGINAL_DEBIAN_FRONTEND="$DEBIAN_FRONTEND"
export DEBIAN_FRONTEND=dialog
# Set DEBIAN_FRONTEND for whiptail
save_debian_frontend
# --- Quick Start Pack Selection ---
# First screen: choose between Quick Start Pack or Custom Selection
@@ -57,27 +49,11 @@ if [ "$PACK_CHOICE" == "quick" ]; then
# Base Pack profiles
COMPOSE_PROFILES_VALUE="n8n,monitoring,postgresus,portainer"
# Ensure .env file exists
if [ ! -f "$ENV_FILE" ]; then
touch "$ENV_FILE"
fi
# Remove existing COMPOSE_PROFILES line if it exists
if grep -q "^COMPOSE_PROFILES=" "$ENV_FILE"; then
sed -i.bak "\|^COMPOSE_PROFILES=|d" "$ENV_FILE"
fi
# Add the new COMPOSE_PROFILES line
echo "COMPOSE_PROFILES=${COMPOSE_PROFILES_VALUE}" >> "$ENV_FILE"
# Update COMPOSE_PROFILES in .env
update_compose_profiles "$COMPOSE_PROFILES_VALUE"
log_info "The following Docker Compose profiles will be active: ${COMPOSE_PROFILES_VALUE}"
# Restore original DEBIAN_FRONTEND
if [ -n "$ORIGINAL_DEBIAN_FRONTEND" ]; then
export DEBIAN_FRONTEND="$ORIGINAL_DEBIAN_FRONTEND"
else
unset DEBIAN_FRONTEND
fi
restore_debian_frontend
exit 0
fi
@@ -166,11 +142,7 @@ CHOICES=$(whiptail --title "Service Selection Wizard" --checklist \
3>&1 1>&2 2>&3)
# Restore original DEBIAN_FRONTEND
if [ -n "$ORIGINAL_DEBIAN_FRONTEND" ]; then
export DEBIAN_FRONTEND="$ORIGINAL_DEBIAN_FRONTEND"
else
unset DEBIAN_FRONTEND
fi
restore_debian_frontend
# Exit if user pressed Cancel or Esc
exitstatus=$?
@@ -178,13 +150,7 @@ if [ $exitstatus -ne 0 ]; then
log_info "Service selection cancelled by user. Exiting wizard."
log_info "No changes made to service profiles. Default services will be used."
# Set COMPOSE_PROFILES to empty to ensure only core services run
if [ ! -f "$ENV_FILE" ]; then
touch "$ENV_FILE"
fi
if grep -q "^COMPOSE_PROFILES=" "$ENV_FILE"; then
sed -i.bak "/^COMPOSE_PROFILES=/d" "$ENV_FILE"
fi
echo "COMPOSE_PROFILES=" >> "$ENV_FILE"
update_compose_profiles ""
exit 0
fi
@@ -297,28 +263,14 @@ else
fi
# Update or add COMPOSE_PROFILES in .env file
# Ensure .env file exists (it should have been created by 03_generate_secrets.sh or exist from previous run)
if [ ! -f "$ENV_FILE" ]; then
log_warning "'.env' file not found at $ENV_FILE. Creating it."
touch "$ENV_FILE"
fi
# Remove existing COMPOSE_PROFILES line if it exists
if grep -q "^COMPOSE_PROFILES=" "$ENV_FILE"; then
# Using a different delimiter for sed because a profile name might contain '/' (unlikely here)
sed -i.bak "\|^COMPOSE_PROFILES=|d" "$ENV_FILE"
fi
# Add the new COMPOSE_PROFILES line
echo "COMPOSE_PROFILES=${COMPOSE_PROFILES_VALUE}" >> "$ENV_FILE"
update_compose_profiles "$COMPOSE_PROFILES_VALUE"
if [ -z "$COMPOSE_PROFILES_VALUE" ]; then
log_info "Only core services (Caddy, Postgres, Redis) will be started."
else
log_info "The following Docker Compose profiles will be active: ${COMPOSE_PROFILES_VALUE}"
fi
# Make the script executable (though install.sh calls it with bash)
chmod +x "$SCRIPT_DIR/04_wizard.sh"
# Cleanup any .bak files created by sed
cleanup_bak_files "$PROJECT_ROOT"
exit 0

View File

@@ -1,39 +1,31 @@
#!/bin/bash
# =============================================================================
# 05_configure_services.sh - Service-specific configuration
# =============================================================================
# Collects additional configuration needed by selected services via whiptail
# prompts and writes settings to .env file.
#
# Prompts for:
# - OpenAI API Key (optional, used by Supabase AI and Crawl4AI)
# - n8n workflow import option (~300 ready-made workflows)
# - Number of n8n workers to run
# - Cloudflare Tunnel token (if cloudflare-tunnel profile is active)
#
# Also handles:
# - Generates n8n worker-runner pairs configuration
# - Resolves service conflicts (e.g., removes Dify if Supabase is selected)
#
# Usage: bash scripts/05_configure_services.sh
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ENV_FILE="$PROJECT_ROOT/.env"
init_paths
# Ensure .env exists
if [ ! -f "$ENV_FILE" ]; then
touch "$ENV_FILE"
fi
# Helper: read value from .env (without surrounding quotes)
# Prints the value of the given variable from $ENV_FILE, or an empty string
# when absent. If the variable is defined more than once, the last definition
# wins (write_env_var appends, so the newest value is last). The old form ran
# grep twice per lookup and printed one line per duplicate definition.
# Globals:   ENV_FILE (read)
# Arguments: $1 - variable name
read_env_var() {
  local var_name="$1"
  local line
  # Single pass over the file; tail keeps only the most recent match.
  line=$(grep "^${var_name}=" "$ENV_FILE" | tail -n 1)
  if [ -n "$line" ]; then
    printf '%s\n' "${line#*=}" | sed 's/^"//' | sed 's/"$//'
  else
    echo ""
  fi
}
# Helper: upsert value into .env (quote the value)
# Removes any existing definition of the variable, then appends
# NAME="value" to $ENV_FILE. Leaves sed's .bak backup behind (cleaned up
# later by cleanup_bak_files).
# Globals:   ENV_FILE (written)
# Arguments: $1 - variable name, $2 - value
write_env_var() {
  local name="$1"
  local value="$2"
  if grep -q "^${name}=" "$ENV_FILE"; then
    # '|' delimiter keeps the pattern safe if the name ever contains '/'.
    sed -i.bak "\|^${name}=|d" "$ENV_FILE"
  fi
  printf '%s="%s"\n' "$name" "$value" >> "$ENV_FILE"
}
ensure_file_exists "$ENV_FILE"
log_info "Configuring service options in .env..."
@@ -113,10 +105,10 @@ else
break
fi
else
log_error "Number of workers must be a positive integer." >&2
log_error "Number of workers must be a positive integer."
fi
else
log_error "Invalid input '$N8N_WORKER_COUNT_CANDIDATE'. Please enter a positive integer (e.g., 1, 2)." >&2
log_error "Invalid input '$N8N_WORKER_COUNT_CANDIDATE'. Please enter a positive integer (e.g., 1, 2)."
fi
done
fi
@@ -137,16 +129,11 @@ bash "$SCRIPT_DIR/generate_n8n_workers.sh"
# ----------------------------------------------------------------
# If Cloudflare Tunnel is selected (based on COMPOSE_PROFILES), prompt for the token and write to .env
COMPOSE_PROFILES_VALUE="$(read_env_var COMPOSE_PROFILES)"
cloudflare_selected=0
if [[ "$COMPOSE_PROFILES_VALUE" == *"cloudflare-tunnel"* ]]; then
cloudflare_selected=1
fi
# Set COMPOSE_PROFILES for is_profile_active to work
COMPOSE_PROFILES="$COMPOSE_PROFILES_VALUE"
if [ $cloudflare_selected -eq 1 ]; then
existing_cf_token=""
if grep -q "^CLOUDFLARE_TUNNEL_TOKEN=" "$ENV_FILE"; then
existing_cf_token=$(grep "^CLOUDFLARE_TUNNEL_TOKEN=" "$ENV_FILE" | cut -d'=' -f2- | sed 's/^\"//' | sed 's/\"$//')
fi
if is_profile_active "cloudflare-tunnel"; then
existing_cf_token="$(read_env_var CLOUDFLARE_TUNNEL_TOKEN)"
if [ -n "$existing_cf_token" ]; then
log_info "Cloudflare Tunnel token found in .env; reusing it."
@@ -154,15 +141,11 @@ if [ $cloudflare_selected -eq 1 ]; then
else
require_whiptail
input_cf_token=$(wt_input "Cloudflare Tunnel Token" "Enter your Cloudflare Tunnel token (leave empty to skip)." "") || true
token_to_write="$input_cf_token"
# Update the .env with the token (may be empty if user skipped)
if grep -q "^CLOUDFLARE_TUNNEL_TOKEN=" "$ENV_FILE"; then
sed -i.bak "/^CLOUDFLARE_TUNNEL_TOKEN=/d" "$ENV_FILE"
fi
echo "CLOUDFLARE_TUNNEL_TOKEN=\"$token_to_write\"" >> "$ENV_FILE"
write_env_var "CLOUDFLARE_TUNNEL_TOKEN" "$input_cf_token"
if [ -n "$token_to_write" ]; then
if [ -n "$input_cf_token" ]; then
log_success "Cloudflare Tunnel token saved to .env."
echo ""
echo "🔒 After confirming the tunnel works, consider closing ports 80, 443, and 7687 in your firewall."
@@ -176,7 +159,7 @@ fi
# ----------------------------------------------------------------
# Safety: If Supabase is present, remove Dify from COMPOSE_PROFILES (no prompts)
# ----------------------------------------------------------------
if [[ -n "$COMPOSE_PROFILES_VALUE" && "$COMPOSE_PROFILES_VALUE" == *"supabase"* ]]; then
if is_profile_active "supabase"; then
IFS=',' read -r -a profiles_array <<< "$COMPOSE_PROFILES_VALUE"
new_profiles=()
for p in "${profiles_array[@]}"; do
@@ -200,4 +183,7 @@ write_env_var "POSTGRES_HOST" "db"
log_success "Service configuration complete. .env updated at $ENV_FILE"
exit 0
# Cleanup any .bak files
cleanup_bak_files "$PROJECT_ROOT"
exit 0

View File

@@ -1,47 +1,50 @@
#!/bin/bash
# =============================================================================
# 06_run_services.sh - Service launcher
# =============================================================================
# Starts all selected services using Docker Compose via start_services.py.
#
# Pre-flight checks:
# - Verifies .env, docker-compose.yml, and Caddyfile exist
# - Ensures Docker daemon is running
# - Makes start_services.py executable if needed
#
# The actual service orchestration is handled by start_services.py which:
# - Starts services in correct dependency order
# - Handles profile-based service selection
# - Manages health checks and startup timeouts
#
# Usage: bash scripts/06_run_services.sh
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# 1. Check for .env file
if [ ! -f ".env" ]; then
log_error ".env file not found in project root." >&2
exit 1
fi
cd "$PROJECT_ROOT"
# 2. Check for docker-compose.yml file
if [ ! -f "docker-compose.yml" ]; then
log_error "docker-compose.yml file not found in project root." >&2
exit 1
fi
# Check required files
require_file "$ENV_FILE" ".env file not found in project root."
require_file "$PROJECT_ROOT/docker-compose.yml" "docker-compose.yml file not found in project root."
require_file "$PROJECT_ROOT/Caddyfile" "Caddyfile not found in project root. Reverse proxy might not work."
require_file "$PROJECT_ROOT/start_services.py" "start_services.py file not found in project root."
# 3. Check for Caddyfile (optional but recommended for reverse proxy)
if [ ! -f "Caddyfile" ]; then
log_warning "Caddyfile not found in project root. Reverse proxy might not work as expected." >&2
exit 1
fi
# 4. Check if Docker daemon is running
# Check if Docker daemon is running
if ! docker info > /dev/null 2>&1; then
log_error "Docker daemon is not running. Please start Docker and try again." >&2
log_error "Docker daemon is not running. Please start Docker and try again."
exit 1
fi
# 5. Check if start_services.py exists and is executable
if [ ! -f "start_services.py" ]; then
log_error "start_services.py file not found in project root." >&2
exit 1
fi
if [ ! -x "start_services.py" ]; then
# Ensure start_services.py is executable
if [ ! -x "$PROJECT_ROOT/start_services.py" ]; then
log_warning "start_services.py is not executable. Making it executable..."
chmod +x "start_services.py"
chmod +x "$PROJECT_ROOT/start_services.py"
fi
log_info "Launching services using start_services.py..."
# Execute start_services.py
./start_services.py
"$PROJECT_ROOT/start_services.py"
exit 0
exit 0

View File

@@ -1,25 +1,30 @@
#!/bin/bash
# =============================================================================
# 07_final_report.sh - Post-installation summary and credentials display
# =============================================================================
# Generates and displays the final installation report after all services
# are running.
#
# Actions:
# - Generates welcome page data (via generate_welcome_page.sh)
# - Displays Welcome Page URL and credentials
# - Shows next steps for configuring individual services
# - Provides guidance for first-run setup of n8n, Portainer, Flowise, etc.
#
# The Welcome Page serves as a central dashboard with all service credentials
# and access URLs, protected by basic auth.
#
# Usage: bash scripts/07_final_report.sh
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
# Get the directory where the script resides
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
ENV_FILE="$PROJECT_ROOT/.env"
# Check if .env file exists
if [ ! -f "$ENV_FILE" ]; then
log_error "The .env file ('$ENV_FILE') was not found."
exit 1
fi
init_paths
# Load environment variables from .env file
set -a
source "$ENV_FILE"
set +a
load_env || exit 1
# Generate welcome page data
if [ -f "$SCRIPT_DIR/generate_welcome_page.sh" ]; then
@@ -27,19 +32,6 @@ if [ -f "$SCRIPT_DIR/generate_welcome_page.sh" ]; then
bash "$SCRIPT_DIR/generate_welcome_page.sh" || log_warning "Failed to generate welcome page"
fi
# Function to check if a profile is active
# Returns 0 when the exact profile name appears in the comma-separated
# COMPOSE_PROFILES variable, 1 otherwise (including when it is empty).
# Globals:   COMPOSE_PROFILES (read)
# Arguments: $1 - profile name to look for
is_profile_active() {
  local profile="$1"
  [ -n "$COMPOSE_PROFILES" ] || return 1
  # Wrap both sides in commas so "mon" never matches "monitoring".
  case ",$COMPOSE_PROFILES," in
    *",$profile,"*) return 0 ;;
    *) return 1 ;;
  esac
}
echo
echo "======================================================================="
echo " Installation Complete!"

View File

@@ -1,29 +1,34 @@
#!/bin/bash
# =============================================================================
# apply_update.sh - Service update and restart logic
# =============================================================================
# Called by update.sh after git pull. Performs the actual service updates:
# 1. Updates .env with any new variables (03_generate_secrets.sh --update)
# 2. Runs service selection wizard (04_wizard.sh) to update profiles
# 3. Configures services (05_configure_services.sh)
# 4. Pulls latest Docker images for selected services
# 5. Restarts all services (06_run_services.sh)
# 6. Displays final report (07_final_report.sh)
#
# Handles multiple compose files: main, n8n-workers, Supabase, and Dify.
#
# Usage: Called automatically by update.sh (not typically run directly)
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Set the compose command explicitly to use docker compose subcommand
COMPOSE_CMD="docker compose"
# Navigate to the directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Project root directory (one level up from scripts)
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
# Path to the 06_run_services.sh script (Corrected from original update.sh which had 04)
# Path to the 06_run_services.sh script
RUN_SERVICES_SCRIPT="$SCRIPT_DIR/06_run_services.sh"
# Compose files (Not strictly needed here unless used directly, but good for context)
# MAIN_COMPOSE_FILE="$PROJECT_ROOT/docker-compose.yml"
# SUPABASE_COMPOSE_FILE="$PROJECT_ROOT/supabase/docker/docker-compose.yml"
ENV_FILE="$PROJECT_ROOT/.env"
# Check if run services script exists
if [ ! -f "$RUN_SERVICES_SCRIPT" ]; then
log_error "$RUN_SERVICES_SCRIPT not found."
exit 1
fi
require_file "$RUN_SERVICES_SCRIPT" "$RUN_SERVICES_SCRIPT not found."
cd "$PROJECT_ROOT"

View File

@@ -1,14 +1,25 @@
#!/bin/bash
# =============================================================================
# docker_cleanup.sh - Complete Docker system cleanup
# =============================================================================
# Aggressively cleans up the Docker system to reclaim disk space.
# WARNING: This action is irreversible!
#
# Removes:
# - All stopped containers
# - All networks not used by at least one container
# - All unused images (not just dangling ones)
# - All unused volumes
# - All build cache
#
# Usage: make clean OR sudo bash scripts/docker_cleanup.sh
# =============================================================================
set -e
# Source the utilities file
source "$(dirname "$0")/utils.sh"
# This script is intended for cleaning up the Docker system.
# It removes all unused containers, images, networks, and volumes.
# Use with caution, as this action is irreversible.
log_info "Starting Docker cleanup..."
# The 'docker system prune' command removes:

View File

@@ -3,46 +3,31 @@
# System diagnostics script for n8n-install
# Checks DNS, SSL, containers, disk space, memory, and configuration
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Get the directory where the script resides
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
ENV_FILE="$PROJECT_ROOT/.env"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Counters
# Counters for summary
ERRORS=0
WARNINGS=0
OK=0
# Print status functions
print_ok() {
echo -e " ${GREEN}[OK]${NC} $1"
# Wrapper functions that also count results
count_ok() {
print_ok "$1"
OK=$((OK + 1))
}
print_warning() {
echo -e " ${YELLOW}[WARNING]${NC} $1"
count_warning() {
print_warning "$1"
WARNINGS=$((WARNINGS + 1))
}
print_error() {
echo -e " ${RED}[ERROR]${NC} $1"
count_error() {
print_error "$1"
ERRORS=$((ERRORS + 1))
}
print_info() {
echo -e " ${BLUE}[INFO]${NC} $1"
}
echo ""
echo "========================================"
echo " n8n-install System Diagnostics"
@@ -54,33 +39,31 @@ echo "Configuration:"
echo "--------------"
if [ -f "$ENV_FILE" ]; then
print_ok ".env file exists"
count_ok ".env file exists"
# Load environment variables
set -a
source "$ENV_FILE"
set +a
load_env
# Check required variables
if [ -n "$USER_DOMAIN_NAME" ]; then
print_ok "USER_DOMAIN_NAME is set: $USER_DOMAIN_NAME"
count_ok "USER_DOMAIN_NAME is set: $USER_DOMAIN_NAME"
else
print_error "USER_DOMAIN_NAME is not set"
count_error "USER_DOMAIN_NAME is not set"
fi
if [ -n "$LETSENCRYPT_EMAIL" ]; then
print_ok "LETSENCRYPT_EMAIL is set"
count_ok "LETSENCRYPT_EMAIL is set"
else
print_warning "LETSENCRYPT_EMAIL is not set (SSL certificates may not work)"
count_warning "LETSENCRYPT_EMAIL is not set (SSL certificates may not work)"
fi
if [ -n "$COMPOSE_PROFILES" ]; then
print_ok "Active profiles: $COMPOSE_PROFILES"
count_ok "Active profiles: $COMPOSE_PROFILES"
else
print_warning "No service profiles are active"
count_warning "No service profiles are active"
fi
else
print_error ".env file not found at $ENV_FILE"
count_error ".env file not found at $ENV_FILE"
echo ""
echo "Run 'make install' to set up the environment."
exit 1
@@ -93,21 +76,21 @@ echo "Docker:"
echo "-------"
if command -v docker &> /dev/null; then
print_ok "Docker is installed"
count_ok "Docker is installed"
if docker info &> /dev/null; then
print_ok "Docker daemon is running"
count_ok "Docker daemon is running"
else
print_error "Docker daemon is not running or not accessible"
count_error "Docker daemon is not running or not accessible"
fi
else
print_error "Docker is not installed"
count_error "Docker is not installed"
fi
if command -v docker-compose &> /dev/null || docker compose version &> /dev/null; then
print_ok "Docker Compose is available"
count_ok "Docker Compose is available"
else
print_warning "Docker Compose is not available"
count_warning "Docker Compose is not available"
fi
echo ""
@@ -120,11 +103,11 @@ DISK_USAGE=$(df -h / | awk 'NR==2 {print $5}' | tr -d '%')
DISK_AVAIL=$(df -h / | awk 'NR==2 {print $4}')
if [ "$DISK_USAGE" -lt 80 ]; then
print_ok "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available)"
count_ok "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available)"
elif [ "$DISK_USAGE" -lt 90 ]; then
print_warning "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available) - Consider freeing space"
count_warning "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available) - Consider freeing space"
else
print_error "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available) - Critical!"
count_error "Disk usage: ${DISK_USAGE}% (${DISK_AVAIL} available) - Critical!"
fi
# Check Docker disk usage
@@ -146,11 +129,11 @@ if command -v free &> /dev/null; then
MEM_PERCENT=$(free | awk '/^Mem:/ {printf("%.0f", $3/$2 * 100)}')
if [ "$MEM_PERCENT" -lt 80 ]; then
print_ok "Memory usage: ${MEM_PERCENT}% (${MEM_AVAIL} available of ${MEM_TOTAL})"
count_ok "Memory usage: ${MEM_PERCENT}% (${MEM_AVAIL} available of ${MEM_TOTAL})"
elif [ "$MEM_PERCENT" -lt 90 ]; then
print_warning "Memory usage: ${MEM_PERCENT}% (${MEM_AVAIL} available)"
count_warning "Memory usage: ${MEM_PERCENT}% (${MEM_AVAIL} available)"
else
print_error "Memory usage: ${MEM_PERCENT}% - High memory pressure!"
count_error "Memory usage: ${MEM_PERCENT}% - High memory pressure!"
fi
else
print_info "Memory info not available (free command not found)"
@@ -174,7 +157,7 @@ while read -r line; do
name=$(echo "$line" | cut -d'|' -f1)
restarts=$(echo "$line" | cut -d'|' -f2)
if [ "$restarts" -gt 3 ]; then
print_warning "$name has restarted $restarts times"
count_warning "$name has restarted $restarts times"
HIGH_RESTARTS=$((HIGH_RESTARTS + 1))
fi
fi
@@ -185,17 +168,17 @@ done < <(docker ps --format '{{.Names}}|{{.Status}}' 2>/dev/null | while read co
done)
if [ "$HIGH_RESTARTS" -eq 0 ]; then
print_ok "No containers with excessive restarts"
count_ok "No containers with excessive restarts"
fi
# Check unhealthy containers
UNHEALTHY=$(docker ps --filter "health=unhealthy" --format '{{.Names}}' 2>/dev/null)
if [ -n "$UNHEALTHY" ]; then
for container in $UNHEALTHY; do
print_error "Container $container is unhealthy"
count_error "Container $container is unhealthy"
done
else
print_ok "No unhealthy containers"
count_ok "No unhealthy containers"
fi
echo ""
@@ -213,9 +196,9 @@ check_dns() {
fi
if host "$hostname" &> /dev/null; then
print_ok "$varname ($hostname) resolves"
count_ok "$varname ($hostname) resolves"
else
print_error "$varname ($hostname) does not resolve"
count_error "$varname ($hostname) does not resolve"
fi
}
@@ -236,16 +219,16 @@ echo "SSL/Caddy:"
echo "----------"
if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "caddy"; then
print_ok "Caddy container is running"
count_ok "Caddy container is running"
# Check if Caddy can reach the config
if docker exec caddy caddy validate --config /etc/caddy/Caddyfile &> /dev/null; then
print_ok "Caddyfile is valid"
count_ok "Caddyfile is valid"
else
print_warning "Caddyfile validation failed (may be fine if using default)"
count_warning "Caddyfile validation failed (may be fine if using default)"
fi
else
print_warning "Caddy container is not running"
count_warning "Caddy container is not running"
fi
echo ""
@@ -259,10 +242,10 @@ check_service() {
local port="$2"
if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${container}$"; then
print_ok "$container is running"
count_ok "$container is running"
else
if [[ ",$COMPOSE_PROFILES," == *",$container,"* ]] || [ "$container" == "postgres" ] || [ "$container" == "redis" ]; then
print_error "$container is not running (but expected)"
if is_profile_active "$container" || [ "$container" == "postgres" ] || [ "$container" == "redis" ] || [ "$container" == "caddy" ]; then
count_error "$container is not running (but expected)"
fi
fi
}
@@ -271,11 +254,11 @@ check_service "postgres" "5432"
check_service "redis" "6379"
check_service "caddy" "80"
if [[ ",$COMPOSE_PROFILES," == *",n8n,"* ]]; then
if is_profile_active "n8n"; then
check_service "n8n" "5678"
fi
if [[ ",$COMPOSE_PROFILES," == *",monitoring,"* ]]; then
if is_profile_active "monitoring"; then
check_service "grafana" "3000"
check_service "prometheus" "9090"
fi

View File

@@ -1,43 +1,33 @@
#!/bin/bash
# Генерирует docker-compose.n8n-workers.yml с N парами worker-runner
# Использование: N8N_WORKER_COUNT=3 bash scripts/generate_n8n_workers.sh
# Generates docker-compose.n8n-workers.yml with N worker-runner pairs
# Usage: N8N_WORKER_COUNT=3 bash scripts/generate_n8n_workers.sh
#
# Этот скрипт идемпотентен - при повторном запуске файл перезаписывается
# This script is idempotent - file is overwritten on each run
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Source utilities if available
if [[ -f "$SCRIPT_DIR/utils.sh" ]]; then
source "$SCRIPT_DIR/utils.sh"
else
# Fallback logging functions
log_info() { echo "[INFO] $*"; }
log_warning() { echo "[WARN] $*"; }
log_error() { echo "[ERROR] $*" >&2; }
fi
# Загрузить N8N_WORKER_COUNT из .env если не задан
if [[ -z "${N8N_WORKER_COUNT:-}" ]] && [[ -f "$PROJECT_DIR/.env" ]]; then
# Strip quotes (single and double) from the value
N8N_WORKER_COUNT=$(grep -E "^N8N_WORKER_COUNT=" "$PROJECT_DIR/.env" | cut -d'=' -f2 | tr -d '"'"'" || echo "1")
# Load N8N_WORKER_COUNT from .env if not set
if [[ -z "${N8N_WORKER_COUNT:-}" ]] && [[ -f "$ENV_FILE" ]]; then
N8N_WORKER_COUNT=$(read_env_var "N8N_WORKER_COUNT" || echo "1")
fi
N8N_WORKER_COUNT=${N8N_WORKER_COUNT:-1}
# Валидация N8N_WORKER_COUNT
# Validate N8N_WORKER_COUNT
if ! [[ "$N8N_WORKER_COUNT" =~ ^[1-9][0-9]*$ ]]; then
log_error "N8N_WORKER_COUNT must be a positive integer, got: '$N8N_WORKER_COUNT'"
exit 1
fi
OUTPUT_FILE="$PROJECT_DIR/docker-compose.n8n-workers.yml"
OUTPUT_FILE="$PROJECT_ROOT/docker-compose.n8n-workers.yml"
log_info "Generating n8n worker-runner pairs configuration..."
log_info "N8N_WORKER_COUNT=$N8N_WORKER_COUNT"
# Перезаписываем файл (идемпотентно)
# Overwrite file (idempotent)
cat > "$OUTPUT_FILE" << 'EOF'
# Auto-generated file for n8n worker-runner pairs
# Regenerate with: bash scripts/generate_n8n_workers.sh

View File

@@ -4,20 +4,14 @@
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Get the directory where the script resides
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
ENV_FILE="$PROJECT_ROOT/.env"
OUTPUT_FILE="$PROJECT_ROOT/welcome/data.json"
# Check if .env file exists
if [ ! -f "$ENV_FILE" ]; then
log_error "The .env file ('$ENV_FILE') was not found."
exit 1
fi
# Load environment variables from .env file
load_env || exit 1
# Ensure welcome directory exists
mkdir -p "$PROJECT_ROOT/welcome"
@@ -27,31 +21,6 @@ if [ -f "$OUTPUT_FILE" ]; then
rm -f "$OUTPUT_FILE"
fi
# Load environment variables from .env file
set -a
source "$ENV_FILE"
set +a
# Function to check if a profile is active
# Returns 0 when $1 is one of the comma-separated entries in COMPOSE_PROFILES.
is_profile_active() {
    local profile_to_check="$1"
    # Nothing selected at all -> no profile can be active.
    if [ -z "$COMPOSE_PROFILES" ]; then
        return 1
    fi
    # Wrap both sides in commas so "n8n" cannot match "n8n-worker".
    if [[ ",$COMPOSE_PROFILES," == *",$profile_to_check,"* ]]; then
        return 0
    else
        return 1
    fi
}
# Function to escape JSON strings
# Escapes backslashes and double quotes, then strips raw newlines/CRs so the
# value is safe inside a JSON string literal.
json_escape() {
    local str="$1"
    # Escape backslashes, double quotes, and control characters
    # NOTE(review): the middle expression reads 's/ /\\t/g' here; presumably it
    # escaped literal TAB characters ('s/<TAB>/\\t/g') before whitespace was
    # mangled -- confirm against the original file, since replacing *spaces*
    # with \t would corrupt every value.
    printf '%s' "$str" | sed 's/\\/\\\\/g; s/"/\\"/g; s/ /\\t/g' | tr -d '\n\r'
}
# Start building JSON
GENERATED_AT=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

View File

@@ -1,4 +1,19 @@
#!/bin/bash
# =============================================================================
# install.sh - Main installation orchestrator for n8n-install
# =============================================================================
# This script runs the complete installation process by sequentially executing
# 7 installation steps:
# 1. System Preparation - updates packages, installs utilities, configures firewall
# 2. Docker Installation - installs Docker and Docker Compose
# 3. Secret Generation - creates .env file with secure passwords and secrets
# 4. Service Wizard - interactive service selection using whiptail
# 5. Service Configuration - prompts for API keys and service-specific settings
# 6. Service Launch - starts all selected services via Docker Compose
# 7. Final Report - displays credentials and access URLs
#
# Usage: sudo bash scripts/install.sh
# =============================================================================
set -e
@@ -28,8 +43,8 @@ if [[ "$current_path" == *"/n8n-install/n8n-install" ]]; then
fi
fi
# Get the directory where this script is located (which is the scripts directory)
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Initialize paths using utils.sh helper
init_paths
# Check if all required scripts exist and are executable in the current directory
required_scripts=(
@@ -82,31 +97,31 @@ fi
# Run installation steps sequentially using their full paths
log_info "========== STEP 1: System Preparation =========="
log_header "STEP 1: System Preparation"
bash "$SCRIPT_DIR/01_system_preparation.sh" || { log_error "System Preparation failed"; exit 1; }
log_success "System preparation complete!"
log_info "========== STEP 2: Installing Docker =========="
log_header "STEP 2: Installing Docker"
bash "$SCRIPT_DIR/02_install_docker.sh" || { log_error "Docker Installation failed"; exit 1; }
log_success "Docker installation complete!"
log_info "========== STEP 3: Generating Secrets and Configuration =========="
log_header "STEP 3: Generating Secrets and Configuration"
bash "$SCRIPT_DIR/03_generate_secrets.sh" || { log_error "Secret/Config Generation failed"; exit 1; }
log_success "Secret/Config Generation complete!"
log_info "========== STEP 4: Running Service Selection Wizard =========="
log_header "STEP 4: Running Service Selection Wizard"
bash "$SCRIPT_DIR/04_wizard.sh" || { log_error "Service Selection Wizard failed"; exit 1; }
log_success "Service Selection Wizard complete!"
log_info "========== STEP 5: Configure Services =========="
log_header "STEP 5: Configure Services"
bash "$SCRIPT_DIR/05_configure_services.sh" || { log_error "Configure Services failed"; exit 1; }
log_success "Configure Services complete!"
log_info "========== STEP 6: Running Services =========="
log_header "STEP 6: Running Services"
bash "$SCRIPT_DIR/06_run_services.sh" || { log_error "Running Services failed"; exit 1; }
log_success "Running Services complete!"
log_info "========== STEP 7: Generating Final Report =========="
log_header "STEP 7: Generating Final Report"
# --- Installation Summary ---
log_info "Installation Summary. The following steps were performed by the scripts:"
log_success "- System updated and basic utilities installed"

View File

@@ -1,16 +1,24 @@
#!/bin/bash
# =============================================================================
# update.sh - Main update orchestrator
# =============================================================================
# Performs a full system and service update:
# 1. Pulls latest changes from the git repository (git reset --hard + pull)
# 2. Updates Ubuntu system packages (apt-get update && upgrade)
# 3. Delegates to apply_update.sh for service updates
#
# This two-stage approach ensures apply_update.sh itself gets updated before
# running, so new update logic is always applied.
#
# Usage: make update OR sudo bash scripts/update.sh
# =============================================================================
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
init_paths
# Set the compose command explicitly to use docker compose subcommand
# Navigate to the directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Project root directory (one level up from scripts)
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
# Path to the apply_update.sh script
APPLY_UPDATE_SCRIPT="$SCRIPT_DIR/apply_update.sh"

View File

@@ -5,31 +5,12 @@
set -e
# Source the utilities file
# Source the utilities file and initialize paths
source "$(dirname "$0")/utils.sh"
# Get the directory where the script resides
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." &> /dev/null && pwd )"
ENV_FILE="$PROJECT_ROOT/.env"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Check if .env file exists
if [ ! -f "$ENV_FILE" ]; then
log_error "The .env file ('$ENV_FILE') was not found."
exit 1
fi
init_paths
# Load environment variables
set -a
source "$ENV_FILE"
set +a
load_env || exit 1
echo ""
echo "========================================"
@@ -101,7 +82,7 @@ check_image_update "caddy" "caddy:2-alpine"
echo ""
# Check n8n if profile is active
if [[ ",$COMPOSE_PROFILES," == *",n8n,"* ]]; then
if is_profile_active "n8n"; then
echo "n8n Services:"
echo "-------------"
check_image_update "n8n" "docker.n8n.io/n8nio/n8n:${N8N_VERSION:-latest}"
@@ -110,7 +91,7 @@ if [[ ",$COMPOSE_PROFILES," == *",n8n,"* ]]; then
fi
# Check monitoring if profile is active
if [[ ",$COMPOSE_PROFILES," == *",monitoring,"* ]]; then
if is_profile_active "monitoring"; then
echo "Monitoring Services:"
echo "--------------------"
check_image_update "grafana" "grafana/grafana:latest"
@@ -121,28 +102,28 @@ if [[ ",$COMPOSE_PROFILES," == *",monitoring,"* ]]; then
fi
# Check other common services
if [[ ",$COMPOSE_PROFILES," == *",flowise,"* ]]; then
if is_profile_active "flowise"; then
echo "Flowise:"
echo "--------"
check_image_update "flowise" "flowiseai/flowise:latest"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",open-webui,"* ]]; then
if is_profile_active "open-webui"; then
echo "Open WebUI:"
echo "-----------"
check_image_update "open-webui" "ghcr.io/open-webui/open-webui:main"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",portainer,"* ]]; then
if is_profile_active "portainer"; then
echo "Portainer:"
echo "----------"
check_image_update "portainer" "portainer/portainer-ce:latest"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",langfuse,"* ]]; then
if is_profile_active "langfuse"; then
echo "Langfuse:"
echo "---------"
check_image_update "langfuse-web" "langfuse/langfuse:latest"
@@ -150,28 +131,28 @@ if [[ ",$COMPOSE_PROFILES," == *",langfuse,"* ]]; then
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",cpu,"* ]] || [[ ",$COMPOSE_PROFILES," == *",gpu-nvidia,"* ]] || [[ ",$COMPOSE_PROFILES," == *",gpu-amd,"* ]]; then
if is_profile_active "cpu" || is_profile_active "gpu-nvidia" || is_profile_active "gpu-amd"; then
echo "Ollama:"
echo "-------"
check_image_update "ollama" "ollama/ollama:latest"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",qdrant,"* ]]; then
if is_profile_active "qdrant"; then
echo "Qdrant:"
echo "-------"
check_image_update "qdrant" "qdrant/qdrant:latest"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",searxng,"* ]]; then
if is_profile_active "searxng"; then
echo "SearXNG:"
echo "--------"
check_image_update "searxng" "searxng/searxng:latest"
echo ""
fi
if [[ ",$COMPOSE_PROFILES," == *",postgresus,"* ]]; then
if is_profile_active "postgresus"; then
echo "Postgresus:"
echo "-----------"
check_image_update "postgresus" "ghcr.io/postgresus/postgresus:latest"

View File

@@ -1,56 +1,297 @@
#!/bin/bash
# =============================================================================
# utils.sh - Shared utilities for n8n-install scripts
# =============================================================================
# Common functions and utilities used across all installation scripts.
#
# Provides:
# - Path initialization (init_paths): Sets SCRIPT_DIR, PROJECT_ROOT, ENV_FILE
# - Logging functions: log_info, log_success, log_warning, log_error
# - .env manipulation: read_env_var, write_env_var, load_env
# - Whiptail wrappers: wt_input, wt_yesno, require_whiptail
# - Validation helpers: require_command, require_file, ensure_file_exists
# - Profile management: is_profile_active, update_compose_profiles
# - Doctor output helpers: print_ok, print_warning, print_error
#
# Usage: source "$(dirname "$0")/utils.sh" && init_paths
# =============================================================================
# Logging function that frames a message with a border and adds a timestamp
log_message() {
local message="$1"
local combined_message="${message}"
local length=${#combined_message}
local border_length=$((length + 4))
# Create the top border
local border=""
for ((i=0; i<border_length; i++)); do
border="${border}"
done
# Display the framed message with timestamp
echo "${border}"
echo "${combined_message}"
echo "${border}"
#=============================================================================
# CONSTANTS
#=============================================================================
# Sentinel domain used before the user configures a real one.
# NOTE(review): not readonly on purpose? utils.sh may be sourced more than
# once, and `readonly` would error on re-assignment -- confirm before changing.
DOMAIN_PLACEHOLDER="yourdomain.com"
#=============================================================================
# PATH INITIALIZATION
#=============================================================================
# Initialize standard paths - call at start of each script.
# WARNING: Must be called directly from script top-level, NOT from within
# functions, because the BASH_SOURCE index below assumes exactly one call
# frame between this function and the sourcing script.
# Sets three globals for the calling script:
#   SCRIPT_DIR   - absolute directory of the *calling* script
#   PROJECT_ROOT - parent of SCRIPT_DIR (the repository root)
#   ENV_FILE     - $PROJECT_ROOT/.env
# Usage: source utils.sh && init_paths
init_paths() {
    # BASH_SOURCE[1] = the script that called this function (not utils.sh itself)
    SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[1]}")" && pwd)"
    PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
    ENV_FILE="$PROJECT_ROOT/.env"
}
# Example usage:
# log_message "This is a test message"
#=============================================================================
# LOGGING (Simplified)
#=============================================================================
log_success() {
local message="$1"
local timestamp=$(date +%H:%M:%S)
local combined_message="[SUCCESS] ${timestamp}: ${message}"
log_message "${combined_message}"
}
log_error() {
local message="$1"
local timestamp=$(date +%H:%M:%S)
local combined_message="[ERROR] ${timestamp}: ${message}"
log_message "${combined_message}"
}
log_warning() {
local message="$1"
local timestamp=$(date +%H:%M:%S)
local combined_message="[WARNING] ${timestamp}: ${message}"
log_message "${combined_message}"
# Internal logging helper shared by the log_* wrappers.
# Emits one line of the form: "[LEVEL] HH:MM:SS: message" to stdout.
_log() {
    local level="$1" message="$2"
    printf '[%s] %s: %s\n' "$level" "$(date +%H:%M:%S)" "$message"
}
log_info() {
local message="$1"
local timestamp=$(date +%H:%M:%S)
local combined_message="[INFO] ${timestamp}: ${message}"
log_message "${combined_message}"
_log "INFO" "$1"
}
# Public logging wrappers around _log (one per severity level).
log_success() {
    _log "OK" "$1"
}
log_warning() {
    _log "WARN" "$1"
}
# Errors go to stderr so they survive stdout redirection/capture.
log_error() {
    _log "ERROR" "$1" >&2
}
# Print a blank line followed by a "========== title ==========" banner,
# used to mark major installation steps.
log_header() {
    printf '\n========== %s ==========\n' "$1"
}
#=============================================================================
# COLOR OUTPUT (for diagnostics and previews)
#=============================================================================
# ANSI escape sequences stored as literal backslash text; `echo -e` below
# expands them at print time.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Status-line printers used by doctor/diagnostic scripts.
# NOTE(review): `echo -e` also expands backslash escapes inside "$1" itself;
# callers must not pass text containing sequences like \n or \t unless that
# expansion is intended -- confirm before swapping these for printf '%s'.
print_ok() {
    echo -e " ${GREEN}[OK]${NC} $1"
}
print_error() {
    echo -e " ${RED}[ERROR]${NC} $1"
}
print_warning() {
    echo -e " ${YELLOW}[WARNING]${NC} $1"
}
print_info() {
    echo -e " ${BLUE}[INFO]${NC} $1"
}
#=============================================================================
# ENVIRONMENT MANAGEMENT
#=============================================================================
# Source a .env file, auto-exporting every variable it defines (set -a).
# Usage: load_env [env_file_path]   (defaults to $ENV_FILE from init_paths)
# Returns 1 after logging an error when the file does not exist.
load_env() {
    local env_file="${1:-$ENV_FILE}"
    if [[ -f "$env_file" ]]; then
        set -a
        source "$env_file"
        set +a
    else
        log_error ".env file not found: $env_file"
        return 1
    fi
}
# Print the value of a variable from an .env file, with any surrounding
# single or double quotes stripped. Prints nothing when the key is absent.
# Usage: value=$(read_env_var "VAR_NAME" [env_file])
read_env_var() {
    local key="$1"
    local env_file="${2:-$ENV_FILE}"
    # Absent key (or unreadable file) is not an error: emit nothing, return 0.
    grep -q "^${key}=" "$env_file" 2>/dev/null || return 0
    grep "^${key}=" "$env_file" \
        | cut -d'=' -f2- \
        | sed -e 's/^"//' -e 's/"$//' -e "s/^'//" -e "s/'\$//"
}
# Insert or replace a VAR_NAME="value" line in an .env file.
# Any existing definition is deleted first; the .bak sidecar that
# `sed -i.bak` creates is removed immediately afterwards.
# Usage: write_env_var "VAR_NAME" "value" [env_file]
write_env_var() {
    local key="$1" value="$2"
    local env_file="${3:-$ENV_FILE}"
    if grep -q "^${key}=" "$env_file" 2>/dev/null; then
        sed -i.bak "\|^${key}=|d" "$env_file"
        rm -f "${env_file}.bak"
    fi
    printf '%s="%s"\n' "$key" "$value" >> "$env_file"
}
# True (exit 0) when $1 appears as a comma-separated entry in COMPOSE_PROFILES.
# IMPORTANT: COMPOSE_PROFILES must already be set (via load_env or direct
# assignment) before calling.
# Usage: is_profile_active "n8n" && echo "n8n is active"
is_profile_active() {
    local needle=",$1,"
    local haystack=",$COMPOSE_PROFILES,"
    # Comma wrapping prevents "n8n" from matching "n8n-worker".
    [[ -n "$COMPOSE_PROFILES" && "$haystack" == *"$needle"* ]]
}
#=============================================================================
# UTILITIES
#=============================================================================
# Abort the script unless an executable command is available on PATH.
# Usage: require_command "docker" "Install Docker: https://docs.docker.com/engine/install/"
require_command() {
    local cmd="$1"
    local install_hint="${2:-Please install $cmd}"
    # Early return on success keeps the error path unindented.
    command -v "$cmd" > /dev/null 2>&1 && return 0
    log_error "'$cmd' not found. $install_hint"
    exit 1
}
# Remove *.bak sidecar files left behind by `sed -i.bak` (top level only).
# Never fails, even when the directory is missing or unreadable.
# Usage: cleanup_bak_files [directory]   (defaults to $PROJECT_ROOT)
cleanup_bak_files() {
    local target_dir="${1:-$PROJECT_ROOT}"
    find "$target_dir" -maxdepth 1 -type f -name '*.bak' -delete 2>/dev/null || true
}
# Escape string for JSON output
# Escapes backslashes and double quotes, then strips raw newlines/CRs so the
# value is safe inside a JSON string literal.
# NOTE(review): the middle sed expression reads 's/ /\\t/g' here; presumably it
# escaped literal TAB characters ('s/<TAB>/\\t/g') before whitespace was
# mangled -- confirm against the original file, since replacing *spaces* with
# \t would corrupt every value.
# Usage: escaped=$(json_escape "string with \"quotes\"")
json_escape() {
    local str="$1"
    printf '%s' "$str" | sed 's/\\/\\\\/g; s/"/\\"/g; s/ /\\t/g' | tr -d '\n\r'
}
#=============================================================================
# FILE UTILITIES
#=============================================================================
# Require a file to exist; log the (optionally custom) message and abort
# the script when it does not.
# Usage: require_file "/path/to/file" "Custom error message"
require_file() {
    local path="$1"
    local message="${2:-File not found: $path}"
    [[ -f "$path" ]] && return 0
    log_error "$message"
    exit 1
}
# Ensure a file exists, creating an empty one when missing.
# Always returns 0 on success.
# Fix: the previous one-liner `[[ ! -f "$file" ]] && touch "$file"` returned
# status 1 when the file already existed, which aborts any caller running
# under `set -e` (e.g. update_compose_profiles on a populated .env).
# Usage: ensure_file_exists "/path/to/file"
ensure_file_exists() {
    local file="$1"
    if [[ ! -f "$file" ]]; then
        touch "$file"
    fi
}
#=============================================================================
# COMPOSE PROFILES MANAGEMENT
#=============================================================================
# Replace (or append) the COMPOSE_PROFILES line in an .env file.
# Unlike write_env_var, the value is written unquoted, as docker compose
# expects for this variable.
# Usage: update_compose_profiles "n8n,monitoring,portainer" [env_file]
update_compose_profiles() {
    local profiles="$1"
    local env_file="${2:-$ENV_FILE}"
    ensure_file_exists "$env_file"
    # Drop any existing definition before appending the new one.
    if grep -q "^COMPOSE_PROFILES=" "$env_file"; then
        sed -i.bak "\|^COMPOSE_PROFILES=|d" "$env_file"
        rm -f "${env_file}.bak"
    fi
    printf 'COMPOSE_PROFILES=%s\n' "$profiles" >> "$env_file"
}
#=============================================================================
# DEBIAN_FRONTEND MANAGEMENT
#=============================================================================
# Holds the caller's DEBIAN_FRONTEND value between save/restore calls.
ORIGINAL_DEBIAN_FRONTEND=""

# Save the current DEBIAN_FRONTEND and switch to "dialog" so whiptail renders.
# Fix: expand with ${VAR:-} so this no longer crashes with "unbound variable"
# when DEBIAN_FRONTEND is unset and the calling script uses `set -u`.
# Usage: save_debian_frontend
save_debian_frontend() {
    ORIGINAL_DEBIAN_FRONTEND="${DEBIAN_FRONTEND:-}"
    export DEBIAN_FRONTEND=dialog
}
# Restore DEBIAN_FRONTEND to the value captured by save_debian_frontend,
# or unset it entirely when nothing (or an empty value) was saved.
# Usage: restore_debian_frontend
restore_debian_frontend() {
    if [[ -z "$ORIGINAL_DEBIAN_FRONTEND" ]]; then
        unset DEBIAN_FRONTEND
    else
        export DEBIAN_FRONTEND="$ORIGINAL_DEBIAN_FRONTEND"
    fi
}
#=============================================================================
# SECRET GENERATION
#=============================================================================
# Generate a random string of exactly LENGTH characters drawn from a tr set.
# Fix: the previous `head /dev/urandom | tr ...` read only ~10 newline-
# delimited "lines" of random bytes, which could contain fewer than LENGTH
# matching characters and silently produce a short secret. Streaming
# /dev/urandom into tr until `head -c` has enough guarantees full length.
# LC_ALL=C keeps tr byte-oriented regardless of the active locale.
# Usage: gen_random 32 'A-Za-z0-9'
gen_random() {
    local length="$1"
    local characters="$2"
    LC_ALL=C tr -dc "$characters" < /dev/urandom | head -c "$length"
}
# Generate an alphanumeric password of the requested length.
# Thin wrapper over gen_random with a fixed character class.
# Usage: gen_password 32
gen_password() {
    local length="$1"
    gen_random "$length" 'A-Za-z0-9'
}
# Generate a hex string of exactly LENGTH characters using openssl.
# Usage: gen_hex 64   (returns 64 hex characters)
gen_hex() {
    local length="$1"
    # openssl emits 2 hex chars per byte; round the byte count up so odd
    # lengths still get enough output, then trim to the exact length.
    local byte_count=$(( (length + 1) / 2 ))
    openssl rand -hex "$byte_count" | head -c "$length"
}
# Generate a base64 string of exactly LENGTH characters using openssl.
# NOTE(review): `openssl rand -base64` wraps long output with newlines, and
# `head -c` counts those -- for lengths > ~64 the result may contain a
# newline; confirm callers stay below that (same behavior as before).
# Usage: gen_base64 64   (returns 64 base64 characters)
gen_base64() {
    local length="$1"
    # base64 yields 4 output chars per 3 input bytes; round up, then trim.
    local byte_count=$(( (length * 3 + 3) / 4 ))
    openssl rand -base64 "$byte_count" | head -c "$length"
}
# Produce a bcrypt hash of a plaintext password using the caddy binary.
# Emits nothing (and still exits 0) when the plaintext is empty; caddy's
# stderr is suppressed so only the hash reaches stdout.
# Usage: hash=$(generate_bcrypt_hash "plaintext_password")
generate_bcrypt_hash() {
    local plaintext="$1"
    [[ -z "$plaintext" ]] && return 0
    caddy hash-password --algorithm bcrypt --plaintext "$plaintext" 2>/dev/null
}
#=============================================================================
# VALIDATION
#=============================================================================
# Succeed (exit 0) only for strictly positive integers.
# Leading zeros are tolerated ("007" passes); "0", "", signs, and any
# non-digit characters all fail.
# Usage: validate_positive_integer "5" && echo "valid"
validate_positive_integer() {
    [[ "$1" =~ ^0*[1-9][0-9]*$ ]]
}
#=============================================================================
# WHIPTAIL HELPERS
#=============================================================================
# Ensure whiptail is available
require_whiptail() {
if ! command -v whiptail >/dev/null 2>&1; then
@@ -59,8 +300,9 @@ require_whiptail() {
fi
}
# Input box. Usage: wt_input "Title" "Prompt" "default"
# Echoes the input on success; returns 0 on OK, 1 on Cancel
# Input box
# Usage: result=$(wt_input "Title" "Prompt" "default")
# Returns 0 on OK, 1 on Cancel
wt_input() {
local title="$1"
local prompt="$2"
@@ -75,8 +317,9 @@ wt_input() {
return 0
}
# Password box. Usage: wt_password "Title" "Prompt"
# Echoes the input on success; returns 0 on OK, 1 on Cancel
# Password box
# Usage: result=$(wt_password "Title" "Prompt")
# Returns 0 on OK, 1 on Cancel
wt_password() {
local title="$1"
local prompt="$2"
@@ -90,7 +333,8 @@ wt_password() {
return 0
}
# Yes/No box. Usage: wt_yesno "Title" "Prompt" "default" (default: yes|no)
# Yes/No box
# Usage: wt_yesno "Title" "Prompt" "default" (default: yes|no)
# Returns 0 for Yes, 1 for No/Cancel
wt_yesno() {
local title="$1"
@@ -103,10 +347,10 @@ wt_yesno() {
fi
}
# Message box: show an informational dialog with a single OK button.
# Blocks until the user dismisses it.
# Usage: wt_msg "Title" "Message"
wt_msg() {
    local box_title="$1"
    local box_text="$2"
    whiptail --title "$box_title" --msgbox "$box_text" 10 80
}