#!/usr/bin/env bash
#
# Migrate containers from r630-02 thin1-r630-02 to other thin pools
# This addresses the critical storage issue where thin1-r630-02 is at 97.78% capacity

# Strict mode: abort on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail
# Resolve script location and project root, then load IP configuration.
# (These were previously defined twice — a merge artifact — now defined once.)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Best-effort: the config may be absent in dev checkouts; downstream code
# must tolerate the PROXMOX_* variables being unset.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Per-run migration log file, timestamped so repeated runs never clobber each other.
LOG_DIR="${PROJECT_ROOT}/logs/migrations"
LOG_FILE="${LOG_DIR}/migrate-thin1-r630-02_$(date +%Y%m%d_%H%M%S).log"
# ANSI colour codes used by the logging helpers (NC = reset / no colour).
# Stored with literal backslash escapes and interpreted by `echo -e`.
# Marked readonly: these are constants and must not be reassigned.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m'
# Logging helpers: print a colourised tag plus message to stdout and append the
# same line to $LOG_FILE. All arguments are joined into the message ("$*"), so
# callers may pass the text as one quoted string or as several words — the old
# "$1" form silently dropped everything after the first argument.
log_info() { echo -e "${BLUE}[INFO]${NC} $*" | tee -a "$LOG_FILE"; }
log_success() { echo -e "${GREEN}[✓]${NC} $*" | tee -a "$LOG_FILE"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $*" | tee -a "$LOG_FILE"; }
log_error() { echo -e "${RED}[✗]${NC} $*" | tee -a "$LOG_FILE"; }
log_header() { echo -e "${CYAN}=== $* ===${NC}" | tee -a "$LOG_FILE"; }
# Create the per-run log directory before the first log_* call appends to
# $LOG_FILE (tee -a creates the file but not its parent directory).
mkdir -p "$LOG_DIR"
# Node configuration
NODE="r630-02"

# Node IP comes from config/ip-addresses.conf. Default to empty instead of
# letting `set -u` abort with an opaque "unbound variable" error when the
# config file was not found, and warn so the operator knows why SSH will fail.
NODE_IP="${PROXMOX_HOST_R630_02:-}"
[ -n "$NODE_IP" ] || echo "WARNING: PROXMOX_HOST_R630_02 is not set; check config/ip-addresses.conf" >&2

# SECURITY: root password was hardcoded; allow an environment override so the
# secret does not have to live in this script. Falls back to the old value.
NODE_PASSWORD="${NODE_PASSWORD:-password}"
SOURCE_STORAGE="thin1-r630-02"

# Target storage pools (all empty and available)
TARGET_POOLS=("thin2" "thin3" "thin5" "thin6")
# Round-robin cursor into TARGET_POOLS.
CURRENT_POOL_INDEX=0
# SSH helper: run a command string on the migration node as root.
# NOTE(review): sshpass exposes the password on the sshpass process's argv;
# acceptable only on a trusted admin host.
ssh_node() {
    local -a ssh_opts=(-o StrictHostKeyChecking=no -o ConnectTimeout=10)
    sshpass -p "$NODE_PASSWORD" ssh "${ssh_opts[@]}" "root@${NODE_IP}" "$@"
}
# Get the next target storage pool (round-robin over TARGET_POOLS).
# WARNING: do NOT call this via command substitution `$(get_next_target_pool)`
# when relying on the global cursor — the CURRENT_POOL_INDEX update would
# happen inside the subshell and be lost. Either call it with stdout
# redirected, or pass an explicit index as $1 (backward-compatible addition)
# so rotation state is caller-driven.
#   $1 - optional zero-based index; defaults to CURRENT_POOL_INDEX
# Outputs: selected pool name on stdout.
get_next_target_pool() {
    local idx="${1:-$CURRENT_POOL_INDEX}"
    local pool="${TARGET_POOLS[idx % ${#TARGET_POOLS[@]}]}"
    CURRENT_POOL_INDEX=$(( (idx + 1) % ${#TARGET_POOLS[@]} ))
    echo "$pool"
}
# Check whether a container is currently running on the node.
#   $1 - container VMID
# Returns 0 when `pct status` (run remotely) reports "running", non-zero
# otherwise. The old `&& return 0 || return 1` wrapper was redundant — the
# command's own exit status is the function's exit status.
is_container_running() {
    local vmid="$1"
    ssh_node "pct status $vmid 2>/dev/null | grep -q 'running'"
}
# Print the storage pool that holds a container's rootfs.
#   $1 - container VMID
# The rootfs config line looks like "rootfs: <pool>:<volume>,size=...": the
# remote awk grabs the second whitespace field ("<pool>:<volume>,...") and the
# local cut keeps everything before the first colon. Simplified from a
# redundant remote double-awk, and now consistent with the inline query used
# in main(). Prints an empty string if the container has no rootfs line.
get_container_storage() {
    local vmid="$1"
    ssh_node "pct config $vmid 2>/dev/null | grep '^rootfs:' | awk '{print \$2}'" | cut -d: -f1
}
# Migrate one container's rootfs volume to another storage pool on the same
# node. A running container is stopped first and restarted afterwards; on a
# failed move the container is restarted on a best-effort basis.
#   $1 - container VMID
#   $2 - target storage pool
#   $3 - container name (used in log messages only)
# Returns 0 on success, 1 on failure.
migrate_container() {
    local vmid="$1" target_storage="$2" container_name="$3"
    local was_running=false

    log_info "Migrating container $vmid ($container_name) to $target_storage..."

    # Remember the run state so it can be restored after the move.
    if is_container_running "$vmid"; then
        was_running=true
        log_info "Container $vmid is running, will stop before migration..."
        log_info "Stopping container $vmid..."
        if ! ssh_node "pct stop $vmid"; then
            log_error "Failed to stop container $vmid"
            return 1
        fi
        sleep 2
    fi

    # `pct move-volume <vmid> rootfs <storage>` performs a same-node storage
    # migration of the root filesystem; its output goes to the log file.
    log_info "Moving container $vmid disk from $SOURCE_STORAGE to $target_storage..."
    if ! ssh_node "pct move-volume $vmid rootfs $target_storage" >> "$LOG_FILE" 2>&1; then
        log_error "Failed to move container $vmid disk"
        # Best-effort restart if we stopped it above.
        if [ "$was_running" = true ]; then
            log_info "Attempting to restart container $vmid..."
            ssh_node "pct start $vmid" || true
        fi
        return 1
    fi

    log_success "Container $vmid disk moved successfully to $target_storage"
    if [ "$was_running" = true ]; then
        log_info "Starting container $vmid..."
        sleep 2
        ssh_node "pct start $vmid" || log_warn "Failed to start container $vmid (may need manual start)"
    fi
    return 0
}
# Verify that a container's rootfs now lives on the expected storage pool.
#   $1 - container VMID
#   $2 - expected storage pool
# Returns 0 when the container's current pool matches, 1 otherwise.
verify_migration() {
    local vmid="$1"
    local target_storage="$2"
    local current_storage

    # Declaration split from assignment so get_container_storage's exit status
    # is not masked by `local`'s own (always-zero) status.
    current_storage=$(get_container_storage "$vmid")

    if [ "$current_storage" = "$target_storage" ]; then
        log_success "Verified: Container $vmid is now on $target_storage"
        return 0
    fi
    log_error "Verification failed: Container $vmid storage is $current_storage (expected $target_storage)"
    return 1
}
# Main migration workflow: enumerate containers on the overloaded pool,
# confirm with the operator (unless --yes/-y), migrate each container to the
# target pools round-robin, verify, and summarise.
#   $1 - optional "--yes" or "-y" for non-interactive auto-confirm
main() {
    log_header "Migration: thin1-r630-02 to Other Thin Pools"
    echo ""

    log_info "Source Node: $NODE ($NODE_IP)"
    log_info "Source Storage: $SOURCE_STORAGE"
    log_info "Target Storage Pools: ${TARGET_POOLS[*]}"
    echo ""

    # Get list of container VMIDs that own volumes on the source storage.
    log_info "Identifying containers on $SOURCE_STORAGE..."
    local containers
    # Declaration split from assignment so an SSH failure is not masked by
    # `local`; fail loudly instead of misreporting "no containers found".
    containers=$(ssh_node "pvesm list $SOURCE_STORAGE 2>/dev/null | tail -n +2 | awk '{print \$NF}' | sort -u") || {
        log_error "Failed to query $SOURCE_STORAGE on $NODE"
        return 1
    }

    if [ -z "$containers" ]; then
        log_warn "No containers found on $SOURCE_STORAGE"
        return 0
    fi

    # One VMID per line -> array.
    local container_list
    mapfile -t container_list <<<"$containers"

    # Filter out containers whose rootfs already lives on different storage.
    local containers_to_migrate=()
    local vmid current_storage
    for vmid in "${container_list[@]}"; do
        # Best-effort query (|| true): an unreachable container is treated as
        # "not on the source pool" and skipped rather than aborting the run.
        current_storage=$(ssh_node "pct config $vmid 2>/dev/null | grep '^rootfs:' | awk '{print \$2}' | cut -d: -f1" || true)
        if [ "$current_storage" = "$SOURCE_STORAGE" ]; then
            containers_to_migrate+=("$vmid")
        else
            log_info "Container $vmid is already on $current_storage, skipping..."
        fi
    done

    if [ ${#containers_to_migrate[@]} -eq 0 ]; then
        log_success "All containers have been migrated from $SOURCE_STORAGE"
        return 0
    fi

    log_info "Found ${#containers_to_migrate[@]} containers to migrate: ${containers_to_migrate[*]}"
    echo ""

    container_list=("${containers_to_migrate[@]}")

    # Resolve container hostnames for friendlier log output.
    declare -A container_names
    local name
    for vmid in "${container_list[@]}"; do
        name=$(ssh_node "pct config $vmid 2>/dev/null | grep '^hostname:' | awk '{print \$2}'" || echo "unknown")
        container_names[$vmid]="$name"
    done

    # Report target storage availability (column 6 of `pvesm status`); the
    # value was previously fetched and then ignored.
    log_info "Checking target storage availability..."
    local pool available
    for pool in "${TARGET_POOLS[@]}"; do
        available=$(ssh_node "pvesm status $pool 2>/dev/null | awk 'NR==2 {print \$6}'" || echo "0")
        log_info "  $pool: ${available:-unknown} available"
    done
    echo ""

    # Confirm migration
    log_warn "This will migrate ${#container_list[@]} containers from $SOURCE_STORAGE"
    log_info "Containers to migrate:"
    for vmid in "${container_list[@]}"; do
        log_info "  - VMID $vmid: ${container_names[$vmid]}"
    done
    echo ""

    # --yes / -y enables non-interactive mode.
    local auto_confirm=false
    if [[ "${1:-}" == "--yes" ]] || [[ "${1:-}" == "-y" ]]; then
        auto_confirm=true
        log_info "Auto-confirm mode enabled"
    fi

    if [ "$auto_confirm" = false ]; then
        local confirm
        read -r -p "Continue with migration? (yes/no): " confirm
        if [ "$confirm" != "yes" ]; then
            log_info "Migration cancelled by user"
            return 0
        fi
    fi
    echo ""

    # Perform migrations.
    local success_count=0
    local fail_count=0
    local pool_index=0
    local target_pool container_name

    for vmid in "${container_list[@]}"; do
        # Round-robin selection is done inline: calling get_next_target_pool
        # via $(...) runs it in a subshell, so its CURRENT_POOL_INDEX update
        # was lost and every container went to the same pool.
        target_pool="${TARGET_POOLS[pool_index % ${#TARGET_POOLS[@]}]}"
        pool_index=$((pool_index + 1))
        container_name="${container_names[$vmid]}"

        log_header "Migrating Container $vmid ($container_name)"

        if migrate_container "$vmid" "$target_pool" "$container_name"; then
            if verify_migration "$vmid" "$target_pool"; then
                # Not ((x++)): that returns 1 when x was 0 and aborts under set -e.
                success_count=$((success_count + 1))
            else
                fail_count=$((fail_count + 1))
            fi
        else
            fail_count=$((fail_count + 1))
        fi
        echo ""
    done

    # Final summary
    log_header "Migration Summary"
    log_info "Total containers: ${#container_list[@]}"
    log_success "Successfully migrated: $success_count"
    if [ "$fail_count" -gt 0 ]; then
        log_error "Failed migrations: $fail_count"
    fi

    # Check final storage status
    echo ""
    log_info "Final storage status:"
    ssh_node "pvesm status | grep -E '(thin1-r630-02|thin2|thin3|thin5|thin6)'" | tee -a "$LOG_FILE"

    echo ""
    log_success "Migration complete! Log saved to: $LOG_FILE"
}
# Entry point: run the migration, forwarding all CLI arguments (e.g. --yes / -y) to main.
main "$@"