#!/usr/bin/env bash
# Migrate Remaining 6 Containers from ml110 to r630-02
# Quick migration script for remaining containers

set -euo pipefail

# Resolve paths relative to this script so it can be run from anywhere.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
REPORT_DIR="${PROJECT_ROOT}/reports/status"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
MIGRATION_LOG="${REPORT_DIR}/remaining_migrations_${TIMESTAMP}.log"
readonly SCRIPT_DIR PROJECT_ROOT REPORT_DIR TIMESTAMP MIGRATION_LOG

# ANSI color codes for console output; NC resets to the default color.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m'

# Log helpers: print a colored tag to stdout and append to MIGRATION_LOG.
log_info() { echo -e "${BLUE}[INFO]${NC} $1" | tee -a "$MIGRATION_LOG"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1" | tee -a "$MIGRATION_LOG"; }
log_error() { echo -e "${RED}[✗]${NC} $1" | tee -a "$MIGRATION_LOG"; }

# Create the report directory before the first log_* call appends to the log.
mkdir -p "$REPORT_DIR"
# Node connection registry: hostname -> "ip:password".
# SECURITY: root passwords were hard-coded here in plaintext. They can now be
# supplied via the ML110_PASSWORD / R630_02_PASSWORD environment variables;
# the original values remain as backward-compatible fallbacks but should be
# rotated and removed — prefer SSH key authentication entirely.
declare -A NODES
NODES[ml110]="192.168.11.10:${ML110_PASSWORD:-L@kers2010}"
NODES[r630-02]="192.168.11.12:${R630_02_PASSWORD:-password}"
#######################################
# Run a command on a registered node as root.
# Globals:   NODES (read) - hostname -> "ip:password" map
# Arguments: $1 - node hostname (key in NODES); remaining args - the command
# Outputs:   remote command's stdout/stderr
# Returns:   remote command's exit status
#######################################
ssh_node() {
    local hostname="$1"
    shift
    local ip="${NODES[$hostname]%%:*}"
    local password="${NODES[$hostname]#*:}"

    # Shared ssh options. NOTE(review): StrictHostKeyChecking=no is vulnerable
    # to MITM; kept for parity with the existing automation environment.
    local -a ssh_opts=(-o StrictHostKeyChecking=no -o ConnectTimeout=10)

    if command -v sshpass >/dev/null 2>&1; then
        # Pass the password via the environment (sshpass -e) instead of
        # argv (-p) so it cannot be read from the process list via `ps`.
        SSHPASS="$password" sshpass -e ssh "${ssh_opts[@]}" root@"$ip" "$@"
    else
        ssh "${ssh_opts[@]}" root@"$ip" "$@"
    fi
}
#######################################
# Migrate one LXC container from ml110 to r630-02 via backup/restore.
# Steps: vzdump backup on source -> scp to target -> destroy on source ->
# pct restore on target -> start -> verify.
# NOTE(review): the source container is destroyed *before* the restore is
# attempted; the copied backup on the target is the only recovery path if the
# restore fails. Presumably VMIDs are cluster-wide unique, forcing this
# ordering — confirm before reordering.
# Globals:   NODES (read), MIGRATION_LOG (appended via log_* / tee)
# Arguments: $1 - container VMID; $2 - human-readable container name
# Returns:   0 on success, 1 on any failed step
#######################################
migrate_container() {
    local vmid=$1
    local name=$2
    local source_node="ml110"
    local target_node="r630-02"
    local target_storage="thin1-r630-02"

    log_info "========================================="
    log_info "Migrating CT $vmid ($name)"
    log_info "========================================="

    # Step 1: Backup (stopped-mode vzdump, kept on source-local storage).
    # `|| true` preserves the original behavior of inspecting the output even
    # when the remote command exits non-zero (SC2155 split would otherwise
    # abort here under `set -e`).
    log_info "Step 1: Creating backup..."
    local backup_result
    backup_result=$(ssh_node "$source_node" bash <<ENDSSH
vzdump $vmid \\
    --storage local \\
    --compress gzip \\
    --mode stop \\
    --remove 0 2>&1
ENDSSH
    ) || true

    if echo "$backup_result" | grep -qiE 'error|failed'; then
        log_error "Backup failed for CT $vmid"
        return 1
    fi

    # Locate the newest dump for this VMID (vzdump filenames are timestamped).
    local backup_file
    backup_file=$(ssh_node "$source_node" "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" || echo "")
    if [ -z "$backup_file" ]; then
        log_error "Backup file not found for CT $vmid"
        return 1
    fi

    local backup_name
    backup_name=$(basename "$backup_file")
    log_success "Backup created: $backup_name"

    # Step 2: Copy the backup archive to the target node.
    log_info "Step 2: Copying backup to $target_node..."
    local source_ip="${NODES[$source_node]%%:*}"
    local target_ip="${NODES[$target_node]%%:*}"
    local source_password="${NODES[$source_node]#*:}"

    ssh_node "$target_node" "mkdir -p /var/lib/vz/dump" || true

    # BUG FIX: the original piped scp into tee and then checked PIPESTATUS[0],
    # but under `set -e -o pipefail` a failing scp aborted the whole script
    # before that check ever ran. Capture the status explicitly instead.
    local -a scp_cmd=(scp -o StrictHostKeyChecking=no -o ConnectTimeout=30
        root@"$source_ip:$backup_file"
        root@"$target_ip:/var/lib/vz/dump/$backup_name")
    local scp_status=0
    if command -v sshpass >/dev/null 2>&1; then
        sshpass -p "$source_password" "${scp_cmd[@]}" 2>&1 | tee -a "$MIGRATION_LOG" || scp_status=$?
    else
        "${scp_cmd[@]}" 2>&1 | tee -a "$MIGRATION_LOG" || scp_status=$?
    fi

    if [ "$scp_status" -ne 0 ]; then
        log_error "Failed to copy backup for CT $vmid"
        return 1
    fi

    log_success "Backup copied"

    # Step 3: Destroy on source (frees the VMID before restoring on target).
    log_info "Step 3: Destroying container on source..."
    ssh_node "$source_node" "pct destroy $vmid --force" 2>&1 | tee -a "$MIGRATION_LOG" || log_error "Destroy failed"
    sleep 3

    # Step 4: Restore on target into the configured storage.
    log_info "Step 4: Restoring container on $target_node..."
    local restore_result
    restore_result=$(ssh_node "$target_node" bash <<ENDSSH
pct restore $vmid /var/lib/vz/dump/$backup_name \\
    --storage $target_storage 2>&1
ENDSSH
    ) || true

    if echo "$restore_result" | grep -qiE 'error|failed'; then
        log_error "Restore failed for CT $vmid: $restore_result"
        return 1
    fi

    log_success "Container restored"

    # Step 5: Start the restored container.
    log_info "Step 5: Starting container..."
    ssh_node "$target_node" "pct start $vmid" 2>&1 | tee -a "$MIGRATION_LOG" || log_error "Start failed"
    sleep 5

    # Step 6: Verify the container is known to the target node.
    local verify_status
    verify_status=$(ssh_node "$target_node" "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "not_found")
    if [ "$verify_status" != "not_found" ]; then
        log_success "CT $vmid migrated successfully (status: $verify_status)"
        return 0
    else
        log_error "CT $vmid not found on target after migration"
        return 1
    fi
}
#######################################
# Orchestrate migration of the remaining containers from ml110 to r630-02.
# Globals:   MIGRATION_LOG (appended); uses ssh_node / migrate_container
# Arguments: none (CLI args are accepted but unused)
# Returns:   0 (individual failures are counted, not fatal)
#######################################
main() {
    log_info "=== Migrating Remaining Containers ==="
    log_info "Timestamp: $(date)"
    echo "" | tee -a "$MIGRATION_LOG"

    # Discover which VMIDs are currently present on the source node.
    log_info "Checking which containers are still on ml110..."
    local containers_on_ml110
    containers_on_ml110=$(ssh_node "ml110" "pct list 2>/dev/null | tail -n +2 | awk '{print \$1}'" || echo "")

    # Candidate containers to migrate (vmid -> name).
    declare -A containers
    containers[1003]="besu-validator-4"
    containers[1004]="besu-validator-5"
    containers[1503]="besu-sentry-4"
    containers[1504]="besu-sentry-ali"
    containers[2401]="besu-rpc-thirdweb-0x8a-1"

    # Filter to only containers that actually exist on ml110.
    declare -A containers_to_migrate
    local vmid
    for vmid in "${!containers[@]}"; do
        if echo "$containers_on_ml110" | grep -q "^$vmid$"; then
            containers_to_migrate[$vmid]="${containers[$vmid]}"
            log_info "CT $vmid (${containers[$vmid]}) found on ml110 - will migrate"
        else
            log_info "CT $vmid (${containers[$vmid]}) not on ml110 - skipping"
        fi
    done

    if [ ${#containers_to_migrate[@]} -eq 0 ]; then
        log_info "No containers to migrate - all already on target node"
        return 0
    fi

    local success=0
    local failed=0

    for vmid in "${!containers_to_migrate[@]}"; do
        local name="${containers_to_migrate[$vmid]}"
        if migrate_container "$vmid" "$name"; then
            # BUG FIX: `((success++))` returns the pre-increment value, so the
            # first increment (0 -> 1) evaluated as status 1 and, under
            # `set -e`, killed the script after the first successful
            # migration. `var=$((var + 1))` always succeeds.
            success=$((success + 1))
            log_info "Waiting 10 seconds before next migration..."
            sleep 10
        else
            failed=$((failed + 1))
            log_error "Failed to migrate CT $vmid"
        fi
    done

    log_info ""
    log_info "=== Migration Summary ==="
    log_info "Successfully migrated: $success containers"
    log_info "Failed: $failed containers"
    log_info "Log file: $MIGRATION_LOG"
}

main "$@"