Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled
- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands - CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround - CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check - NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere - MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates - LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference Co-authored-by: Cursor <cursoragent@cursor.com>
297 lines
12 KiB
Bash
Executable File
#!/usr/bin/env bash
#
# Migrate RPC nodes from old VMIDs to new VMIDs.
#
# Clones existing VMs/containers to new VMIDs, updates network config,
# and prepares them for Besu node file deployment.
#
# Usage: migrate-rpc-vmids.sh [proxmox_host] [dry_run]

set -euo pipefail

# Resolve the script's own location so the config file is found
# regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Load IP configuration.  A missing file is tolerated (some entries below
# carry inline defaults), but warn instead of failing silently so a typo'd
# path is noticed.
if ! source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null; then
  echo "WARN: ${PROJECT_ROOT}/config/ip-addresses.conf not found or unreadable; relying on built-in defaults" >&2
fi
# ── Terminal colors (readonly so later code cannot clobber them) ──
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Leveled log helpers.  The color variables hold literal backslash escapes,
# so printf '%b' is used to expand them (portable, unlike `echo -e`).
log_info()    { printf '%b[INFO]%b %s\n' "$BLUE" "$NC" "$1"; }
log_success() { printf '%b[✓]%b %s\n' "$GREEN" "$NC" "$1"; }
log_warn()    { printf '%b[⚠]%b %s\n' "$YELLOW" "$NC" "$1"; }
log_error()   { printf '%b[✗]%b %s\n' "$RED" "$NC" "$1"; }
# Proxmox host to operate on (arg 1) and dry-run flag (arg 2: "true"/"false").
PROXMOX_HOST="${1:-192.168.11.10}"
DRY_RUN="${2:-false}"

# Migration mappings, ordered so every destination VMID is free by the time
# it is reached.
#   Format: old_vmid:new_vmid:new_ip:new_name
#   Phase 1: migrate to completely open VMIDs (no conflicts)
#   Phase 2: migrate to VMIDs freed by Phase 1
#   Phase 3: migrate to VMIDs freed by Phase 2
#
# The previously deeply-nested ${VAR:-${VAR:-...}} expansions were redundant
# (inner defaults can never be reached) and are collapsed to one default each.
# Core/public/private IPs have no sane fallback, so fail fast with a clear
# message when the config file did not provide them (set -u would have
# aborted here anyway, with a far more cryptic error).
declare -a MIGRATION_ORDER=(
  # Phase 1: open VMIDs (2101, 2201, 2301, 2303-2308, 2403)
  "2500:2101:${RPC_CORE_1:?RPC_CORE_1 must be set in config/ip-addresses.conf}:besu-rpc-core-1"
  "2501:2201:${RPC_PUBLIC_1:?RPC_PUBLIC_1 must be set in config/ip-addresses.conf}:besu-rpc-public-1"
  "2502:2301:${RPC_PRIVATE_1:?RPC_PRIVATE_1 must be set in config/ip-addresses.conf}:besu-rpc-private-1"
  "2503:2303:${RPC_NODE_233:-192.168.11.233}:besu-rpc-ali-0x8a"
  "2504:2304:${RPC_NODE_234:-192.168.11.234}:besu-rpc-ali-0x1"
  "2505:2305:${RPC_NODE_235:-192.168.11.235}:besu-rpc-luis-0x8a"
  "2506:2306:${RPC_NODE_236:-192.168.11.236}:besu-rpc-luis-0x1"
  "2507:2307:${IP_RPC_237:-192.168.11.237}:besu-rpc-putu-0x8a"
  "2508:2308:${IP_RPC_238:-192.168.11.238}:besu-rpc-putu-0x1"
  "2402:2403:${RPC_THIRDWEB_3:-192.168.11.243}:besu-rpc-thirdweb-0x8a-3"
  # Phase 2: migrate to VMIDs freed by Phase 1 (2402 is now free)
  "2401:2402:${RPC_THIRDWEB_2:-192.168.11.242}:besu-rpc-thirdweb-0x8a-2"
  # Phase 3: migrate to VMIDs freed by Phase 2 (2401 is now free)
  "2400:2401:${RPC_THIRDWEB_1:-192.168.11.241}:besu-rpc-thirdweb-0x8a-1"
)
# ── Banner and SSH preflight ─────────────────────────────────────
BANNER="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
printf '\n%s\n' "$BANNER"
echo "🔄 RPC VMID Migration Script"
printf '%s\n\n' "$BANNER"

if [[ "$DRY_RUN" == "true" ]]; then
  log_warn "DRY RUN MODE - No changes will be made"
  echo ""
fi

# Bail out early if the Proxmox host is unreachable — every later step
# needs SSH.
log_info "Checking SSH access to $PROXMOX_HOST..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "echo 'connected'" &>/dev/null || {
  log_error "Cannot access $PROXMOX_HOST via SSH"
  exit 1
}
log_success "SSH access confirmed"
echo ""
|
# Determine whether a VMID on $PROXMOX_HOST is an LXC container or a QEMU VM.
# Arguments: $1 - VMID to look up
# Outputs:   one of "container", "vm", "not_found", or "error" (SSH failure)
#            on stdout
check_vm_type() {
  local vmid=$1
  local result
  # Declaration split from assignment so the ssh exit status is not masked
  # by `local` (SC2155); an SSH failure falls through to "error".
  result=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct list | grep -q '^$vmid ' && echo 'container' || (qm list | grep -q '^$vmid ' && echo 'vm' || echo 'not_found')" 2>/dev/null || echo "error")
  echo "$result"
}
|
# Clone an LXC container to a new VMID on $PROXMOX_HOST.
# Arguments: $1 - source VMID
#            $2 - destination VMID
#            $3 - hostname for the clone
#            $4 - target storage (default: local-lvm)
# Returns:   0 on success or when the destination VMID already exists
#            (idempotent re-runs); 1 on clone failure
clone_container() {
  local old_vmid=$1
  local new_vmid=$2
  local new_name=$3
  local storage="${4:-local-lvm}"

  log_info "Cloning container $old_vmid → $new_vmid ($new_name)..."

  if [ "$DRY_RUN" = "true" ]; then
    log_warn "  [DRY RUN] Would clone: pct clone $old_vmid $new_vmid --hostname $new_name --storage $storage"
    return 0
  fi

  # Skip if the destination VMID is already taken, so the script can be
  # safely re-run after a partial migration.
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct list | grep -q '^$new_vmid '" 2>/dev/null; then
    log_warn "  VMID $new_vmid already exists, skipping clone"
    return 0
  fi

  # pct clone needs the source stopped.  Declaration split from assignment
  # so the ssh exit status is not masked by `local` (SC2155).
  local status
  status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct status $old_vmid 2>/dev/null | awk '{print \$2}'" || echo "stopped")

  if [ "$status" = "running" ]; then
    log_info "  Stopping old container $old_vmid..."
    ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
      "pct stop $old_vmid" 2>/dev/null || true
    sleep 3  # give Proxmox a moment to release the container
  fi

  log_info "  Cloning container (this may take a few minutes)..."
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct clone $old_vmid $new_vmid --hostname $new_name --storage $storage" 2>&1; then
    log_success "  ✓ Container cloned successfully"
    return 0
  else
    log_error "  ✗ Clone failed"
    return 1
  fi
}
|
|
# Clone a QEMU VM to a new VMID on $PROXMOX_HOST (full clone, not linked).
# Arguments: $1 - source VMID
#            $2 - destination VMID
#            $3 - name for the clone
#            $4 - target storage (default: local-lvm)
# Returns:   0 on success or when the destination VMID already exists
#            (idempotent re-runs); 1 on clone failure
clone_vm() {
  local old_vmid=$1
  local new_vmid=$2
  local new_name=$3
  local storage="${4:-local-lvm}"

  log_info "Cloning VM $old_vmid → $new_vmid ($new_name)..."

  if [ "$DRY_RUN" = "true" ]; then
    log_warn "  [DRY RUN] Would clone: qm clone $old_vmid $new_vmid --name $new_name --storage $storage"
    return 0
  fi

  # Skip if the destination VMID is already taken, so the script can be
  # safely re-run after a partial migration.
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "qm list | grep -q '^$new_vmid '" 2>/dev/null; then
    log_warn "  VMID $new_vmid already exists, skipping clone"
    return 0
  fi

  # qm clone needs the source stopped.  Declaration split from assignment
  # so the ssh exit status is not masked by `local` (SC2155).
  local status
  status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "qm status $old_vmid 2>/dev/null | awk '{print \$2}'" || echo "stopped")

  if [ "$status" = "running" ]; then
    log_info "  Stopping old VM $old_vmid..."
    ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
      "qm stop $old_vmid" 2>/dev/null || true
    sleep 5  # VMs take longer than containers to shut down cleanly
  fi

  # --full: copy the disks rather than creating a linked clone, so the old
  # VM can be decommissioned afterwards.
  log_info "  Cloning VM (this may take several minutes)..."
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "qm clone $old_vmid $new_vmid --name $new_name --storage $storage --full" 2>&1; then
    log_success "  ✓ VM cloned successfully"
    return 0
  else
    log_error "  ✗ Clone failed"
    return 1
  fi
}
|
# Rewrite a container's net0 entry with a new static IP, preserving the
# existing bridge and gateway.
# Arguments: $1 - VMID of the (cloned) container
#            $2 - new IPv4 address (no CIDR suffix; /24 is appended)
# Returns:   0 on success; 1 when no net0 config exists or `pct set` fails
update_container_network() {
  local vmid=$1
  local new_ip=$2

  log_info "Updating network configuration for container $vmid (IP: $new_ip)..."

  if [ "$DRY_RUN" = "true" ]; then
    log_warn "  [DRY RUN] Would update network config"
    return 0
  fi

  # Read the current net0 line.  Declaration split from assignment so the
  # ssh exit status is not masked by `local` (SC2155).
  local current_config
  current_config=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct config $vmid | grep '^net0:'" 2>/dev/null || echo "")

  if [ -z "$current_config" ]; then
    log_warn "  No network config found, may need manual configuration"
    return 1
  fi

  # Keep the bridge and gateway from the existing config
  # (e.g. net0: name=eth0,bridge=vmbr0,firewall=1,ip=.../24,gw=...).
  # NOTE(review): grep -oP requires GNU grep on the machine running this
  # script — fine on Linux, absent on stock macOS/BSD.
  local bridge gateway
  bridge=$(echo "$current_config" | grep -oP 'bridge=\K[^,]+' || echo "vmbr0")
  gateway=$(echo "$current_config" | grep -oP 'gw=\K[^,]+' || echo "${NETWORK_GATEWAY:-192.168.11.1}")

  # Rebuild net0 with the new IP (assumes a /24 network — TODO confirm).
  local new_config="name=eth0,bridge=$bridge,firewall=1,ip=$new_ip/24,gw=$gateway"

  log_info "  Setting network config: net0=$new_config"
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
    "pct set $vmid --net0 '$new_config'" 2>&1; then
    log_success "  ✓ Network config updated"
    return 0
  else
    log_error "  ✗ Failed to update network config"
    return 1
  fi
}
|
# Advise on network reconfiguration for a cloned QEMU VM.
# Unlike containers, a VM's IP lives inside the guest OS (netplan /
# interfaces files), so this deliberately performs no change — it only
# tells the operator what to do, and always succeeds so the migration is
# counted as complete (the main loop treats this as best-effort).
# Arguments: $1 - VMID of the (cloned) VM
#            $2 - new IPv4 address the guest should be given
# Returns:   0 always
update_vm_network() {
  local vmid=$1
  local new_ip=$2

  log_info "Updating network configuration for VM $vmid (IP: $new_ip)..."

  if [ "$DRY_RUN" = "true" ]; then
    log_warn "  [DRY RUN] Would update network config"
    return 0
  fi

  # TODO(review): could be automated via the QEMU guest agent
  # (`qm guest exec`) if the agent is installed in these VMs — confirm.
  log_warn "  VM network configuration may need manual update inside the guest OS"
  log_info "  Use: ssh to the VM and update /etc/netplan/*.yaml or /etc/network/interfaces"
  return 0
}
|
# ── Main migration loop ──────────────────────────────────────────
migrated_count=0
failed_count=0

for migration_entry in "${MIGRATION_ORDER[@]}"; do
  IFS=':' read -r old_vmid new_vmid new_ip new_name <<< "$migration_entry"

  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "Migrating: VMID $old_vmid → $new_vmid"
  log_info "  New IP: $new_ip"
  log_info "  New Name: $new_name"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""

  # Determine whether the source is a container, a VM, absent, or
  # unreachable ("error").
  vm_type=$(check_vm_type "$old_vmid")

  if [ "$vm_type" = "not_found" ]; then
    log_warn "Old VMID $old_vmid not found, skipping"
    failed_count=$((failed_count + 1))
    continue
  fi

  if [ "$vm_type" = "container" ]; then
    if clone_container "$old_vmid" "$new_vmid" "$new_name"; then
      if update_container_network "$new_vmid" "$new_ip"; then
        log_success "✓ Migration complete for $old_vmid → $new_vmid"
        migrated_count=$((migrated_count + 1))
      else
        log_warn "⚠ Clone succeeded but network update failed"
        failed_count=$((failed_count + 1))
      fi
    else
      log_error "✗ Migration failed for $old_vmid → $new_vmid"
      failed_count=$((failed_count + 1))
    fi
  elif [ "$vm_type" = "vm" ]; then
    if clone_vm "$old_vmid" "$new_vmid" "$new_name"; then
      if update_vm_network "$new_vmid" "$new_ip"; then
        log_success "✓ Migration complete for $old_vmid → $new_vmid"
        migrated_count=$((migrated_count + 1))
      else
        # VM network updates are best-effort; still count as migrated.
        log_warn "⚠ Clone succeeded but network update needs manual configuration"
        migrated_count=$((migrated_count + 1))
      fi
    else
      log_error "✗ Migration failed for $old_vmid → $new_vmid"
      failed_count=$((failed_count + 1))
    fi
  else
    # BUG FIX: check_vm_type can return "error" (SSH failure); previously
    # that case matched no branch and the entry vanished from both counters.
    log_error "✗ Could not determine type of VMID $old_vmid (got: $vm_type)"
    failed_count=$((failed_count + 1))
  fi
done
|
# ── Summary ──────────────────────────────────────────────────────
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Migration Summary:"
log_success "  Migrated: $migrated_count"
# Quote counters in [ ] so an (unexpected) empty value cannot break the
# test expression (SC2086).
if [ "$failed_count" -gt 0 ]; then
  log_warn "  Failed: $failed_count"
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Follow-up instructions only make sense after a real (non-dry) run that
# migrated at least one guest.
if [ "$DRY_RUN" = "false" ] && [ "$migrated_count" -gt 0 ]; then
  log_info "Next Steps:"
  echo "  1. Start new VMs: ssh root@$PROXMOX_HOST 'pct start <new_vmid>' or 'qm start <new_vmid>'"
  echo "  2. Deploy Besu node files: bash scripts/deploy-besu-node-files.sh"
  echo "  3. Verify connectivity: bash scripts/test-npmplus-full-connectivity.sh"
  echo "  4. After verification, decommission old VMIDs"
  echo ""
fi