#!/usr/bin/env bash
# Rename and migrate ChainID 138 containers to prevent duplicate naming
# Migrates containers to appropriate Proxmox nodes (r630-01 and r630-02)

# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail

# Load IP configuration (best-effort: the defaults below cover a missing
# or unreadable config file, hence the deliberate "|| true").
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Proxmox API host (overridable via environment or ip-addresses.conf).
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# Node the containers currently live on.
SOURCE_NODE="ml110"
# Migration destinations.
TARGET_NODE_PVE="r630-01"
TARGET_NODE_PVE2="r630-02"

# Storage on target nodes
# r630-01 and r630-02 use 'local' directory storage (not local-lvm)
# NOTE(review): the comment above says 'local' but the defaults are
# 'thin1' — confirm which storage the target nodes actually provide.
STORAGE_PVE="${STORAGE_PVE:-thin1}"
STORAGE_PVE2="${STORAGE_PVE2:-thin1}"
# Colors (ANSI escape sequences; NC resets).
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers. printf '%b' renders the color escape sequences while
# '%s' keeps the message literal — the previous 'echo -e' also expanded
# backslash escapes inside the message itself and mishandled messages
# that look like echo options (e.g. "-n").
log_info()    { printf '%b[INFO]%b %s\n'  "$BLUE"   "$NC" "$1"; }
log_success() { printf '%b[✓]%b %s\n'     "$GREEN"  "$NC" "$1"; }
log_warn()    { printf '%b[WARN]%b %s\n'  "$YELLOW" "$NC" "$1"; }
log_error()   { printf '%b[ERROR]%b %s\n' "$RED"    "$NC" "$1"; }
# Containers to migrate to r630-01: VMID -> new hostname.
declare -A MIGRATE_TO_PVE=(
  [1504]="besu-sentry-ali"
  [2503]="besu-rpc-ali-0x8a"
  [2504]="besu-rpc-ali-0x1"
  [6201]="firefly-ali-1"
)

# Containers to migrate to r630-02: VMID -> new hostname.
declare -A MIGRATE_TO_PVE2=(
  [2505]="besu-rpc-luis-0x8a"
  [2506]="besu-rpc-luis-0x1"
  [2507]="besu-rpc-putu-0x8a"
  [2508]="besu-rpc-putu-0x1"
)
# Check whether container $1 exists on node $2 (default: $SOURCE_NODE).
# Prints "yes" or "no" on stdout; always returns 0.
container_exists() {
  local vmid=$1
  local node="${2:-$SOURCE_NODE}"
  # BUG FIX: the old remote pipeline was
  #   pvesh ... | jq -r '.status' && echo 'yes' || echo 'no'
  # jq exits 0 even on empty input, so a failed pvesh still produced
  # "yes". Branch on pvesh's own exit status instead.
  ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "if pvesh get /nodes/$node/lxc/$vmid/status/current >/dev/null 2>&1; then echo yes; else echo no; fi" \
    2>/dev/null || echo "no"
}
# Rename container $1 to hostname $2 on node $3 (default: $SOURCE_NODE).
# Returns 0 on success, 1 on failure.
rename_container() {
  local vmid=$1
  local new_hostname=$2
  local node="${3:-$SOURCE_NODE}"

  log_info "Renaming container $vmid to $new_hostname on $node..."

  # Test the ssh command directly rather than inspecting $? afterwards
  # (post-hoc "$?" checks are fragile — ShellCheck SC2181).
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "pvesh set /nodes/$node/lxc/$vmid/config --hostname $new_hostname" 2>&1; then
    log_success "Container $vmid renamed to $new_hostname"
    return 0
  else
    log_error "Failed to rename container $vmid"
    return 1
  fi
}
# Migrate container $1 to node $2 from node $3 (default: $SOURCE_NODE).
# Stops the container if running (graceful shutdown, then force stop as
# fallback), migrates via the pvesh API with an explicit --storage, and
# retries without --storage if that fails. Polls the target node to
# verify arrival. Returns 0 on (apparent) success, 1 on failure.
migrate_container() {
  local vmid=$1
  local target_node=$2
  local source_node="${3:-$SOURCE_NODE}"

  log_info "Migrating container $vmid from $source_node to $target_node..."

  # Pick the storage configured for the destination node.
  local target_storage="$STORAGE_PVE"
  if [[ "$target_node" == "r630-02" ]]; then
    target_storage="$STORAGE_PVE2"
  fi

  log_info "  Target storage: $target_storage"

  # Check if the container is running; try a graceful shutdown first.
  # Declarations are split from assignments throughout so command exit
  # statuses are not masked by 'local' (ShellCheck SC2155).
  local status
  status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "pvesh get /nodes/$source_node/lxc/$vmid/status/current 2>/dev/null | jq -r '.status' 2>/dev/null || echo 'stopped'")

  if [[ "$status" == "running" ]]; then
    log_info "  Stopping container $vmid for migration..."
    ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
      "pvesh create /nodes/$source_node/lxc/$vmid/status/shutdown --timeout 30" 2>&1 || true
    sleep 5
  fi

  # Report current rootfs storage (informational only).
  local current_rootfs
  current_rootfs=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "pct config $vmid | grep '^rootfs:' | awk '{print \$2}' | cut -d: -f1" 2>/dev/null || echo "local-lvm")

  log_info "  Current storage: $current_rootfs"
  log_info "  Target storage: $target_storage"

  # pct migrate doesn't support --storage, so we drive the pvesh API,
  # which can convert storage during an offline migration.
  log_info "  Migrating with target storage: $target_storage"
  log_info "  Using API migration with storage conversion..."

  # Force-stop fallback in case the graceful shutdown above timed out.
  local current_status
  current_status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "pvesh get /nodes/$source_node/lxc/$vmid/status/current 2>/dev/null | jq -r '.status' 2>/dev/null" || echo "stopped")

  if [ "$current_status" = "running" ]; then
    log_info "  Stopping container before migration..."
    ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
      "pct stop $vmid 2>&1" || true
    sleep 3
  fi

  # BUG FIX: the original used 'local migrate_output=$(ssh ...)' and then
  # read $?, which is the exit status of 'local' itself (always 0), so a
  # failed migration was never detected and the retry/error paths were
  # dead code. Declare first, assign second.
  local migrate_output migrate_exit
  migrate_output=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
    "pvesh create /nodes/$source_node/lxc/$vmid/migrate --target $target_node --storage $target_storage --online 0" 2>&1)
  migrate_exit=$?

  if [ $migrate_exit -ne 0 ]; then
    log_warn "  API migration with storage failed, trying without storage specification..."
    # Retry without --storage (may use default storage conversion).
    migrate_output=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${PROXMOX_HOST}" \
      "pvesh create /nodes/$source_node/lxc/$vmid/migrate --target $target_node --online 0" 2>&1)
    migrate_exit=$?

    if [ $migrate_exit -ne 0 ]; then
      log_error "  Migration failed. Storage conversion may require manual intervention."
      log_info "  Error: $migrate_output"
      log_info "  Options:"
      log_info "    1. Use backup/restore method"
      log_info "    2. Manual migration via Proxmox web UI"
      return 1
    fi
  fi

  # Surface migration output, minus a known-noisy LVM warning.
  echo "$migrate_output" | grep -v "WARNING: Ignoring duplicate" || true

  log_success "Container $vmid migration command completed"

  # Poll the target node until the container shows up (12 x 5s = ~60s).
  log_info "  Waiting for migration to complete and verifying..."
  local i new_status
  for i in {1..12}; do
    sleep 5
    new_status=$(container_exists "$vmid" "$target_node")
    if [[ "$new_status" != "no" ]] && [[ "$new_status" != "not_found" ]]; then
      log_success "Container $vmid is now on $target_node (status: $new_status)"
      return 0
    fi
    if [ $i -lt 12 ]; then
      log_info "  Still migrating... (attempt $i/12)"
    fi
  done

  # Timed out waiting, but the migration may simply be slow — treat as
  # soft success and ask the operator to verify.
  log_warn "Container $vmid migration may have succeeded but not yet visible on target"
  log_info "  Please verify manually: ssh root@${PROXMOX_HOST} 'pvesh get /nodes/$target_node/lxc'"
  return 0
}
# Rename and migrate every container destined for the r630-01 node.
process_pve_containers() {
  local vmid hostname

  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "Processing containers for r630-01 node"
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""

  for vmid in "${!MIGRATE_TO_PVE[@]}"; do
    hostname="${MIGRATE_TO_PVE[$vmid]}"

    # Rename on the source node first; skip the migration if that fails.
    if ! rename_container "$vmid" "$hostname" "$SOURCE_NODE"; then
      log_warn "Container $vmid: skipping (rename failed or already processed)"
      echo ""
      continue
    fi

    if migrate_container "$vmid" "$TARGET_NODE_PVE" "$SOURCE_NODE"; then
      log_success "Container $vmid: renamed and migrated to r630-01"
    else
      log_warn "Container $vmid: renamed but migration may have failed"
    fi
    echo ""
  done
}
# Rename and migrate every container destined for the r630-02 node.
process_pve2_containers() {
  local vmid hostname

  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "Processing containers for r630-02 node"
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""

  for vmid in "${!MIGRATE_TO_PVE2[@]}"; do
    hostname="${MIGRATE_TO_PVE2[$vmid]}"

    # Rename on the source node first; skip the migration if that fails.
    if ! rename_container "$vmid" "$hostname" "$SOURCE_NODE"; then
      log_warn "Container $vmid: skipping (rename failed or already processed)"
      echo ""
      continue
    fi

    if migrate_container "$vmid" "$TARGET_NODE_PVE2" "$SOURCE_NODE"; then
      log_success "Container $vmid: renamed and migrated to r630-02"
    else
      log_warn "Container $vmid: renamed but migration may have failed"
    fi
    echo ""
  done
}
# Main entry point: print the plan, confirm with the operator (unless
# non-interactive), process both migration batches, then verify and
# summarize final container locations.
main() {
  local vmid exists

  echo ""
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "ChainID 138 Container Rename and Migration"
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""

  log_info "This will:"
  log_info "  1. Rename containers to prevent duplicate naming"
  log_info "  2. Migrate containers to appropriate Proxmox nodes"
  echo ""
  log_info "Containers to migrate to r630-01:"
  for vmid in "${!MIGRATE_TO_PVE[@]}"; do
    log_info "  • $vmid -> ${MIGRATE_TO_PVE[$vmid]}"
  done
  echo ""
  log_info "Containers to migrate to r630-02:"
  for vmid in "${!MIGRATE_TO_PVE2[@]}"; do
    log_info "  • $vmid -> ${MIGRATE_TO_PVE2[$vmid]}"
  done
  echo ""

  # Confirmation prompt, skipped when NON_INTERACTIVE=1 or stdin is not
  # a TTY (e.g. running under CI or piped input).
  if [[ "${NON_INTERACTIVE:-}" == "1" ]] || [[ ! -t 0 ]]; then
    log_info "Non-interactive mode: proceeding automatically"
  else
    read -p "Continue? (y/N): " -n 1 -r
    echo ""
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
      log_info "Operation cancelled"
      exit 0
    fi
  fi

  # Process both batches.
  process_pve_containers
  process_pve2_containers

  # Summary
  echo ""
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "Migration Summary"
  log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  log_info "Checking final container locations..."
  echo ""

  log_info "Containers on r630-01:"
  for vmid in "${!MIGRATE_TO_PVE[@]}"; do
    # Split declaration/assignment so the helper's status isn't masked.
    exists=$(container_exists "$vmid" "$TARGET_NODE_PVE")
    if [[ "$exists" == "yes" ]]; then
      # BUG FIX: this line previously said "- on pve"; report the actual
      # node name for consistency with the r630-02 branch below.
      log_success "  $vmid (${MIGRATE_TO_PVE[$vmid]}) - on r630-01"
    else
      log_warn "  $vmid - not found on r630-01"
    fi
  done

  echo ""
  log_info "Containers on r630-02:"
  for vmid in "${!MIGRATE_TO_PVE2[@]}"; do
    exists=$(container_exists "$vmid" "$TARGET_NODE_PVE2")
    if [[ "$exists" == "yes" ]]; then
      log_success "  $vmid (${MIGRATE_TO_PVE2[$vmid]}) - on r630-02"
    else
      log_warn "  $vmid - not found on r630-02"
    fi
  done

  echo ""
  log_info "Next steps:"
  log_info "  1. Verify all containers are on correct nodes"
  log_info "  2. Update documentation with new hostnames"
  log_info "  3. Run deployment script: ./scripts/deploy-all-chain138-containers.sh"
  echo ""
}

main "$@"