# NOTE(review): the lines below are web-UI/CI residue captured along with the
# script (status banner, commit message, file-size metadata). They are not part
# of the program; commented out so the file parses. The real script starts at
# the shebang below.
# Some checks failed
# Deploy to Phoenix / deploy (push) Has been cancelled
# - ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands - CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround - CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check - NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere - MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates - LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference Co-authored-by: Cursor <cursoragent@cursor.com>
# 298 lines / 8.5 KiB / Bash / Executable File
# 298 lines / 8.5 KiB / Bash / Executable File
#!/usr/bin/env bash
#
# Complete script to create RAID 10 on r630-01 using sdc-sdh.
# Strategy: build the array with the 4 currently free disks (sde-sdh),
# migrate the LVM data off sdc/sdd, then rebuild the array with all 6 disks.
set -euo pipefail
# NOTE: a later duplicate `set -u` was removed - it is already covered by
# the -u in `set -euo pipefail` above.

# Load IP configuration. A missing file is tolerated here (|| true), but the
# one variable this script needs is asserted explicitly below.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

readonly TARGET_NODE="r630-01"

# Fail fast with a clear message when the config file did not provide the IP
# (previously this died with a bare "unbound variable" error under set -u).
TARGET_NODE_IP="${PROXMOX_HOST_R630_01:?PROXMOX_HOST_R630_01 is not set - check config/ip-addresses.conf}"
readonly TARGET_NODE_IP

# SECURITY: allow the root password to be supplied via the environment
# instead of only being hard-coded; falls back to the original literal so
# existing usage keeps working.
TARGET_NODE_PASS="${TARGET_NODE_PASS:-password}"
readonly TARGET_NODE_PASS
# ANSI color codes used by the logging helpers below.
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers: print a colored status tag followed by the message.
# %b expands the backslash escapes in the color codes (same as `echo -e`).
log_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
log_success() { printf '%b\n' "${GREEN}[✓]${NC} $1"; }
log_error()   { printf '%b\n' "${RED}[✗]${NC} $1"; }
log_warn()    { printf '%b\n' "${YELLOW}[⚠]${NC} $1"; }
# Run a command on the target node as root over ssh, merging stderr into
# stdout so callers can capture diagnostics with $(...).
# Arguments: the remote command (passed through to ssh verbatim).
# Returns:   the remote command's exit status.
ssh_r630_01() {
    # SECURITY FIX: pass the password through the environment (sshpass -e
    # reads $SSHPASS) instead of on the command line, where it would be
    # visible to every local user via `ps`.
    SSHPASS="$TARGET_NODE_PASS" sshpass -e ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 root@"$TARGET_NODE_IP" "$@" 2>&1
}
# Install mdadm on the target node via apt.
# Returns: 0 on success, 1 if the remote install fails.
install_mdadm() {
    log_info "Installing mdadm..."
    if ! ssh_r630_01 "apt-get update && apt-get install -y mdadm"; then
        log_error "Failed to install mdadm"
        return 1
    fi
    log_success "mdadm installed"
    return 0
}
# Create a temporary 4-disk RAID 10 (/dev/md0 on sde-sdh), wait up to one
# hour for the initial sync, then persist the array in mdadm.conf.
# Returns: 0 on success (including sync timeout, which is only warned
# about), 1 if the array cannot be created.
create_raid10_4disk() {
    log_info "Creating temporary RAID 10 with 4 available disks (sde-sdh)..."

    # Create RAID 10 with 4 disks
    ssh_r630_01 "mdadm --create /dev/md0 --level=10 --raid-devices=4 /dev/sde /dev/sdf /dev/sdg /dev/sdh" || {
        log_error "Failed to create RAID 10"
        return 1
    }

    log_success "RAID 10 created on /dev/md0"

    # Poll /proc/mdstat until all 4 members show up ([UUUU]) or we time out.
    log_info "Waiting for RAID array to initialize..."
    local max_wait=3600 # 1 hour max
    local waited=0
    local status progress
    while [ $waited -lt $max_wait ]; do
        # BUGFIX: declaration was combined with the command substitution,
        # which masked ssh failures; a failed poll now yields an empty
        # status and falls into the retry branch instead.
        status=$(ssh_r630_01 "cat /proc/mdstat | grep -A 2 md0 | tail -1") || status=""
        if echo "$status" | grep -q "\[UUUU\]"; then
            log_success "RAID array is fully synchronized"
            break
        elif echo "$status" | grep -q "recovery\|resync"; then
            progress=$(echo "$status" | grep -oP '\d+\.\d+%' || echo "in progress")
            log_info "RAID sync progress: $progress"
            sleep 10
            waited=$((waited + 10))
        else
            sleep 5
            waited=$((waited + 5))
        fi
    done
    # BUGFIX: the original fell through a timeout silently; tell the operator.
    if [ $waited -ge $max_wait ]; then
        log_warn "Timed out waiting for RAID sync - continuing, but check /proc/mdstat manually"
    fi

    # Save configuration so the array is reassembled on boot.
    ssh_r630_01 "mdadm --detail --scan >> /etc/mdadm/mdadm.conf" || true

    return 0
}
# Register /dev/md0 with LVM and add it to the existing "pve" volume group.
# Returns: 0 on success, 1 if either LVM step fails.
prepare_raid_for_lvm() {
    log_info "Preparing RAID device for LVM..."

    # Create physical volume on the new array.
    if ! ssh_r630_01 "pvcreate /dev/md0"; then
        log_error "Failed to create PV on RAID"
        return 1
    fi

    # Extend the pve volume group with the new PV.
    if ! ssh_r630_01 "vgextend pve /dev/md0"; then
        log_error "Failed to extend pve VG"
        return 1
    fi

    log_success "RAID added to pve VG"
    return 0
}
# Move all LVM extents off sdc and sdd onto the new RAID PV (/dev/md0).
# Returns: 0 on success, 1 as soon as any pvmove fails.
migrate_data_to_raid() {
    log_info "Migrating data from sdc/sdd to RAID..."

    # Drain each old disk in turn; pvmove is slow, hence the per-disk notice.
    local disk
    for disk in sdc sdd; do
        log_info "Migrating data from ${disk} to RAID (this may take a while)..."
        if ! ssh_r630_01 "pvmove /dev/${disk} /dev/md0"; then
            log_error "Failed to migrate data from ${disk}"
            return 1
        fi
    done

    log_success "Data migration completed"
    return 0
}
# Detach sdc and sdd from the pve VG and wipe their PV labels, freeing the
# disks for the 6-disk rebuild. Returns 0 on success, 1 on failure.
remove_old_pvs() {
    log_info "Removing sdc and sdd from pve VG..."

    # Drop the (now empty) PVs out of the volume group.
    if ! ssh_r630_01 "vgreduce pve /dev/sdc /dev/sdd"; then
        log_error "Failed to remove PVs from VG"
        return 1
    fi

    # Erase the LVM labels so the disks can be given to mdadm.
    if ! ssh_r630_01 "pvremove /dev/sdc /dev/sdd"; then
        log_error "Failed to remove PV labels"
        return 1
    fi

    log_success "sdc and sdd removed from pve VG"
    return 0
}
# Stop the temporary 4-disk array and recreate /dev/md0 as a RAID 10 across
# all 6 disks (sdc-sdh), wait for sync, then re-register the array with LVM.
# Destructive: must run only after migrate_data_to_raid/remove_old_pvs.
# Returns: 0 on success, 1 on any unrecoverable step.
rebuild_raid10_6disk() {
    log_info "Stopping RAID array to rebuild with all 6 disks..."

    # Stop the array
    ssh_r630_01 "mdadm --stop /dev/md0" || {
        log_error "Failed to stop RAID array"
        return 1
    }

    # Remove the stale definition from mdadm.conf temporarily
    ssh_r630_01 "sed -i '/md0/d' /etc/mdadm/mdadm.conf" || true

    log_info "Creating RAID 10 with all 6 disks (sdc-sdh)..."

    # Create new RAID 10 with all 6 disks
    ssh_r630_01 "mdadm --create /dev/md0 --level=10 --raid-devices=6 /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh" || {
        log_error "Failed to create RAID 10 with 6 disks"
        log_warn "You may need to manually recover. The RAID with 4 disks may still be accessible."
        return 1
    }

    log_success "RAID 10 created with all 6 disks"

    # Poll until all 6 members show up ([UUUUUU]) or we hit the 2 h cap.
    log_info "Waiting for RAID array to synchronize..."
    local max_wait=7200 # 2 hours max
    local waited=0
    local status progress
    while [ $waited -lt $max_wait ]; do
        # BUGFIX: decl+assign previously masked ssh failures; a failed poll
        # now yields an empty status and falls into the retry branch.
        status=$(ssh_r630_01 "cat /proc/mdstat | grep -A 2 md0 | tail -1") || status=""
        if echo "$status" | grep -q "\[UUUUUU\]"; then
            log_success "RAID array is fully synchronized"
            break
        elif echo "$status" | grep -q "recovery\|resync"; then
            progress=$(echo "$status" | grep -oP '\d+\.\d+%' || echo "in progress")
            log_info "RAID sync progress: $progress"
            sleep 30
            waited=$((waited + 30))
        else
            sleep 10
            waited=$((waited + 10))
        fi
    done
    # BUGFIX: warn instead of silently continuing after a sync timeout.
    if [ $waited -ge $max_wait ]; then
        log_warn "Timed out waiting for RAID sync - continuing, but check /proc/mdstat manually"
    fi

    # Re-add the new array definition to mdadm.conf.
    ssh_r630_01 "mdadm --detail --scan >> /etc/mdadm/mdadm.conf" || true

    # Recreate PV (since the device layout changed).
    # NOTE(review): pvcreate --restorefile also requires --uuid per the LVM
    # docs, so this first attempt is expected to fail and fall back to a
    # plain pvcreate; kept to preserve the original recovery flow - confirm.
    log_info "Recreating physical volume on new RAID..."
    ssh_r630_01 "pvcreate --restorefile /etc/lvm/backup/pve /dev/md0" || {
        log_warn "Restore file not found, creating new PV"
        ssh_r630_01 "pvcreate /dev/md0" || {
            log_error "Failed to create PV on new RAID"
            return 1
        }
    }

    # Extend the VG only if md0 is not already a member of "pve".
    # BUGFIX: the original checked `vgs pve | grep -q md0`, but vgs does not
    # print PV names, so the check never matched and vgextend ran (and could
    # fail) even when md0 was already in the VG; query the PV's VG instead.
    if ! ssh_r630_01 "pvs --noheadings -o vg_name /dev/md0 | grep -qw pve"; then
        ssh_r630_01 "vgextend pve /dev/md0" || {
            log_error "Failed to extend VG"
            return 1
        }
    fi  # BUGFIX: this `if` was closed with '}' in the original - a bash syntax error

    log_success "RAID 10 rebuild completed with all 6 disks"
    return 0
}
# Dump RAID and LVM status from the target node for the operator to review.
show_final_status() {
    local remote_cmd
    log_info "=== Final RAID Status ==="
    # Show the kernel's view of the array, then mdadm's detailed report.
    for remote_cmd in "cat /proc/mdstat" "mdadm --detail /dev/md0"; do
        ssh_r630_01 "$remote_cmd"
        echo ""
    done
    log_info "=== LVM Status ==="
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"
}
# Orchestrate the full migration: install tooling, show current state,
# confirm with the operator, build the 4-disk array, migrate the data off
# sdc/sdd, then (after a second confirmation) rebuild with all 6 disks.
# Exits non-zero on the first failed step.
main() {
    echo ""
    log_info "=== Complete RAID 10 Setup for R630-01 ==="
    log_info "Strategy: Create RAID 10 with 4 disks, migrate data, rebuild with 6 disks"
    log_info "Disks: sdc, sdd, sde, sdf, sdg, sdh"
    echo ""

    # Install mdadm
    if ! install_mdadm; then
        exit 1
    fi

    # Check current status
    log_info "Current storage status:"
    ssh_r630_01 "vgs pve"
    ssh_r630_01 "pvs | grep pve"
    echo ""

    log_warn "WARNING: This process will:"
    log_warn "1. Create RAID 10 with 4 disks (sde-sdh)"
    log_warn "2. Migrate all data from sdc/sdd to RAID"
    log_warn "3. Remove sdc/sdd from pve VG"
    log_warn "4. Rebuild RAID 10 with all 6 disks"
    log_warn ""
    log_warn "This will take several hours and requires downtime!"
    # BUGFIX: read -r so a stray backslash in the answer is taken literally.
    read -r -p "Continue? (yes/no): " confirm
    if [ "$confirm" != "yes" ]; then
        log_info "Operation cancelled"
        exit 0
    fi

    # Step 1: Create RAID 10 with 4 disks
    if ! create_raid10_4disk; then
        log_error "Failed to create initial RAID"
        exit 1
    fi

    # Step 2: Prepare for LVM
    if ! prepare_raid_for_lvm; then
        log_error "Failed to prepare RAID for LVM"
        exit 1
    fi

    # Step 3: Migrate data
    if ! migrate_data_to_raid; then
        log_error "Data migration failed"
        exit 1
    fi

    # Step 4: Remove old PVs
    if ! remove_old_pvs; then
        log_error "Failed to remove old PVs"
        exit 1
    fi

    # Step 5: Rebuild with all 6 disks. Separate confirmation because the
    # array is stopped (storage offline) during this phase.
    log_warn "About to rebuild RAID with all 6 disks. This will stop the array temporarily."
    read -r -p "Continue with rebuild? (yes/no): " confirm_rebuild
    if [ "$confirm_rebuild" = "yes" ]; then
        if ! rebuild_raid10_6disk; then
            log_error "RAID rebuild failed"
            exit 1
        fi
    else
        log_warn "Skipping rebuild. RAID 10 is running with 4 disks."
    fi

    # Show final status
    show_final_status

    log_success "RAID 10 setup completed!"
    log_info "RAID device: /dev/md0"
    log_info "Capacity: ~700GB (RAID 10 with 6 disks)"
    log_info "Performance: Excellent read/write speeds"
    log_info "Redundancy: Can survive 1-3 disk failures"
}

main "$@"