Organized 252 files across the project. The root directory was reduced from 187 to 2 files (a 98.9% reduction). Configuration guides were moved to docs/04-configuration/, troubleshooting guides to docs/09-troubleshooting/, quick-start guides to docs/01-getting-started/, and reports to the reports/ directory. Temporary files were archived, comprehensive reports and documentation were generated, and maintenance scripts and guides were created. All files are organized according to established standards.
File metadata: 315 lines, 9.5 KiB, Bash, executable file.
#!/usr/bin/env bash
#
# Fix Storage Configuration for pve and pve2
#
# Enables storage to start stopped VMs and accept migrations.
#
# Hosts and credentials may be overridden via environment variables of the
# same names; the values below are fall-back defaults only.

set -euo pipefail

# Configuration — allow environment overrides so real credentials never have
# to be edited into the script.  NOTE(review): plaintext root passwords in a
# script are a security risk; prefer SSH keys or exporting PVE_PASS/PVE2_PASS.
PROXMOX_HOST_PVE="${PROXMOX_HOST_PVE:-192.168.11.11}"
PROXMOX_HOST_PVE2="${PROXMOX_HOST_PVE2:-192.168.11.12}"
PVE_PASS="${PVE_PASS:-password}"
PVE2_PASS="${PVE2_PASS:-password}"
readonly PROXMOX_HOST_PVE PROXMOX_HOST_PVE2 PVE_PASS PVE2_PASS

# ANSI color escapes for log output (NC resets to the terminal default)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m'
# Logging helpers.  Informational/success messages go to stdout; warnings
# and errors go to stderr so they survive when stdout is redirected.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
log_error()   { echo -e "${RED}[ERROR]${NC} $1" >&2; }
log_header()  { echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; }
# SSH helper: run a command on a remote Proxmox node as root.
#   $1 - host address
#   $2 - root password (handed to sshpass)
#   $@ - remaining arguments passed straight through to ssh
# The remote command's stderr is folded into stdout for easy capture.
ssh_node() {
  local remote_host=$1
  local remote_pass=$2
  shift 2
  sshpass -p "$remote_pass" \
    ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new \
    root@"$remote_host" "$@" 2>&1
}
# Check storage status
#
# Query the status column of one storage entry on a remote node.
#   $1 - host, $2 - password, $3 - storage name
# Outputs the pvesm status field (e.g. "active") on stdout, or an empty
# string when the storage is missing or the node is unreachable.
check_storage_status() {
  local host=$1
  local pass=$2
  local storage=$3
  local status

  # Declare and assign separately so a failing remote call is not masked
  # by the always-zero exit status of `local` (ShellCheck SC2155).
  status=$(ssh_node "$host" "$pass" \
    "pvesm status 2>/dev/null | grep '^$storage' | awk '{print \$3}'") || status=""
  echo "$status"
}
# Enable thin1 storage on pve2
#
# Ensures the LVM-thin pool thin1/thin1 is registered (and active) as the
# Proxmox storage "thin1" on node pve2.
# Returns: 0 when the storage is (or becomes) active/enabled, 1 otherwise.
fix_pve2_thin1() {
  log_info "Fixing thin1 storage on pve2..."

  # Declare/assign separately so remote failures are not masked (SC2155).
  local pool_exists
  pool_exists=$(ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" \
    "lvs thin1/thin1 2>/dev/null | grep -q thin1 && echo 'yes' || echo 'no'")

  if [ "$pool_exists" != "yes" ]; then
    log_error "thin1 pool does not exist on pve2"
    return 1
  fi

  log_success "thin1 pool exists on pve2"

  local status
  status=$(check_storage_status "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "thin1")
  log_info "Current thin1 status: $status"

  if [ "$status" == "active" ] || [ "$status" == "enabled" ]; then
    log_success "thin1 storage is already active on pve2"
    return 0
  fi

  # Drop any stale/disabled definition first so the re-add is idempotent.
  log_info "Removing disabled thin1 storage configuration..."
  ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "pvesm remove thin1 2>/dev/null" || true
  sleep 2

  # Re-register the thin pool; the command is fed to the remote shell on stdin.
  log_info "Adding thin1 storage to Proxmox configuration..."
  ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" <<EOF
pvesm add lvmthin thin1 \
    --thinpool thin1 \
    --vgname thin1 \
    --content images,rootdir \
    --nodes pve2 2>&1
EOF

  sleep 3

  # Verify the storage actually came up before reporting success.
  local new_status
  new_status=$(check_storage_status "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "thin1")
  if [ "$new_status" == "active" ] || [ "$new_status" == "enabled" ]; then
    log_success "thin1 storage is now active on pve2"
    return 0
  else
    log_error "Failed to enable thin1 storage on pve2 (status: $new_status)"
    return 1
  fi
}
# Enable thin2 and thin3 on pve2
#
# For each of thin2/thin3: skip (with a warning) if the volume group or
# thin pool is missing, leave already-active storage alone, otherwise
# remove and re-add the Proxmox storage definition.
# Always returns 0; problems are logged as warnings, not failures.
fix_pve2_thin23() {
  local storage
  for storage in thin2 thin3; do
    log_info "Checking $storage on pve2..."

    # The VG carries the same name as the storage/thin pool.  (The original
    # uppercase-then-lowercase tr|sed pipeline was a no-op for these names.)
    local vg_name=$storage

    # Declare/assign separately so remote failures are not masked (SC2155).
    local vg_exists
    vg_exists=$(ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" \
      "vgs $vg_name 2>/dev/null | grep -q $vg_name && echo 'yes' || echo 'no'")

    if [ "$vg_exists" != "yes" ]; then
      log_warn "$storage volume group does not exist, skipping..."
      continue
    fi

    local pool_exists
    pool_exists=$(ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" \
      "lvs $vg_name/$storage 2>/dev/null | grep -q $storage && echo 'yes' || echo 'no'")

    if [ "$pool_exists" != "yes" ]; then
      log_warn "$storage pool does not exist, skipping..."
      continue
    fi

    local status
    status=$(check_storage_status "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "$storage")

    if [ "$status" == "active" ] || [ "$status" == "enabled" ]; then
      log_success "$storage is already active"
      continue
    fi

    # Remove any stale definition, then re-register the thin pool.
    log_info "Enabling $storage storage..."
    ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "pvesm remove $storage 2>/dev/null" || true
    sleep 2

    ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" <<EOF
pvesm add lvmthin $storage \
    --thinpool $storage \
    --vgname $vg_name \
    --content images,rootdir \
    --nodes pve2 2>&1
EOF

    sleep 2

    local new_status
    new_status=$(check_storage_status "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "$storage")
    if [ "$new_status" == "active" ] || [ "$new_status" == "enabled" ]; then
      log_success "$storage is now active"
    else
      log_warn "$storage status: $new_status"
    fi
  done
}
# Fix pve storage (no VGs available, use local)
#
# Verifies that the "local" directory storage on pve is active (required
# for accepting migrations) and reports whether any LVM volume group
# exists that could later host a thin pool.
# Returns: 0 when local storage is active, 1 otherwise.
fix_pve_storage() {
  log_info "Fixing storage on pve..."

  # Declare/assign separately so remote failures are not masked (SC2155).
  local local_status
  local_status=$(check_storage_status "$PROXMOX_HOST_PVE" "$PVE_PASS" "local")

  if [ "$local_status" == "active" ]; then
    log_success "local storage is active on pve (ready for migrations)"
  else
    log_error "local storage is not active on pve"
    return 1
  fi

  # First volume group name, if any (empty string when none exist).
  local vgs
  vgs=$(ssh_node "$PROXMOX_HOST_PVE" "$PVE_PASS" \
    "vgs --noheadings -o vg_name 2>/dev/null | head -1" | tr -d ' ') || vgs=""

  if [ -z "$vgs" ]; then
    log_warn "No volume groups found on pve"
    log_info "pve will use 'local' directory storage for migrations"
    log_info "This is slower than LVM but works without additional setup"
    return 0
  else
    log_info "Found volume group: $vgs on pve"
    # Could create thin pools here if needed
  fi
}
# Verify stopped VMs can start
#
# Lists stopped containers on a node and checks that the storage backing
# each container's rootfs is active/enabled.  Reporting only — never fails.
#   $1 - host, $2 - password, $3 - display name of the node
verify_vm_storage() {
  local host=$1
  local pass=$2
  local node_name=$3

  log_info "Checking stopped VMs on $node_name..."

  # Declare/assign separately so remote failures are not masked (SC2155).
  local stopped_vms
  stopped_vms=$(ssh_node "$host" "$pass" \
    "pct list 2>/dev/null | grep 'stopped' | awk '{print \$1}'") || stopped_vms=""

  if [ -z "$stopped_vms" ]; then
    log_info "No stopped VMs found on $node_name"
    return 0
  fi

  log_info "Stopped VMs: $stopped_vms"

  # Check the backing storage of each stopped container's root filesystem.
  local vmid storage status
  for vmid in $stopped_vms; do
    storage=$(ssh_node "$host" "$pass" \
      "pct config $vmid 2>/dev/null | grep '^rootfs:' | awk '{print \$2}' | cut -d: -f1") \
      || storage="unknown"
    status=$(check_storage_status "$host" "$pass" "$storage")

    if [ "$status" == "active" ] || [ "$status" == "enabled" ]; then
      log_success "VM $vmid: storage $storage is active"
    else
      log_warn "VM $vmid: storage $storage status is $status"
    fi
  done
}
# Main execution
#
# Orchestrates the whole fix: prints a plan, asks for confirmation when
# running interactively, repairs pve2 storage (thin1, then thin2/thin3),
# verifies pve storage, and prints a final status summary.  Connection
# failures and fix failures are counted in `failed` and change the final
# message, but the script still exits 0 — NOTE(review): confirm whether a
# non-zero exit on failure is wanted.
main() {
  echo ""
  log_header
  log_info "Proxmox Storage Fix Tool - pve and pve2"
  log_header
  echo ""

  log_info "This script will:"
  log_info " 1. Enable thin1 storage on pve2 (for stopped VMs)"
  log_info " 2. Enable thin2/thin3 on pve2 if available"
  log_info " 3. Verify pve storage (local) is ready for migrations"
  log_info " 4. Verify stopped VMs can start"
  echo ""

  # Check for non-interactive mode: prompt only when NON_INTERACTIVE != 1
  # AND stdin is a terminal (so piped/cron runs never block on read).
  if [[ "${NON_INTERACTIVE:-}" != "1" ]] && [[ -t 0 ]]; then
    read -p "Continue? (y/N): " -n 1 -r
    echo ""
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
      log_info "Operation cancelled"
      exit 0
    fi
  fi

  echo ""

  # Count of failed steps; decides the final summary message.
  local failed=0

  # Fix pve2
  log_header
  log_info "FIXING pve2 STORAGE"
  log_header
  echo ""

  # Probe connectivity first so each fix step isn't attempted against a
  # dead host (the probe echoes 'connected' on success).
  if sshpass -p "$PVE2_PASS" ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST_PVE2" "echo 'connected'" 2>/dev/null; then
    if fix_pve2_thin1; then
      log_success "pve2 thin1 storage fixed"
    else
      log_error "Failed to fix pve2 thin1 storage"
      failed=$((failed + 1))
    fi

    echo ""
    # thin2/thin3 are best-effort: this helper only warns, never fails.
    fix_pve2_thin23

    echo ""
    verify_vm_storage "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "pve2"
  else
    log_error "Cannot connect to pve2"
    failed=$((failed + 1))
  fi

  echo ""

  # Fix pve
  log_header
  log_info "FIXING pve STORAGE"
  log_header
  echo ""

  if sshpass -p "$PVE_PASS" ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST_PVE" "echo 'connected'" 2>/dev/null; then
    if fix_pve_storage; then
      log_success "pve storage verified"
    else
      log_error "Failed to verify pve storage"
      failed=$((failed + 1))
    fi

    echo ""
    verify_vm_storage "$PROXMOX_HOST_PVE" "$PVE_PASS" "pve"
  else
    log_error "Cannot connect to pve"
    failed=$((failed + 1))
  fi

  echo ""

  # Final status: dump the relevant pvesm rows from both nodes.
  # `|| true` keeps an unreachable node from aborting the summary.
  log_header
  log_info "FINAL STORAGE STATUS"
  log_header
  echo ""

  log_info "pve2 storage:"
  ssh_node "$PROXMOX_HOST_PVE2" "$PVE2_PASS" "pvesm status | grep -E '(thin1|thin2|thin3|local)'" || true
  echo ""

  log_info "pve storage:"
  ssh_node "$PROXMOX_HOST_PVE" "$PVE_PASS" "pvesm status | grep -E '(local|thin)'" || true
  echo ""

  if [ $failed -eq 0 ]; then
    log_success "Storage configuration complete!"
    log_info ""
    log_info "Next steps:"
    log_info " 1. Start stopped VMs on pve2: pct start <VMID>"
    log_info " 2. Migrate VMs to pve using 'local' storage"
    echo ""
  else
    log_warn "Storage configuration completed with some failures"
    echo ""
  fi
}

main "$@"