#!/usr/bin/env bash
# Audit storage node restrictions vs RPC VMID placement.
# Runs per-host: each VMID is checked on its actual Proxmox host (no cross-node config lookup).
#
# Usage:
#   ./scripts/audit-proxmox-rpc-storage.sh
#   PROXMOX_HOST=192.168.11.10 ./scripts/audit-proxmox-rpc-storage.sh  # single-host mode (legacy)

# Fail fast: abort on errors, unset variables, and failures inside pipelines.
set -euo pipefail
# Load IP configuration (optional: overrides the PROXMOX_HOST_* defaults below).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# SSH user for shell (PROXMOX_USER in .env may be root@pam for API).
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
# A "user@realm" value is a Proxmox API identity, not a Unix account; SSH as root instead.
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
# Per-host management IPs; overridable via config/ip-addresses.conf or the environment.
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# VMID:host (same mapping as check-rpc-vms-health.sh) — only VMIDs that exist on these hosts.
RPC_NODES=(
  "2101:$R630_01"
  "2201:$R630_02"
  "2301:$ML110"
  "2303:$R630_02"
  "2304:$ML110"
  "2305:$ML110"
  "2306:$ML110"
  "2307:$ML110"
  "2308:$ML110"
  "2400:$ML110"
  "2401:$R630_02"
  "2402:$ML110"
  "2403:$ML110"
)
# If PROXMOX_HOST is set, run single-host legacy mode (one host only).
# Default to empty so the "${PROXMOX_HOST:-}" checks below are safe under set -u.
PROXMOX_HOST="${PROXMOX_HOST:-}"
#######################################
# Run a command on a Proxmox host over SSH, non-interactively.
# Globals:   PROXMOX_SSH_USER (read)
# Arguments: $1 - target host; remaining args - command to run remotely
# Outputs:   remote command output on stdout/stderr
# Returns:   ssh / remote command exit status
#######################################
ssh_pve() {
  local host="$1"
  shift
  # BatchMode + short ConnectTimeout: fail fast instead of hanging on a dead host.
  ssh -o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=5 \
    "${PROXMOX_SSH_USER}@${host}" "$@"
}
#######################################
# Print storage.cfg entries (dir/lvmthin) that carry a "nodes" restriction.
# Arguments: $1 - host whose /etc/pve/storage.cfg to read over SSH
# Outputs:   lines like "dir:local nodes=pve1,pve2" on stdout
#######################################
print_node_restrictions() {
  local host="$1"
  # Remote awk: remember the current storage stanza (name + type), print it
  # when a "nodes" line appears inside that stanza. The set-and-print is one
  # rule (the original used two identical patterns back-to-back).
  ssh_pve "$host" "awk '
    /^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
    /^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
    /^[[:space:]]*nodes /{nodes=\$2; print t \":\" s \" nodes=\" nodes}
  ' /etc/pve/storage.cfg"
}

#######################################
# Print pct status plus key config fields for one container, indented.
# Arguments: $1 - VMID; $2 - host expected to own it
# Outputs:   indented status/config lines; pct errors pass through via 2>&1
#######################################
print_vmid_summary() {
  local vmid="$1" host="$2"
  # grep -E -i replaces the deprecated egrep -i (same matching behavior).
  ssh_pve "$host" "pct status ${vmid} 2>&1; pct config ${vmid} 2>&1 | grep -E -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
}

if [[ -n "${PROXMOX_HOST:-}" ]]; then
  # --- Single-host legacy mode: audit only ${PROXMOX_HOST} ---
  echo "=== Proxmox RPC Storage Audit (single host: ${PROXMOX_HOST}) ==="
  echo ""
  NODE="$(ssh_pve "$PROXMOX_HOST" "hostname")"
  echo "Node name: ${NODE}"
  echo ""
  echo "=== Storages active on this node (pvesm) ==="
  ssh_pve "$PROXMOX_HOST" "pvesm status" | sed 's/^/ /'
  echo ""
  echo "=== storage.cfg: storages with node restrictions ==="
  print_node_restrictions "$PROXMOX_HOST" | sed 's/^/ /'
  echo ""
  # Single host: only VMIDs that belong on this node (we don't have a full
  # cluster map in legacy mode, so walk the static list and skip other hosts).
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    [[ "$host" != "$PROXMOX_HOST" ]] && continue
    echo "--- VMID ${vmid} ---"
    print_vmid_summary "$vmid" "$host"
    echo ""
  done
else
  # --- Multi-host mode: audit every host referenced by RPC_NODES ---
  echo "=== Proxmox RPC Storage Audit (all RPC hosts) ==="
  echo ""

  # Collect unique hosts and show storage status once per host.
  declare -A seen_host
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    [[ -n "${seen_host[$host]:-}" ]] && continue
    seen_host[$host]=1
    NODE="$(ssh_pve "$host" "hostname" 2>/dev/null || echo "$host")"
    echo "--- Host $host (node: $NODE) ---"
    echo "Storages:"
    ssh_pve "$host" "pvesm status" 2>/dev/null | sed 's/^/ /'
    echo ""
  done

  # Only the first host is queried — NOTE(review): assumes /etc/pve/storage.cfg
  # is cluster-shared; matches the original script's behavior.
  echo "=== storage.cfg (from first host) ==="
  first_host="${R630_01}"
  print_node_restrictions "$first_host" 2>/dev/null | sed 's/^/ /'
  echo ""

  echo "=== RPC VMID -> rootfs storage mapping (per host) ==="
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    echo "--- VMID ${vmid} (host $host) ---"
    print_vmid_summary "$vmid" "$host"
    echo ""
  done
fi
# Closing guidance for the operator reading the audit output.
# Quoted delimiter: the body is printed literally, no expansion.
cat <<'NOTE'
NOTE:
- If a VMID uses rootfs "local-lvm:*" on a node where storage.cfg restricts "local-lvm" to other nodes,
the container may fail to start after a shutdown/reboot.
- Fix is to update /etc/pve/storage.cfg nodes=... for that storage to include the node hosting the VMID,
or migrate the VMID to an allowed node/storage.
NOTE