- Update dbis_core, cross-chain-pmm-lps, explorer-monorepo, metamask-integration, pr-workspace/chains - Omit embedded publish git dirs and empty placeholders from index Made-with: Cursor
145 lines
4.6 KiB
Bash
Executable File
#!/usr/bin/env bash
# Audit storage node restrictions vs RPC VMID placement.
# Runs per-host: each VMID is checked on its actual Proxmox host (no cross-node config lookup).
#
# Usage:
#   ./scripts/audit-proxmox-rpc-storage.sh
#   ./scripts/audit-proxmox-rpc-storage.sh --vmid 2301
#   PROXMOX_HOST=192.168.11.10 ./scripts/audit-proxmox-rpc-storage.sh  # single-host mode (legacy)

# Abort on command failure, unset variables, and failures anywhere in a pipeline.
set -euo pipefail
# Load shared project environment and VMID placement helper.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Presumably defines get_host_for_vmid() and the PROXMOX_* env vars used
# below — TODO confirm against scripts/lib/load-project-env.sh.
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"

# SSH user for shell (PROXMOX_USER in .env may be root@pam for API)
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
# An API realm user (e.g. root@pam) is not a valid SSH login; fall back to root.
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"

# Containers running RPC services; these are the VMIDs audited below.
RPC_VMIDS=(2101 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403)
# "vmid:host" pairs, host resolved through the sourced placement helper.
# NOTE(review): if get_host_for_vmid fails for a VMID, 'set -e' aborts the
# script here — confirm that is the intended behavior.
RPC_NODES=()
for vmid in "${RPC_VMIDS[@]}"; do
  RPC_NODES+=("${vmid}:$(get_host_for_vmid "$vmid")")
done

# If PROXMOX_HOST is set, run single-host legacy mode (one host only)
PROXMOX_HOST="${PROXMOX_HOST:-}"
# VMIDs selected via --vmid; empty means "audit all".
TARGET_VMIDS=()
# Print command-line help for this script to stdout.
usage() {
  printf '%s\n' \
    'Usage: ./scripts/audit-proxmox-rpc-storage.sh [--vmid <N>]' \
    '' \
    'Options:' \
    '  --vmid <N>   Limit the VMID section to one VMID; repeatable'
}
#######################################
# Parse command-line arguments.
# Globals:   TARGET_VMIDS (appended for each --vmid)
# Arguments: the script's own "$@"
# Outputs:   usage text on stderr for bad input
# Exits:     0 on --help, 2 on invalid usage
#######################################
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --vmid)
        [[ $# -ge 2 ]] || { usage >&2; exit 2; }
        # Reject non-numeric VMIDs early instead of silently matching nothing.
        [[ "$2" =~ ^[0-9]+$ ]] || { echo "Invalid VMID: $2" >&2; exit 2; }
        TARGET_VMIDS+=("$2")
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        echo "Unknown argument: $1" >&2
        usage >&2
        exit 2
        ;;
    esac
  done
}
parse_args "$@"
# Decide whether a VMID should be included in the audit output.
# Returns 0 when no --vmid filter is active, or when $1 is in TARGET_VMIDS;
# returns 1 otherwise.
selected_vmid() {
  local candidate="$1"
  local filter
  if (( ${#TARGET_VMIDS[@]} == 0 )); then
    return 0  # no filter: everything is selected
  fi
  for filter in "${TARGET_VMIDS[@]}"; do
    if [[ "$candidate" == "$filter" ]]; then
      return 0
    fi
  done
  return 1
}
# Run a command on a Proxmox host over non-interactive SSH.
# $1 is the target host; remaining arguments are passed to ssh verbatim.
# BatchMode prevents password prompts; ConnectTimeout bounds dead-host stalls.
ssh_pve() {
  local target="$1"
  shift
  local ssh_opts=(
    -o StrictHostKeyChecking=no
    -o BatchMode=yes
    -o ConnectTimeout=5
  )
  ssh "${ssh_opts[@]}" "${PROXMOX_SSH_USER}@${target}" "$@"
}
# ---------------------------------------------------------------------------
# Main audit.
# Single-host (legacy) mode when PROXMOX_HOST is set; otherwise iterate every
# unique host found in the RPC_NODES "vmid:host" map.
# ---------------------------------------------------------------------------
if [[ -n "${PROXMOX_HOST:-}" ]]; then
  echo "=== Proxmox RPC Storage Audit (single host: ${PROXMOX_HOST}) ==="
  echo ""
  NODE="$(ssh_pve "$PROXMOX_HOST" "hostname")"
  echo "Node name: ${NODE}"
  echo ""
  echo "=== Storages active on this node (pvesm) ==="
  ssh_pve "$PROXMOX_HOST" "pvesm status" | sed 's/^/ /'
  echo ""
  echo "=== storage.cfg: storages with node restrictions ==="
  # Remote awk over /etc/pve/storage.cfg: remember the current storage stanza
  # (dir:/lvmthin:) and print "type:name nodes=<list>" whenever that stanza
  # carries a node restriction. The \$ escapes keep awk's field references
  # from being expanded by the local shell before the command ships over SSH.
  ssh_pve "$PROXMOX_HOST" "awk '
  /^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
  /^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
  /^[[:space:]]*nodes /{nodes=\$2}
  /^[[:space:]]*nodes /{print t \":\" s \" nodes=\" nodes}
  ' /etc/pve/storage.cfg" | sed 's/^/ /'
  echo ""
  # Single host: only VMIDs that belong on this node (we don't have full map
  # in legacy mode, so list all and skip missing)
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    selected_vmid "$vmid" || continue
    # Skip VMIDs mapped to a different host than the one being audited.
    [[ "$host" != "$PROXMOX_HOST" ]] && continue
    echo "--- VMID ${vmid} ---"
    # NOTE(review): 'egrep' is deprecated; 'grep -E -i' is the modern spelling
    # (kept as-is here since it runs on the remote host).
    ssh_pve "$host" "pct status ${vmid} 2>&1; pct config ${vmid} 2>&1 | egrep -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
    echo ""
  done
else
  echo "=== Proxmox RPC Storage Audit (all RPC hosts) ==="
  echo ""

  # Collect unique hosts and show storage per host
  declare -A seen_host
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    [[ -n "${seen_host[$host]:-}" ]] && continue
    seen_host[$host]=1
    # Fall back to the raw host string when SSH fails (node unreachable).
    NODE="$(ssh_pve "$host" "hostname" 2>/dev/null || echo "$host")"
    echo "--- Host $host (node: $NODE) ---"
    echo "Storages:"
    ssh_pve "$host" "pvesm status" 2>/dev/null | sed 's/^/ /'
    echo ""
  done

  # Assumes one host's storage.cfg is representative of the cluster —
  # TODO confirm all audited hosts share the same /etc/pve config.
  echo "=== storage.cfg (from first host) ==="
  # NOTE(review): PROXMOX_HOST_R630_01 must come from load-project-env.sh;
  # under 'set -u' the script aborts here if it is unset.
  first_host="${PROXMOX_HOST_R630_01}"
  # Same stanza-tracking awk as in single-host mode above.
  ssh_pve "$first_host" "awk '
  /^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
  /^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
  /^[[:space:]]*nodes /{nodes=\$2}
  /^[[:space:]]*nodes /{print t \":\" s \" nodes=\" nodes}
  ' /etc/pve/storage.cfg" 2>/dev/null | sed 's/^/ /'
  echo ""

  echo "=== RPC VMID -> rootfs storage mapping (per host) ==="
  for entry in "${RPC_NODES[@]}"; do
    IFS=: read -r vmid host <<< "$entry"
    selected_vmid "$vmid" || continue
    echo "--- VMID ${vmid} (host $host) ---"
    # Show status plus the config keys relevant to storage/placement.
    ssh_pve "$host" "pct status ${vmid} 2>&1; pct config ${vmid} 2>&1 | egrep -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
    echo ""
  done
fi
# Closing guidance for interpreting the audit output.
printf '%s\n' \
  'NOTE:' \
  '- If a VMID uses rootfs "local-lvm:*" on a node where storage.cfg restricts "local-lvm" to other nodes,' \
  '  the container may fail to start after a shutdown/reboot.' \
  '- Fix is to update /etc/pve/storage.cfg nodes=... for that storage to include the node hosting the VMID,' \
  '  or migrate the VMID to an allowed node/storage.'