scripts: portal login, PMM mesh install, ops template audit, NPM verify, route matrix export
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

Made-with: Cursor
This commit is contained in:
defiQUG
2026-03-27 18:46:42 -07:00
parent 3e2d94b12d
commit bad8fdc98c
5 changed files with 490 additions and 0 deletions

View File

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Enable working login on https://sankofa.nexus:
# - Fix Keycloak systemd (JAVA_HOME line; hostname + proxy headers for NPM).
# - Remove .env.local on CT 7801; install .env with PORTAL_LOCAL_LOGIN_* + NEXTAUTH_SECRET.
# - Run sync-sankofa-portal-7801.sh (rebuild portal with updated auth.ts).
#
# Usage: ./scripts/deployment/enable-sankofa-portal-login-7801.sh [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
# Optional site config (provides PROXMOX_HOST_* / IP_* vars); missing file is tolerated.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Proxmox node hosting both CTs; overridable via env or ip-addresses.conf.
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID_PORTAL="${SANKOFA_PORTAL_VMID:-7801}"  # portal CT
VMID_KC="${SANKOFA_KEYCLOAK_VMID:-7802}"    # Keycloak CT
# Intentionally used unquoted so it word-splits into separate ssh/scp options.
SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=accept-new -o ConnectTimeout=15"
LOCAL_EMAIL="${PORTAL_LOCAL_LOGIN_EMAIL:-portal@sankofa.nexus}"
# --dry-run: describe intended actions and exit without touching any host.
if [[ "${1:-}" == "--dry-run" ]]; then
echo "[DRY-RUN] Would patch Keycloak ${VMID_KC}, write .env on ${VMID_PORTAL}, sync portal"
exit 0
fi
# Generate a local-login password and a NextAuth secret.
# NOTE(review): tr deletes '/+=' so GEN_PASS can end up shorter than 24 chars — confirm acceptable.
GEN_PASS="$(openssl rand -base64 24 | tr -d '/+=' | cut -c1-24)"
NEXTAUTH_SEC="$(openssl rand -base64 32)"
ENV_TMP="$(mktemp)"
trap 'rm -f "$ENV_TMP"' EXIT  # never leave the rendered secrets in the local temp dir
# Render the portal .env locally (unquoted heredoc: ${...} expand here, not on the CT).
cat > "$ENV_TMP" <<EOF
NEXT_PUBLIC_GRAPHQL_ENDPOINT=http://192.168.11.50:4000/graphql
NEXT_PUBLIC_GRAPHQL_WS_ENDPOINT=ws://192.168.11.50:4000/graphql-ws
NEXTAUTH_URL=https://sankofa.nexus
NEXTAUTH_SECRET=${NEXTAUTH_SEC}
KEYCLOAK_URL=https://keycloak.sankofa.nexus
KEYCLOAK_REALM=master
KEYCLOAK_CLIENT_ID=sankofa-portal
KEYCLOAK_CLIENT_SECRET=
PORTAL_LOCAL_LOGIN_EMAIL=${LOCAL_EMAIL}
PORTAL_LOCAL_LOGIN_PASSWORD=${GEN_PASS}
PORT=3000
NODE_ENV=production
EOF
# Stage the .env on the Proxmox host, then configure both CTs in one SSH session.
scp $SSH_OPTS "$ENV_TMP" "root@${PROXMOX_HOST}:/tmp/sankofa-portal.env"
# Quoted 'REMOTE' delimiter: body runs verbatim on the Proxmox host; VMIDs arrive as $1/$2.
ssh $SSH_OPTS "root@${PROXMOX_HOST}" bash -s "$VMID_KC" "$VMID_PORTAL" <<'REMOTE'
set -euo pipefail
VMID_KC="$1"
VMID_PORTAL="$2"
# Install the rendered .env into the portal CT, then drop the staged copy.
pct push "${VMID_PORTAL}" /tmp/sankofa-portal.env /opt/sankofa-portal/.env
rm -f /tmp/sankofa-portal.env
# Patch the Keycloak unit file in-place (idempotent: each replacement is guarded).
pct exec "${VMID_KC}" -- python3 <<'PY'
from pathlib import Path
p = Path("/etc/systemd/system/keycloak.service")
raw = p.read_text()
# Repair a JAVA_HOME Environment= line that is missing its closing quote.
if 'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64"' not in raw:
raw = raw.replace(
'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64',
'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64"',
1,
)
# Add hostname + proxy-header settings (NPM terminates TLS in front of :8080) once.
if "KC_HOSTNAME=keycloak.sankofa.nexus" not in raw:
raw = raw.replace(
'Environment="KC_HTTP_PORT=8080"',
'Environment="KC_HTTP_PORT=8080"\nEnvironment="KC_HOSTNAME=keycloak.sankofa.nexus"\nEnvironment="KC_HOSTNAME_PORT=443"\nEnvironment="KC_PROXY_HEADERS=xforwarded"',
1,
)
p.write_text(raw)
PY
pct exec "${VMID_KC}" -- systemctl daemon-reload
pct exec "${VMID_KC}" -- systemctl restart keycloak
# .env.local would shadow the freshly-installed .env — remove it.
pct exec "${VMID_PORTAL}" -- rm -f /opt/sankofa-portal/.env.local
REMOTE
echo ""
echo "📤 Syncing portal source + rebuild…"
# Rebuild + redeploy the portal so the updated auth.ts picks up the new .env.
bash "${SCRIPT_DIR}/sync-sankofa-portal-7801.sh"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Sign in at https://sankofa.nexus"
echo "   Email:    ${LOCAL_EMAIL}"
echo "   Password: ${GEN_PASS}"
echo ""
echo "SSO: Add NPM host keycloak.sankofa.nexus → ${IP_KEYCLOAK:-192.168.11.52}:8080, then create Keycloak"
echo "     confidential client sankofa-portal; set KEYCLOAK_CLIENT_SECRET in .env and re-sync."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
# Push Chain 138 PMM mesh into Proxmox LXC and enable systemd.
# Copies: pmm-mesh-6s-automation.sh, update-oracle-price.sh, smom-dbis-138/.env, and this host's cast binary.
#
# Run from repo root (LAN + SSH root@Proxmox BatchMode). Requires: cast in PATH, smom-dbis-138/.env.
#
# Usage:
# ./scripts/deployment/install-pmm-mesh-systemd-on-proxmox-lxc.sh [--dry-run]
# Env:
# PMM_MESH_LXC_TARGETS="192.168.11.11:3500 192.168.11.12:5700"
#
# Note: Running the full mesh on multiple hosts repeats performUpkeep / oracle ticks (extra gas).
# Set ENABLE_MESH_KEEPER_TICK=0 in a drop-in if you want only one keeper driver.
#
# Hardened LXCs (e.g. unprivileged) may forbid writing /etc/systemd/system inside the guest.
# In that case this script installs chain138-pmm-mesh-pct-<VMID>.service on the Proxmox host
# so systemd runs: pct exec <VMID> -- bash …/pmm-mesh-6s-automation.sh
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="$PROJECT_ROOT/smom-dbis-138"  # chain-138 project dir bundled into the tarball
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; done
# Space-separated "host:vmid" pairs.
TARGETS="${PMM_MESH_LXC_TARGETS:-192.168.11.11:3500 192.168.11.12:5700}"
# Ship this workstation's foundry 'cast' binary into each CT (no toolchain inside them).
CAST_SRC="$(command -v cast || true)"
[[ -x "$CAST_SRC" ]] || { echo "ERROR: cast not in PATH" >&2; exit 1; }
[[ -f "$SMOM/.env" ]] || { echo "ERROR: missing $SMOM/.env" >&2; exit 1; }
MESH_TGZ="$(mktemp /tmp/c138-mesh-XXXXXX.tgz)"
cleanup() { rm -f "$MESH_TGZ" 2>/dev/null || true; }
trap cleanup EXIT
# Bundle scripts + .env relative to repo root so paths unpack predictably under $BASE.
tar czf "$MESH_TGZ" -C "$PROJECT_ROOT" \
smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh \
smom-dbis-138/scripts/update-oracle-price.sh \
smom-dbis-138/.env
log() { echo "[install-pmm-mesh] $*"; }
# One installation pass per host:vmid target.
for pair in $TARGETS; do
host="${pair%%:*}"  # text before the first ':'
vmid="${pair##*:}"  # text after the last ':'
[[ -n "$host" && -n "$vmid" ]] || { log "skip bad target: $pair"; continue; }
log "root@$host VMID $vmid"
if [[ "$DRY_RUN" == true ]]; then
log "DRY-RUN: would scp + pct push $vmid + systemctl enable --now"
continue
fi
# Stage the tarball and the cast binary on the Proxmox host.
scp -o BatchMode=yes -o ConnectTimeout=20 "$MESH_TGZ" "root@${host}:/tmp/c138-mesh-install.tgz"
scp -o BatchMode=yes -o ConnectTimeout=20 "$CAST_SRC" "root@${host}:/tmp/cast-bin-lxc"
# Quoted 'REMOTE' heredoc: body runs verbatim on the Proxmox host; VMID passed via env.
ssh -o BatchMode=yes -o ConnectTimeout=25 "root@${host}" \
"VMID=${vmid} bash -s" <<'REMOTE'
set -euo pipefail
[[ -n "${VMID:-}" ]] || exit 1
[[ -f /tmp/c138-mesh-install.tgz ]] || { echo "missing /tmp/c138-mesh-install.tgz"; exit 1; }
[[ -f /tmp/cast-bin-lxc ]] || { echo "missing /tmp/cast-bin-lxc"; exit 1; }
# Stop mesh before rm/tar so host pct unit or guest loop does not hit a missing script mid-upgrade.
systemctl stop "chain138-pmm-mesh-pct-${VMID}.service" 2>/dev/null || true
pct exec "$VMID" -- systemctl stop chain138-pmm-mesh-automation.service 2>/dev/null || true
sleep 1
# Move the staged artifacts into the CT.
pct push "$VMID" /tmp/c138-mesh-install.tgz /var/tmp/c138-mesh.tgz
pct push "$VMID" /tmp/cast-bin-lxc /var/tmp/cast-bin
# Unprivileged LXCs may have /opt and /var/lib root-owned on host as nobody: use /var/tmp (writable as CT root).
BASE=/var/tmp/chain138-mesh
pct exec "$VMID" -- mkdir -p "$BASE/bin"
pct exec "$VMID" -- rm -rf "$BASE/smom-dbis-138"
pct exec "$VMID" -- tar xzf /var/tmp/c138-mesh.tgz -C "$BASE"
# Prefer install(1); fall back to cp+chmod where install is unavailable in the CT.
if pct exec "$VMID" -- install -m 755 /var/tmp/cast-bin "$BASE/bin/cast" 2>/dev/null; then
:
else
pct exec "$VMID" -- cp /var/tmp/cast-bin "$BASE/bin/cast"
pct exec "$VMID" -- chmod 755 "$BASE/bin/cast"
fi
# Best-effort apt for curl + CA certs; capture both exit codes without aborting.
set +e
pct exec "$VMID" -- env DEBIAN_FRONTEND=noninteractive apt-get update -qq
A1=$?
pct exec "$VMID" -- env DEBIAN_FRONTEND=noninteractive apt-get install -y -qq curl ca-certificates >/dev/null
A2=$?
set -e
if [[ "$A1" != 0 || "$A2" != 0 ]]; then
# apt broken (hardened CT): download a static curl on the host and push it in.
echo "apt not usable in VMID $VMID; installing static curl into $BASE/bin/curl"
curl -fsSL "https://github.com/moparisthebest/static-curl/releases/latest/download/curl-amd64" -o "/tmp/curl-static-$VMID"
chmod 755 "/tmp/curl-static-$VMID"
pct push "$VMID" "/tmp/curl-static-$VMID" "$BASE/bin/curl"
rm -f "/tmp/curl-static-$VMID"
fi
pct exec "$VMID" -- chmod 755 "$BASE/bin/cast" 2>/dev/null || true
if pct exec "$VMID" -- test -f "$BASE/bin/curl"; then
pct exec "$VMID" -- chmod 755 "$BASE/bin/curl"
fi
# Smoke-test the tools under the same minimal PATH the service will use.
pct exec "$VMID" -- env -i PATH="$BASE/bin:/usr/local/bin:/usr/bin:/bin" HOME=/tmp bash --noprofile --norc -lc 'cast --version | head -1; command -v curl >/dev/null && curl --version | head -1 || true'
HOST_UNIT="chain138-pmm-mesh-pct-${VMID}.service"
GUEST_UNIT="chain138-pmm-mesh-automation.service"
PCT_BIN="$(command -v pct)"
# Probe whether the guest allows writes to /etc/systemd/system (hardened CTs do not).
can_guest_systemd=false
if pct exec "$VMID" -- bash -c 't=/etc/systemd/system/.c138mesh_w; rm -f "$t"; touch "$t" && rm -f "$t"'; then
can_guest_systemd=true
fi
if [[ "$can_guest_systemd" == true ]]; then
# Guest-managed path: retire any host-side wrapper unit, install the unit inside the CT.
systemctl disable --now "$HOST_UNIT" 2>/dev/null || true
rm -f "/etc/systemd/system/$HOST_UNIT"
systemctl daemon-reload 2>/dev/null || true
pct exec "$VMID" -- bash -c 'cat > /etc/systemd/system/chain138-pmm-mesh-automation.service' <<'UNITEOF'
[Unit]
Description=Chain 138 PMM mesh — oracle/keeper/WETH poll
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
Environment=PATH=/var/tmp/chain138-mesh/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
WorkingDirectory=/var/tmp/chain138-mesh/smom-dbis-138
Environment=PMM_MESH_INTERVAL_SEC=6
Environment=MESH_CAST_GAS_PRICE=2gwei
Environment=ENABLE_MESH_ORACLE_TICK=1
Environment=ENABLE_MESH_KEEPER_TICK=1
Environment=ENABLE_MESH_PMM_READS=1
Environment=ENABLE_MESH_WETH_READS=1
EnvironmentFile=-/var/tmp/chain138-mesh/smom-dbis-138/.env
ExecStart=/bin/bash /var/tmp/chain138-mesh/smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
UNITEOF
pct exec "$VMID" -- systemctl daemon-reload
pct exec "$VMID" -- systemctl enable "$GUEST_UNIT"
pct exec "$VMID" -- systemctl restart "$GUEST_UNIT"
sleep 2
# Fail loudly with recent logs if the service did not come up.
pct exec "$VMID" -- systemctl is-active "$GUEST_UNIT" || {
pct exec "$VMID" -- journalctl -u chain138-pmm-mesh-automation -n 40 --no-pager || true
exit 1
}
else
# Hardened-CT path: drive the mesh from the host via a 'pct exec' wrapper unit.
pct exec "$VMID" -- systemctl disable --now "$GUEST_UNIT" 2>/dev/null || true
pct exec "$VMID" -- rm -f "/etc/systemd/system/$GUEST_UNIT" 2>/dev/null || true
# Unquoted delimiter: ${VMID}/${BASE}/${PCT_BIN} expand now, on the Proxmox host.
cat > "/etc/systemd/system/$HOST_UNIT" <<UNIT_HOST
[Unit]
Description=Chain 138 PMM mesh via pct into CT ${VMID}
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=${PCT_BIN} exec ${VMID} -- env PATH=${BASE}/bin:/usr/bin:/bin HOME=/tmp PMM_MESH_INTERVAL_SEC=6 MESH_CAST_GAS_PRICE=2gwei ENABLE_MESH_ORACLE_TICK=1 ENABLE_MESH_KEEPER_TICK=1 ENABLE_MESH_PMM_READS=1 ENABLE_MESH_WETH_READS=1 /bin/bash --noprofile --norc ${BASE}/smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
UNIT_HOST
systemctl daemon-reload
systemctl enable "$HOST_UNIT"
systemctl restart "$HOST_UNIT"
sleep 2
systemctl is-active "$HOST_UNIT" || {
journalctl -u "$HOST_UNIT" -n 40 --no-pager || true
exit 1
}
fi
# Clean staged artifacts off the Proxmox host.
rm -f /tmp/c138-mesh-install.tgz /tmp/cast-bin-lxc
REMOTE
done
log "done. Guest logs: ssh root@<proxmox> \"pct exec <VMID> -- journalctl -u chain138-pmm-mesh-automation -f\""
log "      Host-wrapped (hardened CT): ssh root@<proxmox> \"journalctl -u chain138-pmm-mesh-pct-<VMID> -f\""

View File

@@ -0,0 +1,100 @@
#!/usr/bin/env node
// Export the aggregator route matrix (JSON) to a flat CSV for spreadsheet review.
// Paths are resolved against the current working directory — run from the repo root.
import fs from 'fs';
import path from 'path';
const projectRoot = process.cwd();
const inputPath = path.resolve(projectRoot, 'config/aggregator-route-matrix.json');
const outputPath = path.resolve(projectRoot, 'config/aggregator-route-matrix.csv');
// Parsed matrix; readFileSync/JSON.parse throw if the file is missing or malformed.
const matrix = JSON.parse(fs.readFileSync(inputPath, 'utf8'));
const rows = []; // accumulated CSV data rows (arrays of raw cell values)
// Serialize one CSV field per RFC 4180: null/undefined become the empty
// string; a field containing a double quote, comma, or line break (LF or CR)
// is wrapped in quotes with embedded quotes doubled. CR is included because
// a bare carriage return also terminates a record in most CSV consumers —
// the original only checked '"' ',' '\n' and emitted broken rows for '\r'.
function csvEscape(value) {
  const text = value == null ? '' : String(value);
  if (/[",\n\r]/.test(text)) {
    return `"${text.replace(/"/g, '""')}"`;
  }
  return text;
}
// Append one CSV row (to module-level `rows`) for a live swap or bridge route.
// Two route shapes share the same columns: single-asset routes carry
// assetSymbol/assetAddress, pair routes carry tokenIn*/tokenOut* — each token
// column falls back from the pair field to the single-asset field.
// Column order must match the `header` array defined below in this file.
function pushRouteRow(kind, route) {
rows.push([
kind,
route.routeId,
route.status,
route.routeType,
route.fromChainId,
route.toChainId,
route.tokenInSymbol || route.assetSymbol || '',
route.tokenInAddress || route.assetAddress || '',
route.tokenOutSymbol || route.assetSymbol || '',
route.tokenOutAddress || route.assetAddress || '',
route.hopCount || '', // NOTE(review): a falsy hopCount (0) renders as '' — confirm intended
route.bridgeType || '',
route.bridgeAddress || '',
(route.aggregatorFamilies || []).join('|'),
(route.tags || []).join('|'),
(route.intermediateSymbols || []).join('|'),
// One ref per leg: prefer pool, then executor, then protocol, then kind.
(route.legs || []).map((leg) => leg.poolAddress || leg.executorAddress || leg.protocol || leg.kind).join('|'),
(route.notes || []).join(' | '),
]);
}
// Live routes (swap and bridge) share the full column set.
for (const route of matrix.liveSwapRoutes || []) {
pushRouteRow('liveSwapRoute', route);
}
for (const route of matrix.liveBridgeRoutes || []) {
pushRouteRow('liveBridgeRoute', route);
}
// Blocked/planned routes only know candidate input symbols and a reason;
// pad the remaining columns with empty strings so every row has equal width.
for (const route of matrix.blockedOrPlannedRoutes || []) {
rows.push([
'blockedOrPlannedRoute',
route.routeId,
route.status,
route.routeType,
route.fromChainId,
route.toChainId,
(route.tokenInSymbols || []).join('|'),
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
route.reason || '',
]);
}
// Column names — order must stay in sync with pushRouteRow and the
// blockedOrPlannedRoute rows above.
const header = [
'kind',
'routeId',
'status',
'routeType',
'fromChainId',
'toChainId',
'tokenInSymbol',
'tokenInAddress',
'tokenOutSymbol',
'tokenOutAddress',
'hopCount',
'bridgeType',
'bridgeAddress',
'aggregatorFamilies',
'tags',
'intermediateSymbols',
'legRefs',
'notesOrReason',
];
// Assemble the CSV (LF line endings, trailing newline) and write it out.
const csv = [header, ...rows].map((row) => row.map(csvEscape).join(',')).join('\n') + '\n';
fs.writeFileSync(outputPath, csv, 'utf8');
console.log(`Wrote ${rows.length} rows to ${outputPath}`);

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# NPMplus admin API on loopback :81 inside CT 10233 (r630-01).
# Ref: docs/04-configuration/NPMPLUS_QUICK_REF.md
#
# Probes the admin port via SSH to the Proxmox node + 'pct exec' into the CT
# and treats any 2xx/3xx/401/403 status as "reachable".
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional site config; silently skipped when absent or unreadable.
if [[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]]; then
  # shellcheck source=/dev/null
  source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
fi
SSH_HOST="${NPMPLUS_SSH_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${NPMPLUS_VMID:-10233}"
# Left unquoted at use sites so it word-splits into separate ssh options.
SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new"
echo "NPMplus :81 check via ssh root@${SSH_HOST} pct exec ${VMID}"
# First hop only (no -L): NPM often 301/308 to HTTPS; following redirects breaks on localhost TLS.
response="$(timeout 45 ssh $SSH_OPTS "root@${SSH_HOST}" "pct exec ${VMID} -- sh -c 'curl -s -o /dev/null -w \"%{http_code}\" --connect-timeout 5 http://127.0.0.1:81/ 2>/dev/null'" 2>/dev/null || true)"
# Keep only the last 3-digit run (pct/ssh can prepend noise); default to 000.
code="$(echo "$response" | tr -d '\r\n' | grep -oE '[0-9]{3}' | tail -1)"
code="${code:-000}"
echo "HTTP ${code}"
if [[ ! "$code" =~ ^(2[0-9]{2}|3[0-9]{2}|401|403)$ ]]; then
  echo "Unexpected code (want 2xx/3xx/401/403 = reachable)"
  exit 1
fi
echo "OK"

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env bash
# Read-only: compare expected VMIDs from config/proxmox-operational-template.json
# to live Proxmox inventory (pct/qm list) over SSH. No cluster changes.
#
# Usage (from repo root):
# bash scripts/verify/audit-proxmox-operational-template.sh
# SSH_USER=root SSH_OPTS="-o BatchMode=yes" bash scripts/verify/audit-proxmox-operational-template.sh
#
# Env:
# PROXMOX_HOSTS  Space-separated IPs (default: sources config/ip-addresses.conf — ML110, R630-01, R630-02)
# SSH_USER       default root
# SSH_OPTS       extra ssh options (e.g. -i /path/key)
#
# Exit: 0 always (report-only). Prints [MISSING_ON_CLUSTER] / [EXTRA_ON_CLUSTER] when SSH works.
# Deliberately no '-e': a failed SSH hop must not abort the report.
set -uo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
TEMPLATE_JSON="$PROJECT_ROOT/config/proxmox-operational-template.json"
SSH_USER="${SSH_USER:-root}"
SSH_OPTS="${SSH_OPTS:--o ConnectTimeout=6 -o StrictHostKeyChecking=accept-new}"
cd "$PROJECT_ROOT"
# jq is required to parse the template; degrade to a no-op rather than fail CI.
if ! command -v jq &>/dev/null; then
  echo "[WARN] jq not installed; install jq to compare VMIDs."
  exit 0
fi
if [[ ! -f "$TEMPLATE_JSON" ]]; then
  echo "[ERROR] Missing $TEMPLATE_JSON"
  exit 1
fi
# shellcheck source=/dev/null
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOSTS="${PROXMOX_HOSTS:-${PROXMOX_HOST_ML110:-192.168.11.10} ${PROXMOX_HOST_R630_01:-192.168.11.11} ${PROXMOX_HOST_R630_02:-192.168.11.12}}"
# Sorted, de-duplicated VMIDs declared in the template (entries with vmid=null skipped).
EXPECTED_VMIDS=$(jq -r '.services[] | select(.vmid != null) | .vmid' "$TEMPLATE_JSON" | sort -n | uniq)
echo "=== Proxmox template audit (read-only) ==="
echo "Template: $TEMPLATE_JSON"
# Count lines containing a VMID. The previous 'wc -l' reported 1 row for an
# empty list because echo always emits a trailing newline; grep -c does not.
EXPECTED_COUNT=$(printf '%s\n' "$EXPECTED_VMIDS" | grep -c '[0-9]')
echo "Expected VMIDs (non-null): ${EXPECTED_COUNT} rows"
echo ""
ALL_LIVE=""
for h in $PROXMOX_HOSTS; do
  # One SSH round-trip per node: CT then VM IDs, header rows stripped by awk.
  # SSH_OPTS is intentionally unquoted so it word-splits into separate options.
  out=$(ssh $SSH_OPTS "${SSH_USER}@${h}" "pct list 2>/dev/null | awk 'NR>1 {print \$1}'; qm list 2>/dev/null | awk 'NR>1 {print \$1}'" 2>/dev/null || true)
  if [[ -z "$out" ]]; then
    echo "[SKIP] No inventory from $h (SSH failed or empty)"
    continue
  fi
  echo "--- Live inventory: $h ---"
  while IFS= read -r vid; do
    [[ -z "${vid:-}" ]] && continue
    echo " VMID $vid"
    ALL_LIVE+="$vid"$'\n'
  done <<< "$out"
done
# Union of live VMIDs across all reachable nodes, sorted and de-duplicated.
LIVE_SORTED=$(echo "$ALL_LIVE" | sed '/^$/d' | sort -n | uniq)
if [[ -z "$LIVE_SORTED" ]]; then
  echo ""
  echo "[INFO] No live VMIDs collected (no SSH to cluster). Run from LAN with keys to Proxmox nodes."
  exit 0
fi
echo ""
echo "=== Diff (template expected vs union of live VMIDs) ==="
MISSING=0
while IFS= read -r ev; do
  [[ -z "${ev:-}" ]] && continue
  # -F literal + -x whole-line: VMID 100 must not match inside 1000.
  if ! echo "$LIVE_SORTED" | grep -Fqx "$ev"; then
    echo "[MISSING_ON_CLUSTER] VMID $ev (in template, not seen on scanned nodes)"
    MISSING=$((MISSING + 1))
  fi
done <<< "$EXPECTED_VMIDS"
EXTRA=0
while IFS= read -r lv; do
  [[ -z "${lv:-}" ]] && continue
  if ! echo "$EXPECTED_VMIDS" | grep -Fqx "$lv"; then
    echo "[EXTRA_ON_CLUSTER] VMID $lv (on cluster, not in template services[])"
    EXTRA=$((EXTRA + 1))
  fi
done <<< "$LIVE_SORTED"
echo ""
echo "Summary: missing_on_template_scan=$MISSING extra_vs_template=$EXTRA"
echo "Note: VMIDs on nodes not scanned (other hosts) appear as MISSING. Expand PROXMOX_HOSTS if needed."