From e6bc7a6d7cd7e4e3805f70c0700c274e1483f6b7 Mon Sep 17 00:00:00 2001 From: defiQUG Date: Tue, 21 Apr 2026 21:56:49 -0700 Subject: [PATCH] chore(verify): PR validation workflow, pnpm Solana peer, lockfile check, backup SSH - Gitea: add validate-on-pr.yml (run-all-validation only; no deploy) - .env.master.example: document NPM_EMAIL/NPM_PASSWORD for backup-npmplus - pnpm: allowedVersions for @solana/sysvars to quiet thirdweb/x402 peer drift - AGENTS + verify README: CI pointers and .env.master.example for env - backup-npmplus: npm_lxc_ssh helper; keep prior timeout/BatchMode behavior - check-pnpm-workspace-lockfile + run-all-validation step 1b (from prior work in same commit set) Made-with: Cursor --- .env.master.example | 2 + .gitea/workflows/validate-on-pr.yml | 16 +++ AGENTS.md | 9 +- package.json | 3 +- scripts/verify/README.md | 8 +- scripts/verify/backup-npmplus.sh | 136 ++++++++++++------ .../verify/check-pnpm-workspace-lockfile.sh | 50 +++++++ scripts/verify/run-all-validation.sh | 37 ++++- 8 files changed, 214 insertions(+), 47 deletions(-) create mode 100644 .gitea/workflows/validate-on-pr.yml create mode 100755 scripts/verify/check-pnpm-workspace-lockfile.sh diff --git a/.env.master.example b/.env.master.example index d0babefd..73b5e6e4 100644 --- a/.env.master.example +++ b/.env.master.example @@ -40,6 +40,8 @@ CLOUDNS_AUTH_ID= CLOUDNS_AUTH_PASSWORD= # --- NPM / NPMplus --- +# For scripts/verify/backup-npmplus.sh: NPM_EMAIL and NPM_PASSWORD are both required +# (no in-script defaults); see AGENTS.md operator / backup row. NPM_URL= NPM_EMAIL= NPM_PASSWORD= diff --git a/.gitea/workflows/validate-on-pr.yml b/.gitea/workflows/validate-on-pr.yml new file mode 100644 index 00000000..3cb7b9d9 --- /dev/null +++ b/.gitea/workflows/validate-on-pr.yml @@ -0,0 +1,16 @@ +# PR-only: push validation already runs in deploy-to-phoenix.yml; this gives PRs the same +# no-LAN checks without the deploy job (and without deploy secrets). 
+name: Validate (PR) +on: + pull_request: + types: [opened, synchronize, reopened] + branches: [main, master] + workflow_dispatch: +jobs: + run-all-validation: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: run-all-validation (no LAN, no genesis) + run: bash scripts/verify/run-all-validation.sh --skip-genesis diff --git a/AGENTS.md b/AGENTS.md index 681d6312..8131dc5a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -17,6 +17,8 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | Ops template + JSON | `docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md`, `config/proxmox-operational-template.json` | | Live vs template (read-only SSH) | `bash scripts/verify/audit-proxmox-operational-template.sh` | | Config validation | `bash scripts/validation/validate-config-files.sh` | +| pnpm lockfile vs workspace (prevents `pnpm outdated` / importer bugs) | `bash scripts/verify/check-pnpm-workspace-lockfile.sh` — also run as **step 1b** in `run-all-validation.sh` | +| CI validation (no LAN) + cW* mesh matrix | `bash scripts/verify/run-all-validation.sh [--skip-genesis]` — same gate as **Gitea** push/PR: `run-all-validation` in `.gitea/workflows/deploy-to-phoenix.yml` (push) and `.gitea/workflows/validate-on-pr.yml` (PR only). Steps: dependencies, **pnpm workspace/lockfile check**, config, cW* mesh (when pair-discovery exists), **`node cross-chain-pmm-lps/scripts/validate-deployment-status.cjs`**, optional genesis. Manual only: `bash scripts/verify/build-cw-mesh-deployment-matrix.sh [--json-out …]` | | FQDN / NPM E2E verifier | `bash scripts/verify/verify-end-to-end-routing.sh --profile=public` — inventory: `docs/04-configuration/E2E_ENDPOINTS_LIST.md`. 
Gitea Actions URLs (no API): `bash scripts/verify/print-gitea-actions-urls.sh` | | Submodule trees clean (CI / post-merge) | `bash scripts/verify/submodules-clean.sh` | | Submodule + explorer remotes | `docs/00-meta/SUBMODULE_HYGIENE.md` | @@ -27,7 +29,12 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | The Order portal (`https://the-order.sankofa.nexus`) | OSJ management UI (secure auth); source repo **the_order** at `~/projects/the_order`. NPM upstream defaults to **order-haproxy** CT **10210** (`IP_ORDER_HAPROXY:80`); use `THE_ORDER_UPSTREAM_*` to point at the Sankofa portal if 10210 is down. Provision HAProxy: `scripts/deployment/provision-order-haproxy-10210.sh`. **`www.the-order.sankofa.nexus`** → **301** apex (same as www.sankofa / www.phoenix). | | Portal login + Keycloak systemd + `.env` (prints password once) | `./scripts/deployment/enable-sankofa-portal-login-7801.sh` (`--dry-run` first) | | Completable (no LAN) | `./scripts/run-completable-tasks-from-anywhere.sh` | -| Operator (LAN + secrets) | `./scripts/run-all-operator-tasks-from-lan.sh` (use `--skip-backup` if `NPM_PASSWORD` unset) | +| smom-dbis-138 root `forge test` | Uses `foundry.toml` `[profile.default] skip` for legacy Uniswap V2 vendor trees (0.5/0.6 solc); scoped work still uses `bash scripts/forge/scope.sh …` | +| cWUSDT Mainnet USD pricing (on-chain + runbook) | `./scripts/deployment/price-cw-token-mainnet.sh` — `docs/03-deployment/CW_TOKEN_USD_PRICING_RUNBOOK.md` | +| Deployer LP balances (mesh inventory) | `python3 scripts/deployment/check-deployer-lp-balances.py` — scans `deployment-status.json` + `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json`; **UniV2** `lpToken` = pair; **DODO DVM** LP shares = `balanceOf(pool)`; on failure, probes `_BASE_TOKEN_` / `_BASE_CAPITAL_TOKEN_` / `_QUOTE_CAPITAL_TOKEN_` + extra public RPCs (`--no-resolve-dodo` skips; `--chain-id N` for one chain). 
JSON: `lpTokenAddress`, `lpResolution`, `lpBalances[]`. Use `--deployer` / `DEPLOYER_ADDRESS` if no `PRIVATE_KEY` | +| Etherscan Value $0 for Mainnet `cW*` | Listing path (CoinGecko/CMC), not a contract toggle — `docs/04-configuration/coingecko/ETHERSCAN_USD_VALUE_MAINNET_TOKENS.md` | +| Verify contracts on explorers (all networks) | `cd smom-dbis-138 && ./scripts/deployment/verify-all-networks-explorers.sh` — Blockscout 138, Etherscan + multichain `cW*`, Avax/Arb bridges, optional Cronos/Wemix/CCIPLogger | +| Operator (LAN + secrets) | `./scripts/run-all-operator-tasks-from-lan.sh` (use `--skip-backup` if `NPM_PASSWORD` unset; backup also needs `NPM_EMAIL` in `.env`) | | Cloudflare bulk DNS → `PUBLIC_IP` | `./scripts/update-all-dns-to-public-ip.sh` — use **`--dry-run`** and **`--zone-only=sankofa.nexus`** (or `d-bis.org` / `mim4u.org` / `defi-oracle.io`) to limit scope; see script header. Prefer scoped **`CLOUDFLARE_API_TOKEN`** (see `.env.master.example`). | ## Git submodules diff --git a/package.json b/package.json index 03b4d381..4388c64d 100644 --- a/package.json +++ b/package.json @@ -56,7 +56,8 @@ "pnpm": { "peerDependencyRules": { "allowedVersions": { - "zod": "4" + "zod": "4", + "@solana/sysvars": "5" } }, "ignoredBuiltDependencies": [ diff --git a/scripts/verify/README.md b/scripts/verify/README.md index 5f81c163..59901993 100644 --- a/scripts/verify/README.md +++ b/scripts/verify/README.md @@ -29,9 +29,13 @@ One-line install (Debian/Ubuntu): `sudo apt install -y sshpass rsync dnsutils ip - `backup-npmplus.sh` - Full NPMplus backup (database, API exports, certificates) - `check-contracts-on-chain-138.sh` - Check that Chain 138 deployed contracts have bytecode on-chain (`cast code` for 31 addresses; requires `cast` and RPC access). Use `[RPC_URL]` or env `RPC_URL_138`; `--dry-run` lists addresses only (no RPC calls); `SKIP_EXIT=1` to exit 0 when RPC unreachable. 
+- `snapshot-mainnet-cwusdc-usdc-preflight.sh` - Read-only preflight snapshot for the Mainnet `cWUSDC/USDC` rail. Captures public-pair drift, defended DODO reserves, treasury-manager quote availability, receiver surplus, and defended-lane quote sizing into `reports/status/`. +- `plan-mainnet-cwusdc-usdc-repeg.sh` - Read-only repeg planner for the Mainnet `cWUSDC/USDC` rail. Consumes the latest preflight snapshot, computes defended-pool reserve-gap sizing, public-pair shortfalls, operator-wallet coverage, and emits copy-paste operator commands into `reports/status/`. +- `build-cw-mesh-deployment-matrix.sh` - Read-only merge of `cross-chain-pmm-lps/config/deployment-status.json` and `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json` into a per-chain table (stdout markdown; optional `--json-out reports/status/cw-mesh-deployment-matrix-latest.json`). No RPC. Invoked from `run-all-validation.sh` when the discovery JSON is present. - `reconcile-env-canonical.sh` - Emit recommended .env lines for Chain 138 (canonical source of truth); use to reconcile `smom-dbis-138/.env` with [CONTRACT_ADDRESSES_REFERENCE](../../docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md). 
Usage: `./scripts/verify/reconcile-env-canonical.sh [--print]` - `check-deployer-balance-blockscout-vs-rpc.sh` - Compare deployer native balance from Blockscout API vs RPC (to verify index matches current chain); see [EXPLORER_AND_BLOCKSCAN_REFERENCE](../../docs/11-references/EXPLORER_AND_BLOCKSCAN_REFERENCE.md) - `check-dependencies.sh` - Verify required tools (bash, curl, jq, openssl, ssh) +- `check-pnpm-workspace-lockfile.sh` - Ensures every path in `pnpm-workspace.yaml` has an `importer` in `pnpm-lock.yaml` (run `pnpm install` at root if it fails; avoids broken `pnpm outdated -r`) - `export-cloudflare-dns-records.sh` - Export Cloudflare DNS records - `export-npmplus-config.sh` - Export NPMplus proxy hosts and certificates via API - `generate-source-of-truth.sh` - Combine verification outputs into canonical JSON @@ -43,9 +47,9 @@ One-line install (Debian/Ubuntu): `sudo apt install -y sshpass rsync dnsutils ip ## Task runners (no LAN vs from LAN) -- **From anywhere (no LAN/creds):** `../run-completable-tasks-from-anywhere.sh` — runs config validation, on-chain contract check, run-all-validation --skip-genesis, and reconcile-env-canonical. +- **From anywhere (no LAN/creds):** `../run-completable-tasks-from-anywhere.sh` — runs config validation, on-chain contract check, `run-all-validation.sh --skip-genesis` (includes cW* mesh matrix when `reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json` exists), and reconcile-env-canonical. On Gitea, the same `run-all-validation` gate runs on **push** (in `deploy-to-phoenix` before deploy) and on **PRs** (`.gitea/workflows/validate-on-pr.yml` only, no deploy). - **From LAN (NPM_PASSWORD, optional PRIVATE_KEY):** `../run-operator-tasks-from-lan.sh` — runs W0-1 (NPMplus RPC fix), W0-3 (NPMplus backup), O-1 (Blockscout verification); use `--dry-run` to print commands only. See [ALL_TASKS_DETAILED_STEPS](../../docs/00-meta/ALL_TASKS_DETAILED_STEPS.md). 
## Environment -Set variables in `.env` or export before running. See project root `.env.example` and [docs/04-configuration/VERIFICATION_GAPS_AND_TODOS.md](../../docs/04-configuration/VERIFICATION_GAPS_AND_TODOS.md). +Set variables in `.env` (from `.env.master.example` at repo root) or export before running. [docs/04-configuration/VERIFICATION_GAPS_AND_TODOS.md](../../docs/04-configuration/VERIFICATION_GAPS_AND_TODOS.md). NPM `NPM_EMAIL` + `NPM_PASSWORD` (see that template’s NPM / NPMplus section) are required for `backup-npmplus.sh` API steps. diff --git a/scripts/verify/backup-npmplus.sh b/scripts/verify/backup-npmplus.sh index 43e22e77..9135f9c0 100755 --- a/scripts/verify/backup-npmplus.sh +++ b/scripts/verify/backup-npmplus.sh @@ -40,9 +40,15 @@ fi NPMPLUS_VMID="${NPMPLUS_VMID:-${NPM_VMID:-10233}}" NPMPLUS_HOST="${NPMPLUS_HOST:-${NPM_PROXMOX_HOST:-${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}}}" NPM_URL="${NPM_URL:-https://${IP_NPMPLUS:-${IP_NPMPLUS:-192.168.11.167}}:81}" -NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}" +# Set NPM_EMAIL in .env (no default — avoids baking personal addresses in repo defaults) +NPM_EMAIL="${NPM_EMAIL:-}" NPM_PASSWORD="${NPM_PASSWORD:-}" +# Proxmox host: SSH to node running NPM LXC (consistent timeouts, BatchMode for automation) +npm_lxc_ssh() { + ssh -o ConnectTimeout=15 -o BatchMode=yes "root@$NPMPLUS_HOST" "$@" +} + DRY_RUN=false [[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true @@ -57,11 +63,16 @@ echo "💾 NPMplus Backup Script" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "" -# Validate NPM password (skip for dry-run) -if [ -z "$NPM_PASSWORD" ] && [[ "$DRY_RUN" != true ]]; then - log_error "NPM_PASSWORD environment variable is required" - log_info "Set it in .env file or export it before running this script" +# Validate NPM password and API identity (skip for dry-run) +if [[ "$DRY_RUN" != true ]]; then + if [ -z "$NPM_PASSWORD" ]; then + log_error "NPM_PASSWORD is required (set in .env or export 
before running)" exit 1 + fi + if [ -z "$NPM_EMAIL" ]; then + log_error "NPM_EMAIL is required for API export steps (set in .env; no default)" + exit 1 + fi fi if [[ "$DRY_RUN" == true ]]; then @@ -79,29 +90,56 @@ log_info "Step 1: Backing up NPMplus database..." DB_BACKUP_DIR="$BACKUP_DIR/database" mkdir -p "$DB_BACKUP_DIR" +# Discover database.sqlite (path differs across images / mount layouts) +NPM_DB_PATH=$( + npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c ' + for p in /data/database.sqlite /data/database/database.sqlite; do + [ -f \"\$p\" ] && { echo \"\$p\"; exit 0; } + done + f=\$(find /data -maxdepth 6 -name database.sqlite 2>/dev/null | head -1) + if [ -n \"\$f\" ] && [ -f \"\$f\" ]; then echo \"\$f\"; else echo \"\"; fi + '" 2>/dev/null | tr -d '\r' || true +) +NPM_DB_PATH="${NPM_DB_PATH//$'\n'/}" +if [ -n "$NPM_DB_PATH" ]; then + log_info " Found DB at: $NPM_DB_PATH (inside LXC)" +else + log_info " No database.sqlite at common paths; dump may be skipped (check container / mounts)" +fi # Method 1: SQL dump log_info " Creating SQL dump..." -ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- bash -c ' - if [ -f /data/database.sqlite ]; then - sqlite3 /data/database.sqlite \".dump\" > /tmp/npm-database.sql 2>/dev/null || echo \"Database export may have issues\" - cat /tmp/npm-database.sql +NPM_DB_PQ="" +[ -n "$NPM_DB_PATH" ] && NPM_DB_PQ=$(printf %q "$NPM_DB_PATH") +if [ -n "$NPM_DB_PATH" ]; then + npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c \"if command -v sqlite3 >/dev/null 2>&1; then sqlite3 $NPM_DB_PQ .dump; else echo _NO_SQLITE3; fi\"" \ + > "$DB_BACKUP_DIR/database.sql" 2>/dev/null || : > "$DB_BACKUP_DIR/database.sql" + if grep -qxF '_NO_SQLITE3' "$DB_BACKUP_DIR/database.sql" 2>/dev/null; then + : > "$DB_BACKUP_DIR/database.sql" + fi +else + : > "$DB_BACKUP_DIR/database.sql" +fi + +# Method 2: Direct file copy (binary; complements SQL) +if [ -n "$NPM_DB_PATH" ]; then + log_info " Copying database file (binary)..." + if ! 
npm_lxc_ssh "pct exec $NPMPLUS_VMID -- sh -c \"cat $NPM_DB_PQ\"" > "$DB_BACKUP_DIR/database.sqlite" 2>/dev/null; then + if [ -s "$DB_BACKUP_DIR/database.sql" ] && grep -qiE 'CREATE|INSERT|PRAGMA' "$DB_BACKUP_DIR/database.sql" 2>/dev/null; then + log_info " Skipped or failed binary copy; SQL dump contains schema/data and is usable for restore" else - echo \"Database file not found\" + log_warn " Direct copy failed and SQL dump empty or unusable — check LXC/SSH, sqlite3, or volume mount" fi -'" > "$DB_BACKUP_DIR/database.sql" || { - log_warn " SQL dump failed, trying direct copy..." -} - -# Method 2: Direct file copy -log_info " Copying database file..." -ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- cat /data/database.sqlite" > "$DB_BACKUP_DIR/database.sqlite" 2>/dev/null || { - log_warn " Direct copy failed - database may not exist or container may be down" -} - -if [ -s "$DB_BACKUP_DIR/database.sql" ] || [ -s "$DB_BACKUP_DIR/database.sqlite" ]; then + else + log_info " Binary database copy OK" + fi +else + : > "$DB_BACKUP_DIR/database.sqlite" + log_info " Skipping binary copy (no DB file resolved)" +fi +if ( [ -s "$DB_BACKUP_DIR/database.sql" ] && grep -qiE 'CREATE|INSERT|PRAGMA' "$DB_BACKUP_DIR/database.sql" 2>/dev/null ) || [ -s "$DB_BACKUP_DIR/database.sqlite" ]; then log_success " Database backup completed" else - log_warn " Database backup may be empty - check container status" + log_warn " Database backup empty — LXC not reachable, DB path changed, or sqlite3 missing in container" fi # Step 2: Export Proxy Hosts via API @@ -152,11 +190,24 @@ log_info "Step 3: Backing up certificate files..." CERT_BACKUP_DIR="$BACKUP_DIR/certificates" mkdir -p "$CERT_BACKUP_DIR" -# List all certificates -log_info " Listing certificates..." 
-ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- ls -1 /data/tls/certbot/live/ 2>/dev/null" > "$CERT_BACKUP_DIR/cert_list.txt" 2>/dev/null || { - log_warn " Could not list certificates - path may differ" -} +CERT_LIVE_BASE="" +for _try in /data/tls/certbot/live /etc/letsencrypt/live /data/letsencrypt/live; do + if npm_lxc_ssh "pct exec $NPMPLUS_VMID -- test -d '$_try'" 2>/dev/null; then + CERT_LIVE_BASE="$_try" + break + fi +done + +# List all certificate directories (NPM+ Certbot / Linux layouts) +if [ -n "$CERT_LIVE_BASE" ]; then + log_info " Listing certificates (from $CERT_LIVE_BASE)..." + npm_lxc_ssh "pct exec $NPMPLUS_VMID -- ls -1 $CERT_LIVE_BASE/ 2>/dev/null" | grep -v '^lost+found$' | grep -v '^$' > "$CERT_BACKUP_DIR/cert_list.txt" 2>/dev/null || { + : > "$CERT_BACKUP_DIR/cert_list.txt" + } +else + : > "$CERT_BACKUP_DIR/cert_list.txt" + log_info " No Certbot/letsencrypt live dir in common paths; PEM files may be elsewhere or API-only" +fi # Copy certificate files if [ -s "$CERT_BACKUP_DIR/cert_list.txt" ]; then @@ -164,22 +215,25 @@ if [ -s "$CERT_BACKUP_DIR/cert_list.txt" ]; then while IFS= read -r cert_dir; do if [ -n "$cert_dir" ] && [ "$cert_dir" != "lost+found" ]; then mkdir -p "$CERT_BACKUP_DIR/$cert_dir" - - # Copy fullchain.pem - ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- cat /data/tls/certbot/live/$cert_dir/fullchain.pem" > "$CERT_BACKUP_DIR/$cert_dir/fullchain.pem" 2>/dev/null || { + npm_lxc_ssh "pct exec $NPMPLUS_VMID -- cat $CERT_LIVE_BASE/$cert_dir/fullchain.pem" > "$CERT_BACKUP_DIR/$cert_dir/fullchain.pem" 2>/dev/null || { log_warn " Failed to copy fullchain.pem for $cert_dir" } - - # Copy privkey.pem - ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- cat /data/tls/certbot/live/$cert_dir/privkey.pem" > "$CERT_BACKUP_DIR/$cert_dir/privkey.pem" 2>/dev/null || { + npm_lxc_ssh "pct exec $NPMPLUS_VMID -- cat $CERT_LIVE_BASE/$cert_dir/privkey.pem" > "$CERT_BACKUP_DIR/$cert_dir/privkey.pem" 2>/dev/null || { log_warn " Failed to copy 
privkey.pem for $cert_dir" } fi done < "$CERT_BACKUP_DIR/cert_list.txt" - - log_success " Certificate files backed up" + log_success " Certificate files backed up (where readable)" else - log_warn " No certificates found to backup" + API_CERT_N=0 + if [ -f "$API_BACKUP_DIR/certificates.json" ] && command -v jq >/dev/null 2>&1; then + API_CERT_N=$(jq 'if type == "array" then length else 0 end' "$API_BACKUP_DIR/certificates.json" 2>/dev/null) || API_CERT_N=0 + fi + if [ "$API_CERT_N" -gt 0 ] 2>/dev/null; then + log_info " No on-disk cert dirs listed; $API_CERT_N certificate(s) in api/certificates.json (PEMs may be internal only)" + else + log_info " No separate PEM backup (none listed or API export missing)" + fi fi # Step 4: Backup Docker Volume (if accessible) @@ -187,10 +241,12 @@ log_info "Step 4: Attempting Docker volume backup..." VOLUME_BACKUP_DIR="$BACKUP_DIR/volumes" mkdir -p "$VOLUME_BACKUP_DIR" -# Try to export Docker volume -ssh root@"$NPMPLUS_HOST" "pct exec $NPMPLUS_VMID -- docker volume ls" > "$VOLUME_BACKUP_DIR/volume_list.txt" 2>/dev/null || { - log_warn " Could not list Docker volumes" -} +# Optional: list Docker volumes if docker exists in the LXC +if npm_lxc_ssh "pct exec $NPMPLUS_VMID -- docker volume ls" > "$VOLUME_BACKUP_DIR/volume_list.txt" 2>/dev/null; then + log_info " Docker volume list written (if NPM uses host paths instead, that is still OK)" +else + log_info " No Docker in LXC or not used — skip volume list" +fi # Step 5: Create backup manifest log_info "Step 5: Creating backup manifest..." diff --git a/scripts/verify/check-pnpm-workspace-lockfile.sh b/scripts/verify/check-pnpm-workspace-lockfile.sh new file mode 100755 index 00000000..8904f4d3 --- /dev/null +++ b/scripts/verify/check-pnpm-workspace-lockfile.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Every path listed under "packages:" in pnpm-workspace.yaml must have a matching +# importer entry in pnpm-lock.yaml. If one is missing, pnpm can fail in confusing +# ways (e.g. 
pnpm outdated -r: Cannot read ... 'optionalDependencies').
+# Usage: bash scripts/verify/check-pnpm-workspace-lockfile.sh
+# Exit: 0 if check passes or pnpm is not used; 1 on mismatch.
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
+WS="${PROJECT_ROOT}/pnpm-workspace.yaml"
+LOCK="${PROJECT_ROOT}/pnpm-lock.yaml"
+
+if [[ ! -f "$WS" ]] || [[ ! -f "$LOCK" ]]; then
+  echo " (skip: pnpm-workspace.yaml or pnpm-lock.yaml not present at repo root)"
+  exit 0
+fi
+
+# Paths under the top-level `packages:` block only (stops at next top-level key)
+mapfile -t _paths < <(awk '
+  /^packages:/ { p=1; next }
+  p && /^[a-zA-Z]/ && $0 !~ /^packages/ { exit }
+  p && /^[[:space:]]*-[[:space:]]/ {
+    sub(/^[[:space:]]*-[[:space:]]+/, "")
+    sub(/[[:space:]]*#.*/, "")
+    gsub(/[[:space:]]+$/, "")
+    if (length) print
+  }
+' "$WS")
+
+missing=()
+for relp in "${_paths[@]}"; do
+  if [[ -z "$relp" ]]; then
+    continue
+  fi
+  if ! grep -qFx " ${relp}:" "$LOCK"; then
+    missing+=("$relp")
+  fi
+done
+
+if [[ ${#missing[@]} -gt 0 ]]; then
+  echo "✗ pnpm lockfile is missing importer(s) for these workspace path(s):"
+  printf ' %q\n' "${missing[@]}"
+  echo " Run: pnpm install (at repo root) to refresh pnpm-lock.yaml"
+  exit 1
+fi
+
+echo " pnpm workspace / lockfile importers aligned (${#_paths[@]} path(s))."
+exit 0
diff --git a/scripts/verify/run-all-validation.sh b/scripts/verify/run-all-validation.sh
index 1bd75b83..7cd48a5a 100644
--- a/scripts/verify/run-all-validation.sh
+++ b/scripts/verify/run-all-validation.sh
@@ -3,6 +3,7 @@
 # Use for CI or pre-deploy: dependencies, config files, optional genesis.
 # Usage: bash scripts/verify/run-all-validation.sh [--skip-genesis]
 # --skip-genesis: do not run validate-genesis.sh (default: run if smom-dbis-138 present).
+# Steps: dependencies, pnpm workspace/lockfile check, config files, cW* mesh matrix (if pair-discovery JSON exists), deployment-status graph, genesis.
set -euo pipefail @@ -24,15 +25,45 @@ bash "$SCRIPT_DIR/check-dependencies.sh" || log_err "check-dependencies failed" log_ok "Dependencies OK" echo "" +echo "1b. pnpm workspace vs lockfile..." +if [[ -f "$PROJECT_ROOT/pnpm-workspace.yaml" ]]; then + bash "$SCRIPT_DIR/check-pnpm-workspace-lockfile.sh" || log_err "pnpm lockfile / workspace drift" + log_ok "pnpm lockfile aligned with workspace" +else + echo " (no pnpm-workspace.yaml at root — skip)" +fi +echo "" + echo "2. Config files..." bash "$SCRIPT_DIR/../validation/validate-config-files.sh" || log_err "validate-config-files failed" log_ok "Config validation OK" echo "" -if [[ "$SKIP_GENESIS" == true ]]; then - echo "3. Genesis — skipped (--skip-genesis)" +echo "3. cW* mesh matrix (deployment-status + Uni V2 pair-discovery)..." +DISCOVERY_JSON="$PROJECT_ROOT/reports/extraction/promod-uniswap-v2-live-pair-discovery-latest.json" +if [[ -f "$DISCOVERY_JSON" ]]; then + MATRIX_JSON="$PROJECT_ROOT/reports/status/cw-mesh-deployment-matrix-latest.json" + bash "$SCRIPT_DIR/build-cw-mesh-deployment-matrix.sh" --no-markdown --json-out "$MATRIX_JSON" || log_err "cw mesh matrix merge failed" + log_ok "cW mesh matrix OK (also wrote $MATRIX_JSON)" else - echo "3. Genesis (smom-dbis-138)..." + echo " ($DISCOVERY_JSON missing — run: bash scripts/verify/build-promod-uniswap-v2-live-pair-discovery.sh)" +fi +echo "" + +echo "3b. deployment-status graph (cross-chain-pmm-lps)..." +PMM_VALIDATE="$PROJECT_ROOT/cross-chain-pmm-lps/scripts/validate-deployment-status.cjs" +if [[ -f "$PMM_VALIDATE" ]] && command -v node &>/dev/null; then + node "$PMM_VALIDATE" || log_err "validate-deployment-status.cjs failed" + log_ok "deployment-status.json rules OK" +else + echo " (skip: node or $PMM_VALIDATE missing)" +fi +echo "" + +if [[ "$SKIP_GENESIS" == true ]]; then + echo "4. Genesis — skipped (--skip-genesis)" +else + echo "4. Genesis (smom-dbis-138)..." 
GENESIS_SCRIPT="$PROJECT_ROOT/smom-dbis-138/scripts/validation/validate-genesis.sh" if [[ -x "$GENESIS_SCRIPT" ]]; then bash "$GENESIS_SCRIPT" || log_err "validate-genesis failed"