chore: sync all changes to Gitea
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- Config, docs, scripts, and backup manifests
- Submodule refs unchanged (m = modified content in submodules)

Made-with: Cursor
This commit is contained in:
defiQUG
2026-03-02 11:37:34 -08:00
parent ed85135249
commit b3a8fe4496
883 changed files with 73580 additions and 4796 deletions

View File

@@ -0,0 +1,18 @@
# VM 5702 — Inference: llama.cpp server (CPU-friendly)
# Copy to /opt/ai/inference/ and place model at /opt/ai/inference/data/models/model.gguf
# See: docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_PLAN.md Appendix D
services:
  llama:
    image: ghcr.io/ggerganov/llama.cpp:server
    container_name: ai-inf-prod
    volumes:
      - /opt/ai/inference/data/models:/models
    command: >
      -m /models/model.gguf
      --host 0.0.0.0 --port 8000
      --n-gpu-layers 0
      --ctx-size 4096
    ports:
      - "8000:8000"
    restart: unless-stopped
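A quick smoke test once this stack is up (a minimal sketch; `/health` and `/completion` are llama.cpp server defaults, and the host port is the `8000:8000` mapping above):

```bash
# Expect an OK status once the model has finished loading.
curl -s http://localhost:8000/health
# Request a short completion to confirm inference works end to end.
curl -s http://localhost:8000/completion \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello", "n_predict": 16}'
```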

View File

@@ -0,0 +1,13 @@
# VM 5703 agent — copy to /opt/ai/agent/.env
# Replace hostnames with your 5701/5702 hostnames or IPs if not using numeric hostnames.
MCP_URL=http://5701:3000/mcp/call
INF_URL=http://5702:8000
MODE=read-only
# Set to an allowlisted pool address when using dodo.get_pool_state
POOL_ADDRESS=POOL_ADDRESS_HERE
# Optional: when VM 5704 is used
# PG_DSN=postgresql://ai:YOUR_PASSWORD@5704:5432/ai
# REDIS_URL=redis://5704:6379/0
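For reference, a sketch of what an allowlisted `dodo.get_pool_state` call against `MCP_URL` might look like (the JSON payload shape is an assumption — check the hub's actual API in ai-mcp-pmm-controller):

```bash
# Hypothetical request shape; adjust the tool/args fields to the hub's schema.
curl -s "http://5701:3000/mcp/call" \
  -H "Content-Type: application/json" \
  -d "{\"tool\": \"dodo.get_pool_state\", \"args\": {\"pool\": \"$POOL_ADDRESS\"}}"
```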

View File

@@ -0,0 +1,24 @@
# VM 5703 — Agent worker: calls 5701 (MCP) and 5702 (inference)
# Copy to /opt/ai/agent/ and copy agent.py to /opt/ai/agent/config/
# See: docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_PLAN.md Appendix E
services:
  agent:
    image: python:3.11-slim
    container_name: ai-agent-prod
    working_dir: /app
    volumes:
      - /opt/ai/agent/config:/app
      - /opt/ai/agent/logs:/logs
    env_file: .env
    environment:
      MCP_URL: ${MCP_URL:-http://5701:3000/mcp/call}
      INF_URL: ${INF_URL:-http://5702:8000}
      MODE: ${MODE:-read-only}
      POOL_ADDRESS: ${POOL_ADDRESS:-POOL_ADDRESS_HERE}
      # PG_DSN: postgresql://ai:...@5704:5432/ai
      # REDIS_URL: redis://5704:6379/0
    command: >
      sh -lc "pip install --no-cache-dir requests &&
      python agent.py"
    restart: unless-stopped
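Before `docker compose up -d`, a minimal reachability check from VM 5703 (assumes the same defaults as the environment block above; any non-000 HTTP code means the host answered):

```bash
curl -s -m 5 -o /dev/null -w "MCP HTTP %{http_code}\n" "${MCP_URL:-http://5701:3000/mcp/call}"
curl -s -m 5 -o /dev/null -w "INF HTTP %{http_code}\n" "${INF_URL:-http://5702:8000}/health"
```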

View File

@@ -0,0 +1,2 @@
# VM 5704 state — copy to /opt/ai/state/.env and set a strong password
POSTGRES_PASSWORD=change_me_strong

View File

@@ -0,0 +1,47 @@
# VM 5704 — Memory/State: Postgres + Redis
# Copy to /opt/ai/state/ and run: docker compose up -d
# See: docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_PLAN.md Appendix B
services:
  postgres:
    image: postgres:16
    container_name: ai-state-postgres
    environment:
      POSTGRES_DB: ai
      POSTGRES_USER: ai
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    volumes:
      - /opt/ai/state/data/postgres:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ai -d ai"]
      interval: 10s
      timeout: 5s
      retries: 10
  redis:
    image: redis:7
    container_name: ai-state-redis
    command: ["redis-server", "--appendonly", "yes", "--save", "60", "1"]
    volumes:
      - /opt/ai/state/data/redis:/data
    ports:
      - "6379:6379"
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "PING"]
      interval: 10s
      timeout: 3s
      retries: 10
  # Optional vector DB (uncomment if needed)
  # qdrant:
  #   image: qdrant/qdrant:latest
  #   container_name: ai-state-qdrant
  #   volumes:
  #     - /opt/ai/state/data/qdrant:/qdrant/storage
  #   ports:
  #     - "6333:6333"
  #   restart: unless-stopped
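Once up, the health checks can be exercised manually — these are the same commands the compose healthchecks run:

```bash
docker exec ai-state-postgres pg_isready -U ai -d ai   # expect "accepting connections"
docker exec ai-state-redis redis-cli PING              # expect "PONG"
```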

View File

@@ -0,0 +1,39 @@
# 57xx AI/Agents deploy artifacts
Copy these files to the target VMs per the [deployment task list](../../docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_TASKS.md).
**VMID band:** 5700–5999 (see [VMID_ALLOCATION_FINAL.md](../../docs/02-architecture/VMID_ALLOCATION_FINAL.md)).
## Layout
| Path | Purpose |
|------|---------|
| **5704-state/** | Postgres + Redis for VM 5704. Copy `docker-compose.yml` and `.env.example` → `/opt/ai/state/`, rename `.env.example` to `.env`, set `POSTGRES_PASSWORD`. |
| **5702-inference/** | llama.cpp server for VM 5702. Copy `docker-compose.yml` → `/opt/ai/inference/`. Put GGUF at `/opt/ai/inference/data/models/model.gguf`. |
| **5703-agent/** | Agent worker for VM 5703. Copy `agent.py` → `/opt/ai/agent/config/`, `docker-compose.yml` and `.env.example` → `/opt/ai/agent/`. Rename `.env.example` to `.env`, set `MCP_URL`, `INF_URL`, and optionally `POOL_ADDRESS`. |
**5701 (MCP Hub)** is the **ai-mcp-pmm-controller** submodule at repo root; see [AI_AGENTS_57XX_DEPLOYMENT_TASKS.md](../../docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_TASKS.md) Task 2.
## Optional: copy to /opt/ai (run on target host with repo at /opt/proxmox)
```bash
REPO=/opt/proxmox
sudo mkdir -p /opt/ai/state/data/postgres /opt/ai/state/data/redis
sudo mkdir -p /opt/ai/inference/data/models /opt/ai/agent/config /opt/ai/agent/logs
sudo chown -R $USER:$USER /opt/ai
cp "$REPO/scripts/57xx-deploy/5704-state/docker-compose.yml" /opt/ai/state/
cp "$REPO/scripts/57xx-deploy/5704-state/.env.example" /opt/ai/state/.env
cp "$REPO/scripts/57xx-deploy/5702-inference/docker-compose.yml" /opt/ai/inference/
cp "$REPO/scripts/57xx-deploy/5703-agent/agent.py" /opt/ai/agent/config/
cp "$REPO/scripts/57xx-deploy/5703-agent/docker-compose.yml" /opt/ai/agent/
cp "$REPO/scripts/57xx-deploy/5703-agent/.env.example" /opt/ai/agent/.env
# Edit secrets and URLs:
# - /opt/ai/state/.env → POSTGRES_PASSWORD
# - /opt/ai/agent/.env → MCP_URL, INF_URL, POOL_ADDRESS (if used)
```
Then start each stack from its directory (see [AI_AGENTS_57XX_DEPLOYMENT_TASKS.md](../../docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_TASKS.md)).
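A minimal start sequence (state first, so the optional `PG_DSN`/`REDIS_URL` targets exist before the agent starts):

```bash
(cd /opt/ai/state && docker compose up -d)
(cd /opt/ai/inference && docker compose up -d)
(cd /opt/ai/agent && docker compose up -d)
```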

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Copy 57xx deploy artifacts to /opt/ai. Run from repo root or set REPO.
# Usage: ./scripts/57xx-deploy/copy-to-opt-ai.sh
set -e
REPO="${REPO:-$(cd "$(dirname "$0")/../.." && pwd)}"
DEPLOY="$REPO/scripts/57xx-deploy"
echo "Using REPO=$REPO"
sudo mkdir -p /opt/ai/state/data/postgres /opt/ai/state/data/redis
sudo mkdir -p /opt/ai/inference/data/models /opt/ai/agent/config /opt/ai/agent/logs
sudo chown -R "$USER:$USER" /opt/ai
cp "$DEPLOY/5704-state/docker-compose.yml" /opt/ai/state/
if [[ ! -f /opt/ai/state/.env ]]; then
cp "$DEPLOY/5704-state/.env.example" /opt/ai/state/.env
echo "Created /opt/ai/state/.env — set POSTGRES_PASSWORD"
fi
cp "$DEPLOY/5702-inference/docker-compose.yml" /opt/ai/inference/
cp "$DEPLOY/5703-agent/agent.py" /opt/ai/agent/config/
cp "$DEPLOY/5703-agent/docker-compose.yml" /opt/ai/agent/
if [[ ! -f /opt/ai/agent/.env ]]; then
cp "$DEPLOY/5703-agent/.env.example" /opt/ai/agent/.env
echo "Created /opt/ai/agent/.env — set MCP_URL, INF_URL, POOL_ADDRESS"
fi
echo "Done. Next: edit /opt/ai/state/.env and /opt/ai/agent/.env, then start each stack (see docs/02-architecture/AI_AGENTS_57XX_DEPLOYMENT_TASKS.md)."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Access Omada Cloud Controller and check firewall rules for Blockscout
# This script helps automate access to the cloud controller web interface

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Activate storage on r630-01 (local-lvm and thin1)
# Usage: ./scripts/activate-storage-r630-01.sh

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Activate storage on r630-02 (local-lvm and thin1-thin6)
# Usage: ./scripts/activate-storage-r630-02.sh

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Analyze all Cloudflare domains for tunnel configurations and issues

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env bash
# Apply remaining operator fixes from DETAILED_GAPS_AND_ISSUES_LIST.md §12:
# 1) Nginx fix + deploy explorer config on VMID 5000 (via Proxmox host)
# 2) Token-aggregation: print steps (DB create, migrations, restart)
#
# Usage: ./scripts/apply-remaining-operator-fixes.sh [--nginx-only] [--dry-run]
# Requires: SSH to Proxmox host (PROXMOX_HOST_R630_02 or 192.168.11.12), or run nginx script inside VMID 5000 manually.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
DRY_RUN=false
NGINX_ONLY=false
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=true
[[ "$a" == "--nginx-only" ]] && NGINX_ONLY=true
done
echo "=== Apply remaining operator fixes (§12) ==="
echo " dry-run=$DRY_RUN nginx-only=$NGINX_ONLY"
echo ""
# 1) Nginx fix on VMID 5000
NGINX_SCRIPT="${PROJECT_ROOT}/explorer-monorepo/scripts/fix-nginx-conflicts-vmid5000.sh"
DEPLOY_CONFIG="${PROJECT_ROOT}/explorer-monorepo/scripts/deploy-explorer-config-to-vmid5000.sh"
if [[ -f "$NGINX_SCRIPT" ]]; then
echo "--- 1. Nginx fix (VMID 5000) ---"
if [[ "$DRY_RUN" == true ]]; then
echo " [DRY-RUN] Would run fix-nginx-conflicts-vmid5000.sh inside VMID 5000 via: ssh root@${PROXMOX_HOST} 'pct exec 5000 -- bash -s' < $NGINX_SCRIPT"
else
if ssh -o ConnectTimeout=10 -o BatchMode=yes "root@${PROXMOX_HOST}" "pct exec 5000 -- true" 2>/dev/null; then
echo " Pushing and running nginx fix inside VMID 5000..."
ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- bash -s" < "$NGINX_SCRIPT" && echo " ✅ Nginx fix done" || { echo " ⚠ Nginx fix failed (check output above)"; exit 1; }
if [[ -f "$DEPLOY_CONFIG" ]]; then
echo " Deploying explorer config..."
CONFIG_SRC="${PROJECT_ROOT}/explorer-monorepo/backend/api/rest/config/metamask"
if [[ -f "$CONFIG_SRC/DUAL_CHAIN_TOKEN_LIST.tokenlist.json" && -f "$CONFIG_SRC/DUAL_CHAIN_NETWORKS.json" ]]; then
TMP_DEPLOY=$(mktemp -d) && trap "rm -rf $TMP_DEPLOY" EXIT
cp "$CONFIG_SRC/DUAL_CHAIN_TOKEN_LIST.tokenlist.json" "$CONFIG_SRC/DUAL_CHAIN_NETWORKS.json" "$TMP_DEPLOY/"
if scp -o ConnectTimeout=10 "$TMP_DEPLOY/DUAL_CHAIN_TOKEN_LIST.tokenlist.json" "$TMP_DEPLOY/DUAL_CHAIN_NETWORKS.json" "root@${PROXMOX_HOST}:/tmp/" 2>/dev/null; then
ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- mkdir -p /var/www/html/config && pct push 5000 /tmp/DUAL_CHAIN_TOKEN_LIST.tokenlist.json /var/www/html/config/DUAL_CHAIN_TOKEN_LIST.tokenlist.json && pct push 5000 /tmp/DUAL_CHAIN_NETWORKS.json /var/www/html/config/DUAL_CHAIN_NETWORKS.json" 2>/dev/null && echo " ✅ Explorer config deployed (via Proxmox host)" || echo " ⚠ pct push failed (run deploy script from Proxmox host)"
else
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}" EXEC_MODE=ssh bash "$DEPLOY_CONFIG" 2>/dev/null && echo " ✅ Explorer config deployed" || echo " ⚠ Deploy failed (run manually: EXEC_MODE=ssh EXPLORER_IP=192.168.11.140 bash $DEPLOY_CONFIG)"
fi
else
echo " ⚠ Config files not found in $CONFIG_SRC"
fi
fi
else
echo " ⚠ SSH to root@${PROXMOX_HOST} failed or pct exec 5000 not available. Run inside VMID 5000 manually:"
echo " bash $NGINX_SCRIPT"
echo " Then deploy config: bash $DEPLOY_CONFIG"
fi
fi
echo ""
else
echo " ⚠ Nginx script not found: $NGINX_SCRIPT"
echo ""
fi
# 2) Token-aggregation (steps only unless we add automation)
if [[ "$NGINX_ONLY" != true ]]; then
echo "--- 2. Token-aggregation (see §2 in DETAILED_GAPS) ---"
echo " If /health returns 'database token_aggregation does not exist':"
echo " 1) On VMID 5000 (or PostgreSQL host): createdb -U postgres token_aggregation"
echo " 2) Run migrations: cd smom-dbis-138/services/token-aggregation && DATABASE_URL=postgresql://... bash scripts/run-migrations.sh"
echo " 3) Restart: systemctl restart token-aggregation"
echo " 4) Verify: curl -s http://192.168.11.140:3001/health | jq ."
echo ""
fi
echo "=== Done ==="

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Comprehensive IP address audit for all VMs/containers across all Proxmox hosts
set -euo pipefail

View File

@@ -13,13 +13,15 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_ML110:-192.168.11.10}}"
UNIT="/etc/systemd/system/besu-rpc.service"
VMIDS=(2400 2401 2402 2500 2501 2502 2503 2504 2505 2506 2507 2508)
ssh_pve() {
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=5 "root@${PROXMOX_HOST}" "$@"
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=5 "${PROXMOX_SSH_USER}@${PROXMOX_HOST}" "$@"
}
extract_xmx_gb() {

View File

@@ -1,10 +1,10 @@
#!/usr/bin/env bash
# Audit storage node restrictions vs RPC VMID placement.
#
# Requires SSH access to a Proxmox node that can run pct/pvesm and see /etc/pve/storage.cfg.
# Runs per-host: each VMID is checked on its actual Proxmox host (no cross-node config lookup).
#
# Usage:
# PROXMOX_HOST=${PROXMOX_HOST_ML110:-192.168.11.10} ./scripts/audit-proxmox-rpc-storage.sh
# ./scripts/audit-proxmox-rpc-storage.sh
# PROXMOX_HOST=192.168.11.10 ./scripts/audit-proxmox-rpc-storage.sh # single-host mode (legacy)
set -euo pipefail
@@ -13,42 +13,100 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# SSH user for shell (PROXMOX_USER in .env may be root@pam for API)
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
VMIDS=(2400 2401 2402 2500 2501 2502 2503 2504 2505 2506 2507 2508)
# VMID:host (same mapping as check-rpc-vms-health.sh) — only VMIDs that exist on these hosts
RPC_NODES=(
"2101:$R630_01"
"2201:$R630_02"
"2301:$ML110"
"2303:$R630_02"
"2304:$ML110"
"2305:$ML110"
"2306:$ML110"
"2307:$ML110"
"2308:$ML110"
"2400:$ML110"
"2401:$R630_02"
"2402:$ML110"
"2403:$ML110"
)
# If PROXMOX_HOST is set, run single-host legacy mode (one host only)
PROXMOX_HOST="${PROXMOX_HOST:-}"
ssh_pve() {
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=5 "root@${PROXMOX_HOST}" "$@"
local host="$1"
shift
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -o ConnectTimeout=5 "${PROXMOX_SSH_USER}@${host}" "$@"
}
echo "=== Proxmox RPC Storage Audit ==="
echo "Host: ${PROXMOX_HOST}"
echo
if [[ -n "${PROXMOX_HOST:-}" ]]; then
echo "=== Proxmox RPC Storage Audit (single host: ${PROXMOX_HOST}) ==="
echo ""
NODE="$(ssh_pve "$PROXMOX_HOST" "hostname")"
echo "Node name: ${NODE}"
echo ""
echo "=== Storages active on this node (pvesm) ==="
ssh_pve "$PROXMOX_HOST" "pvesm status" | sed 's/^/ /'
echo ""
echo "=== storage.cfg: storages with node restrictions ==="
ssh_pve "$PROXMOX_HOST" "awk '
/^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
/^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
/^[[:space:]]*nodes /{nodes=\$2}
/^[[:space:]]*nodes /{print t \":\" s \" nodes=\" nodes}
' /etc/pve/storage.cfg" | sed 's/^/ /'
echo ""
# Single host: only VMIDs that belong on this node (we don't have full map in legacy mode, so list all and skip missing)
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host <<< "$entry"
[[ "$host" != "$PROXMOX_HOST" ]] && continue
echo "--- VMID ${vmid} ---"
ssh_pve "$host" "pct status ${vmid} 2>&1; pct config ${vmid} 2>&1 | egrep -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
echo ""
done
else
echo "=== Proxmox RPC Storage Audit (all RPC hosts) ==="
echo ""
NODE="$(ssh_pve "hostname")"
echo "Node name: ${NODE}"
echo
# Collect unique hosts and show storage per host
declare -A seen_host
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host <<< "$entry"
[[ -n "${seen_host[$host]:-}" ]] && continue
seen_host[$host]=1
NODE="$(ssh_pve "$host" "hostname" 2>/dev/null || echo "$host")"
echo "--- Host $host (node: $NODE) ---"
echo "Storages:"
ssh_pve "$host" "pvesm status" 2>/dev/null | sed 's/^/ /'
echo ""
done
echo "=== Storages active on this node (pvesm) ==="
ssh_pve "pvesm status" | sed 's/^/ /'
echo
echo "=== storage.cfg (from first host) ==="
first_host="${R630_01}"
ssh_pve "$first_host" "awk '
/^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
/^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
/^[[:space:]]*nodes /{nodes=\$2}
/^[[:space:]]*nodes /{print t \":\" s \" nodes=\" nodes}
' /etc/pve/storage.cfg" 2>/dev/null | sed 's/^/ /'
echo ""
echo "=== storage.cfg: storages with node restrictions ==="
ssh_pve "awk '
/^dir: /{s=\$2; t=\"dir\"; nodes=\"\"}
/^lvmthin: /{s=\$2; t=\"lvmthin\"; nodes=\"\"}
/^[[:space:]]*nodes /{nodes=\$2}
/^[[:space:]]*nodes /{print t \":\" s \" nodes=\" nodes}
' /etc/pve/storage.cfg" | sed 's/^/ /'
echo
echo "=== RPC VMID -> rootfs storage mapping ==="
for v in "${VMIDS[@]}"; do
echo "--- VMID ${v} ---"
ssh_pve "pct status ${v} 2>&1; pct config ${v} 2>&1 | egrep -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
echo
done
echo "=== RPC VMID -> rootfs storage mapping (per host) ==="
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host <<< "$entry"
echo "--- VMID ${vmid} (host $host) ---"
ssh_pve "$host" "pct status ${vmid} 2>&1; pct config ${vmid} 2>&1 | egrep -i 'hostname:|rootfs:|memory:|swap:|cores:|net0:'" | sed 's/^/ /'
echo ""
done
fi
cat <<'NOTE'
NOTE:

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Collect enodes from all nodes and generate allowlist
# Usage: Update NODES array with your node IPs, then: bash collect-all-enodes.sh

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Deploy corrected allowlist files to all Proxmox containers
# Usage: bash besu-deploy-allowlist.sh <static-nodes.json> <permissions-nodes.toml> [proxmox-host]

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Extract enode from Besu nodekey file using Besu CLI
# Usage: DATA_PATH=/data/besu NODE_IP=${IP_SERVICE_13:-192.168.11.13} bash extract-enode-from-nodekey.sh

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Extract enode from running Besu node via JSON-RPC
# Usage: RPC_URL=http://${IP_SERVICE_13:-192.168.11.13}:8545 NODE_IP=${IP_SERVICE_13:-192.168.11.13} bash extract-enode-from-rpc.sh

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Generate Besu allowlist files from collected enodes
# Usage: bash besu-generate-allowlist.sh <collected-enodes.txt> [validator-ips...]

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Validate all enodes in generated files
# Usage: bash besu-validate-allowlist.sh <static-nodes.json> <permissions-nodes.toml>

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Check peer connections on Besu node
# Usage: bash besu-verify-peers.sh <rpc-url>
# Example: bash besu-verify-peers.sh http://${IP_SERVICE_13:-192.168.11.13}:8545

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Enable TRACE API on the public RPC node (VMID 2201) so Blockscout can index internal
# transactions and block rewards. Run from Proxmox host that has VMID 2201, or set
# RPC_VM_2201_HOST=root@192.168.11.12 to run via SSH.
# See: explorer-monorepo/docs/RPC_FUNCTIONALITY_AND_BLOCKSCOUT_TRACE.md
set -euo pipefail
VMID="${RPC_VMID_2201:-2201}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[ -f "$REPO_ROOT/config/ip-addresses.conf" ] && source "$REPO_ROOT/config/ip-addresses.conf" 2>/dev/null || true
RPC_HOST="${RPC_VM_2201_HOST:-root@${PROXMOX_R630_02:-192.168.11.12}}"
[[ "$RPC_HOST" != *"@"* ]] && RPC_HOST="root@$RPC_HOST"
run_in_vmid() {
local cmd="$1"
if command -v pct &>/dev/null && pct list 2>/dev/null | grep -q "^$VMID "; then
pct exec "$VMID" -- bash -c "$cmd"
else
ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 "$RPC_HOST" "pct exec $VMID -- bash -c $(printf '%q' "$cmd")"
fi
}
echo "=============================================="
echo "Enable TRACE API on VMID $VMID (public RPC)"
echo "=============================================="
if ! command -v pct &>/dev/null || ! pct list 2>/dev/null | grep -q "^$VMID "; then
echo "Running via SSH: $RPC_HOST"
fi
# Service uses config-rpc-public.toml (see besu-rpc.service ExecStart); try that first
CONFIG_PATHS="/etc/besu/config-rpc-public.toml /etc/besu/config-rpc.toml /etc/besu/config.toml"
CONFIG_FILE=""
for p in $CONFIG_PATHS; do
if run_in_vmid "test -f $p" 2>/dev/null; then
CONFIG_FILE="$p"
break
fi
done
if [ -z "$CONFIG_FILE" ]; then
echo "No Besu config found in VMID $VMID. List /etc/besu/:"
run_in_vmid "ls -la /etc/besu/" 2>/dev/null || true
exit 1
fi
echo "Config: $CONFIG_FILE"
if run_in_vmid "grep -q '\"TRACE\"' $CONFIG_FILE" 2>/dev/null; then
echo "TRACE already present. No change."
exit 0
fi
# Add TRACE (match both ["ETH","NET","WEB3"] and ["ETH","NET","WEB3","TXPOOL","ADMIN"])
run_in_vmid "sed -i 's/rpc-http-api=\[\"ETH\",\"NET\",\"WEB3\"\]/rpc-http-api=[\"ETH\",\"NET\",\"WEB3\",\"TRACE\"]/' $CONFIG_FILE" 2>/dev/null || true
run_in_vmid "sed -i 's/rpc-http-api=\[\"ETH\",\"NET\",\"WEB3\",\"TXPOOL\",\"ADMIN\"\]/rpc-http-api=[\"ETH\",\"NET\",\"WEB3\",\"TXPOOL\",\"ADMIN\",\"TRACE\"]/' $CONFIG_FILE" 2>/dev/null || true
run_in_vmid "sed -i 's/rpc-ws-api=\[\"ETH\",\"NET\",\"WEB3\"\]/rpc-ws-api=[\"ETH\",\"NET\",\"WEB3\",\"TRACE\"]/' $CONFIG_FILE" 2>/dev/null || true
if ! run_in_vmid "grep -q '\"TRACE\"' $CONFIG_FILE" 2>/dev/null; then
echo "Could not add TRACE. Edit $CONFIG_FILE inside VMID $VMID and add TRACE, then restart Besu."
exit 1
fi
echo "TRACE added. Restarting Besu..."
run_in_vmid "systemctl restart besu-rpc 2>/dev/null || systemctl restart besu 2>/dev/null || true"
sleep 5
RPC_IP="${RPC_PUBLIC_1:-192.168.11.221}"
if curl -sS --max-time 10 -X POST -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"trace_block","params":["0x1"],"id":1}' \
"http://${RPC_IP}:8545" | grep -q '"result"'; then
echo "OK: trace_block returns result (TRACE API enabled)."
else
echo "WARN: trace_block still failed. Check: pct exec $VMID -- journalctl -u besu-rpc -n 30"
fi
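After the script succeeds, the edited API lists can be confirmed from the Proxmox host (assuming the first config candidate, config-rpc-public.toml, was the one matched):

```bash
# Expect rpc-http-api=[...,"TRACE"] and rpc-ws-api=[...,"TRACE"]
ssh root@192.168.11.12 "pct exec 2201 -- grep -E 'rpc-(http|ws)-api' /etc/besu/config-rpc-public.toml"
```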

scripts/burn-weth9-deployer.sh Executable file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env bash
# Unwrap (burn) all WETH9 in the deployer wallet on Chain 138.
# Calls WETH9.withdraw(balance) so WETH9 is burned and ETH is returned to the deployer.
#
# Requires: PRIVATE_KEY and RPC_URL_138 (or CHAIN138_RPC_URL) in smom-dbis-138/.env
# Usage: ./scripts/burn-weth9-deployer.sh [--dry-run]
# ./scripts/burn-weth9-deployer.sh <0xADDRESS> # Check WETH9 balance of ADDRESS only (read-only)
# If the UI shows WETH9 in a different wallet, that wallet's key must be in PRIVATE_KEY to unwrap.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
if [[ -f smom-dbis-138/.env ]]; then
set -a
source smom-dbis-138/.env
set +a
fi
RPC="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
GAS_PRICE="${GAS_PRICE:-1000000000}"
GAS_LIMIT="${GAS_LIMIT:-150000}"
WETH9="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
DRY_RUN=false
CHECK_ADDRESS=""
for a in "$@"; do
case "$a" in
--dry-run) DRY_RUN=true;;
0x*) [[ -z "$CHECK_ADDRESS" ]] && CHECK_ADDRESS="$a";;
esac
done
DEPLOYER=""
if [[ -n "${PRIVATE_KEY:-}" ]]; then
DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || true)
fi
if [[ -n "$CHECK_ADDRESS" ]]; then
DEPLOYER="$CHECK_ADDRESS"
if [[ -z "$DEPLOYER" ]]; then
echo "Error: invalid address $CHECK_ADDRESS"
exit 1
fi
echo "=== WETH9 balance check (read-only) ==="
echo "Address: $DEPLOYER"
echo "RPC: $RPC"
echo "WETH9: $WETH9"
echo ""
BALANCE_WEI=$(cast call "$WETH9" "balanceOf(address)(uint256)" "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "0")
if [[ -z "$BALANCE_WEI" ]] || [[ "$BALANCE_WEI" == "0" ]]; then
echo "WETH9 balance: 0"
else
BALANCE_ETH=$(cast from-wei "$BALANCE_WEI" ether 2>/dev/null || echo "?")
echo "WETH9 balance: $BALANCE_WEI wei ($BALANCE_ETH WETH)"
echo ""
echo "To unwrap: run this script with PRIVATE_KEY for this address (e.g. set in smom-dbis-138/.env), or call withdraw($BALANCE_WEI) from this wallet."
fi
exit 0
fi
if [[ -z "$DEPLOYER" ]]; then
echo "Error: could not derive deployer address. Set PRIVATE_KEY in smom-dbis-138/.env"
exit 1
fi
echo "=== Burn all WETH9 in deployer wallet ==="
echo "Deployer: $DEPLOYER"
echo "RPC: $RPC"
echo "WETH9: $WETH9"
echo ""
BALANCE_WEI=$(cast call "$WETH9" "balanceOf(address)(uint256)" "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "0")
if [[ -z "$BALANCE_WEI" ]] || [[ "$BALANCE_WEI" == "0" ]]; then
echo "Deployer WETH9 balance is 0. Nothing to burn."
exit 0
fi
# Human-readable for display (18 decimals)
BALANCE_ETH=$(cast from-wei "$BALANCE_WEI" ether 2>/dev/null || echo "?")
echo "WETH9 balance: $BALANCE_WEI wei ($BALANCE_ETH WETH)"
echo ""
if $DRY_RUN; then
echo "[dry-run] Would call WETH9.withdraw($BALANCE_WEI) to burn all WETH9 and receive $BALANCE_ETH ETH."
echo "[dry-run] Command: cast send $WETH9 \"withdraw(uint256)\" $BALANCE_WEI --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE --gas-limit $GAS_LIMIT"
exit 0
fi
echo "Calling WETH9.withdraw($BALANCE_WEI)..."
if cast send "$WETH9" "withdraw(uint256)" "$BALANCE_WEI" \
--rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-price "$GAS_PRICE" --gas-limit "$GAS_LIMIT"; then
echo "Done. All WETH9 in deployer wallet has been unwrapped (burned); ETH returned to $DEPLOYER"
else
echo "Transaction failed. Check gas, nonce, or RPC."
exit 1
fi

scripts/burn-weth9-from-keys.sh Executable file
View File

@@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Burn all WETH9 from every address whose private key is provided.
# Keys: set BURN_KEYS_FILE to a path with one PRIVATE_KEY=0x... per line,
# or export BURN_KEY_1=0x... BURN_KEY_2=0x... etc.
# Requires: RPC_URL_138 (or CHAIN138_RPC_URL). Keys from smom-dbis-138/.env or BURN_KEYS_FILE / BURN_KEY_*.
# Usage: ./scripts/burn-weth9-from-keys.sh [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
if [[ -f smom-dbis-138/.env ]]; then
set -a
source smom-dbis-138/.env
set +a
fi
RPC="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
GAS_PRICE="${GAS_PRICE:-1000000000}"
GAS_LIMIT="${GAS_LIMIT:-150000}"
WETH9="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
DRY_RUN=false
for a in "$@"; do
case "$a" in
--dry-run) DRY_RUN=true;;
esac
done
# Collect private keys: BURN_KEY_1, BURN_KEY_2, ... and/or PRIVATE_KEY, and/or lines in BURN_KEYS_FILE
KEYS=()
if [[ -n "${PRIVATE_KEY:-}" ]]; then
KEYS+=( "$PRIVATE_KEY" )
fi
for i in $(seq 1 50); do
v="BURN_KEY_$i"
[[ -n "${!v:-}" ]] && KEYS+=( "${!v}" )
done
if [[ -n "${BURN_KEYS_FILE:-}" ]] && [[ -f "$BURN_KEYS_FILE" ]]; then
while IFS= read -r line || [[ -n "$line" ]]; do
line=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
[[ -z "$line" || "$line" =~ ^# ]] && continue
if [[ "$line" =~ PRIVATE_KEY=(.+) ]]; then
KEYS+=( "$(echo "${BASH_REMATCH[1]}" | sed "s/^['\"]//;s/['\"]$//")" )
fi
done < "$BURN_KEYS_FILE"
fi
if [[ ${#KEYS[@]} -eq 0 ]]; then
echo "No private keys found. Set PRIVATE_KEY, BURN_KEY_1, BURN_KEY_2, ... or BURN_KEYS_FILE (file with PRIVATE_KEY=0x... per line)."
exit 1
fi
echo "=== Burn WETH9 from all key-controlled addresses ==="
echo "RPC: $RPC"
echo "WETH9: $WETH9"
echo "Keys loaded: ${#KEYS[@]}"
echo ""
BURNED=0
for KEY in "${KEYS[@]}"; do
[[ -z "$KEY" ]] && continue
ADDR=$(cast wallet address --private-key "$KEY" 2>/dev/null || true)
[[ -z "$ADDR" ]] && continue
BALANCE_WEI=$(cast call "$WETH9" "balanceOf(address)(uint256)" "$ADDR" --rpc-url "$RPC" 2>/dev/null || echo "0")
[[ -z "$BALANCE_WEI" ]] && BALANCE_WEI=0
if [[ "$BALANCE_WEI" == "0" ]]; then
echo "[$ADDR] Balance 0, skip"
continue
fi
BALANCE_ETH=$(cast from-wei "$BALANCE_WEI" ether 2>/dev/null || echo "?")
echo "[$ADDR] WETH9: $BALANCE_WEI ($BALANCE_ETH WETH)"
if $DRY_RUN; then
echo " [dry-run] Would withdraw($BALANCE_WEI)"
((BURNED++)) || true
continue
fi
if cast send "$WETH9" "withdraw(uint256)" "$BALANCE_WEI" \
--rpc-url "$RPC" --private-key "$KEY" --legacy --gas-price "$GAS_PRICE" --gas-limit "$GAS_LIMIT" 2>&1; then
echo " Burned."
((BURNED++)) || true
else
echo " Failed."
fi
echo ""
done
echo "Done. Burned from $BURNED address(es)."
echo "To zero total supply, every WETH9 holder must withdraw; use ./scripts/weth9-list-holders.sh to list holders."

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Check and start Besu RPC on container 2101 (Chain 138 Core — 192.168.11.211).
# Run from a host that can SSH to Proxmox (e.g. root@192.168.11.11).
# Usage: ./scripts/check-and-start-rpc-2101.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_USER="${PROXMOX_USER:-root}"
# RPC 2101 is on R630 (192.168.11.11) per clear-all-transaction-pools.sh
for HOST in "${PROXMOX_R630:-192.168.11.11}" "${PROXMOX_ML110:-192.168.11.10}"; do
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "${PROXMOX_USER}@${HOST}" "pct list 2>/dev/null | grep -q '2101'"; then
echo "RPC container 2101 found on $HOST. Checking service..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no "${PROXMOX_USER}@${HOST}" \
"pct exec 2101 -- bash -c '
echo \"--- systemctl status ---\"
systemctl status besu-rpc-core 2>/dev/null || systemctl status besu-rpc 2>/dev/null || systemctl status besu-rpc.service 2>/dev/null || echo \"No besu-rpc* unit found\"
echo \"\"
echo \"--- listening on 8545? ---\"
ss -tlnp 2>/dev/null | grep 8545 || netstat -tlnp 2>/dev/null | grep 8545 || echo \"Nothing on 8545\"
echo \"\"
echo \"--- starting besu-rpc-core (or besu-rpc) ---\"
systemctl start besu-rpc-core 2>/dev/null || systemctl start besu-rpc 2>/dev/null || systemctl start besu-rpc.service 2>/dev/null || true
sleep 3
systemctl is-active besu-rpc-core 2>/dev/null || systemctl is-active besu-rpc 2>/dev/null || echo inactive
ss -tlnp 2>/dev/null | grep 8545 || true
'" 2>&1
echo ""
echo "Wait 1030s then test: curl -s -X POST http://192.168.11.211:8545 -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}'"
exit 0
fi
done
echo "RPC container 2101 not found on Proxmox hosts." >&2
exit 1

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Check network connectivity to Chain 138 RPC endpoints.
# Usage: ./scripts/check-network-rpc-138.sh [RPC_HOST] [PORT]
# Default: 192.168.11.211 8545 (Core RPC — VMID 2101)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${1:-${RPC_CORE_1:-192.168.11.211}}"
PORT="${2:-8545}"
RPC_URL="http://${HOST}:${PORT}"
echo "=== Network connection check: $HOST (port $PORT) ==="
echo ""
# 1. Route
echo "--- Route ---"
if command -v ip &>/dev/null; then
ip route get "$HOST" 2>/dev/null || echo "No route to $HOST"
else
route -n 2>/dev/null | head -5
fi
echo ""
# 2. Ping (L3)
echo "--- Ping (L3) ---"
if ping -c 2 -W 2 "$HOST" 2>&1; then
echo "Ping: OK"
else
echo "Ping: FAIL (host or ICMP unreachable)"
fi
echo ""
# 3. TCP port (L4)
echo "--- TCP port $PORT (L4) ---"
if command -v nc &>/dev/null; then
if nc -zv -w 3 "$HOST" "$PORT" 2>&1; then
echo "Port $PORT: OPEN"
else
echo "Port $PORT: REFUSED or FILTERED (nothing listening or firewall)"
fi
else
echo "nc not found; trying curl..."
curl -s -m 3 -o /dev/null -w "curl errno: %{errno}\n" "$RPC_URL" 2>&1 || true
fi
echo ""
# 4. JSON-RPC (application)
echo "--- JSON-RPC eth_chainId ---"
resp=$(curl -s -m 5 -X POST "$RPC_URL" -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' 2>&1) || true
if echo "$resp" | grep -q '"result"'; then
echo "RPC: OK — $resp"
else
echo "RPC: FAIL — ${resp:-empty or connection failed}"
fi
echo ""
echo "Summary:"
echo " - Ping OK + Port REFUSED = host up, service not listening (start Besu RPC in container)."
echo " - Ping OK + Port OPEN + RPC FAIL = service up but not responding JSON-RPC."
echo " - Ping FAIL = host down or network/firewall blocking ICMP."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Clean up secrets from documentation files
# Replaces actual secret values with placeholders while preserving structure

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Markdown Files Cleanup Script
# Automatically organizes markdown files based on analysis

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Clear transaction pools on all Besu nodes (RPC and Validators)
# This script clears transaction pool databases to remove stuck transactions
@@ -105,10 +105,9 @@ for validator in "${VALIDATORS[@]}"; do
clear_node_pool "$VMID" "$HOST" "$TYPE"
done
# Clear RPC (if accessible)
log_section "Clearing RPC Transaction Pool"
# Clear RPC Core (2101)
log_section "Clearing RPC Transaction Pool (2101)"
# Try to find RPC on ml110 first
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "${PROXMOX_USER}@${PROXMOX_ML110}" \
"pct list | grep -q '2101'" 2>/dev/null; then
clear_node_pool 2101 "$PROXMOX_ML110" "RPC"
@@ -119,6 +118,16 @@ else
log_warn "RPC node (2101) not found on either host"
fi
# Clear RPC Public (2201) — often used when Core is down; ensures deploy txs not stuck
log_section "Clearing RPC Public (2201)"
R630_02="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "${PROXMOX_USER}@${R630_02}" \
"pct list | grep -q '2201'" 2>/dev/null; then
clear_node_pool 2201 "$R630_02" "RPC Public"
else
log_warn "RPC Public (2201) not found on ${R630_02}"
fi
log_section "Transaction Pool Clear Complete"
echo "Next steps:"

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Clear transaction pool on Public RPC node (VMID 2201 — 192.168.11.221).
# Use when deploying via RPC_URL_138_PUBLIC and you get "Known transaction".
# Usage: ./scripts/clear-rpc-2201-txpool.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_USER="${PROXMOX_USER:-root}"
# 2201 is on r630-02 per health/check-rpc-vms-health.sh, verify-backend-vms.sh
HOST="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
VMID=2201
SSH_TARGET="${PROXMOX_USER}@${HOST}"
echo "=== Clear Public RPC (VMID 2201) transaction pool ==="
echo "Host: $HOST"
echo ""
if ! ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "$SSH_TARGET" "pct list 2>/dev/null | grep -q '2201'"; then
echo "VMID 2201 not found on $HOST. Abort." >&2
exit 1
fi
echo "Stopping besu-rpc..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no "$SSH_TARGET" \
"pct exec $VMID -- systemctl stop besu-rpc 2>/dev/null || pct exec $VMID -- systemctl stop besu-rpc.service 2>/dev/null || true" 2>&1 | grep -v "Configuration file" || true
sleep 2
echo "Clearing tx pool database..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no "$SSH_TARGET" \
"pct exec $VMID -- bash -c '
for d in /data/besu /var/lib/besu; do
[ -d \"\$d\" ] && find \"\$d\" -type d -name \"*pool*\" -exec rm -rf {} \; 2>/dev/null; find \"\$d\" -type f -name \"*transaction*\" -delete 2>/dev/null; find \"\$d\" -type f -name \"*txpool*\" -delete 2>/dev/null; true
done
'" 2>&1 | grep -v "Configuration file" || true
echo "Pool cleared."
echo "Starting besu-rpc..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no "$SSH_TARGET" \
"pct exec $VMID -- systemctl start besu-rpc 2>/dev/null || pct exec $VMID -- systemctl start besu-rpc.service 2>/dev/null || true" 2>&1 | grep -v "Configuration file" || true
sleep 3
echo ""
echo "Done. Wait 30s then run: ./scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh"

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Clear RPC node database completely to remove stuck transactions
# This clears the entire Besu database, not just transaction pool

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Add Cloudflare DNS record for Sankofa Studio: studio.sankofa.nexus
# A record: studio → 76.53.10.36 (or PUBLIC_IP). Use CNAME to tunnel if you use Cloudflare Tunnel.
#
# Usage: bash scripts/cloudflare/add-studio-sankofa-dns.sh
# Requires: .env with CLOUDFLARE_API_TOKEN (or CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY)
# CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS for zone sankofa.nexus
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
source config/ip-addresses.conf 2>/dev/null || true
[ -f .env ] && set +u && source .env 2>/dev/null || true && set -u
ZONE_ID="${CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS:-}"
PUBLIC_IP="${PUBLIC_IP:-76.53.10.36}"
NAME="studio"
if [ -n "${CLOUDFLARE_API_TOKEN:-}" ]; then
AUTH_H=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ -n "${CLOUDFLARE_API_KEY:-}" ] && [ -n "${CLOUDFLARE_EMAIL:-}" ]; then
AUTH_H=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
else
echo "❌ Set CLOUDFLARE_API_TOKEN or (CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY) in .env"
exit 1
fi
[ -z "$ZONE_ID" ] && { echo "❌ Set CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS in .env"; exit 1; }
echo "Adding DNS for ${NAME}.sankofa.nexus → $PUBLIC_IP (zone: sankofa.nexus)"
DATA=$(jq -n --arg name "$NAME" --arg content "$PUBLIC_IP" \
'{type:"A",name:$name,content:$content,ttl:1,proxied:true}')
EXISTING=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records?name=${NAME}.sankofa.nexus" \
"${AUTH_H[@]}" -H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING" | jq -r '.result[0].id // empty')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
UPD=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records/${RECORD_ID}" \
"${AUTH_H[@]}" -H "Content-Type: application/json" -d "$DATA")
if echo "$UPD" | jq -e '.success == true' >/dev/null 2>&1; then
echo "${NAME}.sankofa.nexus: Updated A → $PUBLIC_IP"
else
echo "❌ Update failed: $(echo "$UPD" | jq -r '.errors[0].message // "unknown"' 2>/dev/null)"
exit 1
fi
else
CR=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records" \
"${AUTH_H[@]}" -H "Content-Type: application/json" -d "$DATA")
if echo "$CR" | jq -e '.success == true' >/dev/null 2>&1; then
echo "${NAME}.sankofa.nexus: Created A → $PUBLIC_IP"
else
echo "❌ Create failed: $(echo "$CR" | jq -r '.errors[0].message // "unknown"' 2>/dev/null)"
exit 1
fi
fi
echo "Done."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete all Blockscout setup tasks - automated and verification
# This script completes all possible automated tasks

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete All Installations and Tasks - Final Execution
# Installs PostgreSQL, Redis, configures databases, and completes all remaining tasks

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete All Remaining Tasks - Final Comprehensive Script
# Creates all scripts and completes all tasks that can be done without service installation

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete All Incomplete Tasks - Comprehensive Parallel Execution
# Executes all pending tasks across all 33 containers in parallel

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete All Incomplete Tasks in Parallel Execution Mode
# This script executes all pending tasks in parallel for maximum efficiency

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Complete Blockscout Firewall Fix - Run all checks and tests
# This script performs comprehensive checks and provides final status

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Complete next steps after IPFS logo wiring:
# 1. Run migration 0013 (update token logo_url in explorer DB)
# 2. Build token-aggregation (picks up new canonical-tokens IPFS URLs)
# 3. Validate token list schema
#
# Prerequisites: DATABASE_URL in smom-dbis-138/services/token-aggregation/.env (or root .env)
# Usage: ./scripts/complete-ipfs-logo-next-steps.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
TA_DIR="$REPO_ROOT/smom-dbis-138/services/token-aggregation"
MIGRATION_0013="$REPO_ROOT/explorer-monorepo/backend/database/migrations/0013_update_token_logos_ipfs.up.sql"
log() { echo "[$(date +%H:%M:%S)] $*"; }
err() { echo "ERROR: $*" >&2; }
# Load env
[[ -f "$REPO_ROOT/.env" ]] && set -a && source "$REPO_ROOT/.env" && set +a
[[ -f "$TA_DIR/.env" ]] && set -a && source "$TA_DIR/.env" && set +a
DATABASE_URL="${DATABASE_URL:-}"
echo "=========================================="
echo "Complete IPFS Logo Next Steps"
echo "=========================================="
echo ""
# Step 1: Run migration 0013
log "Step 1: Running migration 0013 (update token logos to IPFS)..."
if [[ ! -f "$MIGRATION_0013" ]]; then
err "Migration file not found: $MIGRATION_0013"
exit 1
fi
if [[ -z "$DATABASE_URL" ]]; then
log " SKIP: DATABASE_URL not set (set in .env or smom-dbis-138/services/token-aggregation/.env)"
log " To run manually: psql \$DATABASE_URL -f $MIGRATION_0013"
else
if command -v psql &>/dev/null; then
if psql "$DATABASE_URL" -f "$MIGRATION_0013" 2>/dev/null; then
log " OK: Migration 0013 completed"
else
err " Migration 0013 failed (tokens table may not exist yet)"
log " Run when explorer DB is available: psql \$DATABASE_URL -f $MIGRATION_0013"
fi
else
log " SKIP: psql not found"
fi
fi
echo ""
# Step 2: Build token-aggregation
log "Step 2: Building token-aggregation..."
if [[ -d "$TA_DIR" ]]; then
(cd "$TA_DIR" && pnpm run build 2>/dev/null || npm run build 2>/dev/null) && log " OK: Built" || err " Build failed"
else
log " SKIP: token-aggregation dir not found"
fi
echo ""
# Step 3: Validate token list
log "Step 3: Validating token list schema..."
TOKEN_LISTS_DIR="${TOKEN_LISTS_DIR:-$REPO_ROOT/../token-lists}"
if [[ -d "$TOKEN_LISTS_DIR" ]] && [[ -f "$REPO_ROOT/token-lists/lists/dbis-138.tokenlist.json" ]]; then
(cd "$TOKEN_LISTS_DIR" && node -e "
const Ajv = require('ajv');
const addFormats = require('ajv-formats');
const schema = require('./src/tokenlist.schema.json');
const ajv = new Ajv({ allErrors: true });
addFormats(ajv);
const validate = ajv.compile(schema);
const list = require('$REPO_ROOT/token-lists/lists/dbis-138.tokenlist.json');
if (validate(list)) { console.log(' OK: Token list valid'); process.exit(0); }
console.error(' INVALID:', JSON.stringify(validate.errors, null, 2)); process.exit(1);
" 2>/dev/null) && log " OK: Schema valid" || log " SKIP: token-lists or ajv not available"
else
log " SKIP: token-lists not found (set TOKEN_LISTS_DIR or clone to ../token-lists)"
fi
echo ""
log "Done."
echo ""
echo "Next: Redeploy token-aggregation if running (systemctl restart token-aggregation)"
echo " Explorer will show new logos after migration 0013 is applied to its DB."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Complete Setup Script for Proxmox Workspace

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Complete Validation Script

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Comprehensive IP audit and hostname migration preparation
# Audits all VMs/containers across all Proxmox hosts for IP conflicts

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Comprehensive Duplicate Consolidation
# Consolidates all duplicate status files across the project

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Consolidate Duplicate Status Files
# Archives older duplicates, keeps most recent/complete version

View File

@@ -0,0 +1,117 @@
#!/usr/bin/env bash
# Consolidate all .env secrets into one file for backup/download.
# Run from proxmox repo root. Output: one .env-style file (path as first argument).
# Usage: bash scripts/consolidate-secrets-into-file.sh [OUTPUT_FILE]
# Example: bash scripts/consolidate-secrets-into-file.sh ~/secrets-consolidated.env
# SECURITY: Run locally only. Output contains real secrets; chmod 600 and never commit.
set -euo pipefail
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
cd "$PROJECT_ROOT"
OUTPUT="${1:-secrets-consolidated.env}"
# Keys we care about (from SECRETS_CONSOLIDATED_DOWNLOAD.env); order preserved
KEYS=(
PROXMOX_ML110 PROXMOX_R630_01 PROXMOX_R630_02 PROXMOX_HOST PROXMOX_PORT PROXMOX_USER
PROXMOX_TOKEN_NAME PROXMOX_TOKEN_VALUE PROXMOX_ALLOW_ELEVATED
CLOUDFLARE_API_TOKEN CLOUDFLARE_EMAIL CLOUDFLARE_API_KEY CLOUDFLARE_ZONE_ID
CLOUDFLARE_ZONE_ID_D_BIS_ORG CLOUDFLARE_ZONE_ID_MIM4U_ORG CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO
CLOUDFLARE_TUNNEL_TOKEN CLOUDFLARE_TUNNEL_ID CLOUDFLARE_TUNNEL_ID_ALLTRA_HYBX CLOUDFLARE_TUNNEL_ID_MIFOS_R630_02
CLOUDFLARE_TUNNEL_TOKEN_MIFOS_R630_02 CLOUDFLARE_ORIGIN_CA_KEY CLOUDFLARE_ACCOUNT_ID
CLOUDNS_AUTH_ID CLOUDNS_AUTH_PASSWORD
NPM_URL NPM_EMAIL NPM_PASSWORD NPM_HOST NPM_PROXMOX_HOST NPMPLUS_HOST NPM_VMID NPMPLUS_VMID
NPMPLUS_ALLTRA_HYBX_VMID IP_NPMPLUS_ALLTRA_HYBX NPM_URL_MIFOS
FASTLY_API_TOKEN
PUBLIC_IP PROXMOX_HOST_FOR_TEST UNIFI_UDM_URL UNIFI_API_KEY UNIFI_API_MODE UNIFI_SITE_ID UNIFI_VERIFY_SSL
OMADA_API_KEY OMADA_CLIENT_SECRET
GITEA_URL GITEA_TOKEN GITEA_ORG
DATABASE_URL JWT_SECRET JWT_REFRESH_SECRET JWT_EXPIRES_IN JWT_REFRESH_EXPIRES_IN SESSION_SECRET
ADMIN_CENTRAL_API_KEY DBIS_CENTRAL_URL ADMIN_JWT_SECRET
STORAGE_TYPE STORAGE_PATH AWS_REGION AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_S3_BUCKET
AZURE_STORAGE_CONNECTION_STRING AZURE_STORAGE_CONTAINER
PRIVATE_KEY RPC_URL_138 RPC_URL_138_PUBLIC ETHEREUM_MAINNET_RPC CHAIN_651940_RPC_URL ETHERLINK_RPC_URL TEZOS_RPC_URL
ETHERSCAN_API_KEY ETHERLINK_CCIP_SELECTOR TEZOS_BRIDGE_ENABLED ETHERLINK_BRIDGE_ENABLED
TEZOS_RELAY_ORACLE_KEY ETHERLINK_RELAY_BRIDGE ETHERLINK_RELAY_PRIVATE_KEY JUMPER_API_KEY
ONEINCH_API_KEY MOONPAY_API_KEY MOONPAY_SECRET_KEY RAMP_NETWORK_API_KEY ONRAMPER_API_KEY
SLACK_WEBHOOK_URL PAGERDUTY_INTEGRATION_KEY EMAIL_ALERT_API_URL EMAIL_ALERT_RECIPIENTS SENTRY_DSN
E_SIGNATURE_BASE_URL
CRYPTO_COM_API_KEY CRYPTO_COM_API_SECRET CRYPTO_COM_ENVIRONMENT BINANCE_API_KEY BINANCE_API_SECRET
KRAKEN_API_KEY KRAKEN_PRIVATE_KEY OANDA_API_KEY OANDA_ACCOUNT_ID OANDA_ENVIRONMENT FXCM_API_TOKEN
COINGECKO_API_KEY COINDESK_API_KEY COINMARKETCAP_API_KEY DEXSCREENER_API_KEY
MIFOS_BASE_URL MIFOS_TENANT MIFOS_USER MIFOS_PASSWORD MIFOS_INSECURE
OMNL_FINERACT_BASE_URL OMNL_FINERACT_TENANT OMNL_FINERACT_USER OMNL_FINERACT_PASSWORD
SANKOFA_PHOENIX_API_URL SANKOFA_PHOENIX_CLIENT_ID SANKOFA_PHOENIX_CLIENT_SECRET SANKOFA_PHOENIX_TENANT_ID
VITE_WALLETCONNECT_PROJECT_ID VITE_THIRDWEB_CLIENT_ID VITE_ETHERSCAN_API_KEY VITE_SENTRY_DSN
VITE_API_URL VITE_API_BASE_URL NEXT_PUBLIC_API_URL NEXT_PUBLIC_CHAIN_ID
METAMASK_API_KEY THIRDWEB_SECRET_KEY NPM_ACCESS_TOKEN
PARASWAP_API_KEY ZEROX_API_KEY
MONGO_USER MONGO_PASSWORD MONGO_IP MONGO_PORT MONGO_DATABASE
CHAIN138_RPC_URL RPC_URL_138_FIREBLOCKS WS_URL_138_FIREBLOCKS CHAIN_ID_138
PORT MARKET_REPORTING_API_KEY E_FILING_ENABLED NODE_ENV
)
# Sources: path -> prefix for comments
declare -A SOURCES
SOURCES["$PROJECT_ROOT/.env"]="root"
SOURCES["$PROJECT_ROOT/.env.master"]="root"
if [ -d "$PROJECT_ROOT/smom-dbis-138" ]; then
SOURCES["$PROJECT_ROOT/smom-dbis-138/.env"]="smom"
fi
if [ -d "$PROJECT_ROOT/dbis_core" ]; then
SOURCES["$PROJECT_ROOT/dbis_core/.env"]="dbis"
fi
if [ -d "$PROJECT_ROOT/OMNIS" ] && [ -f "$PROJECT_ROOT/OMNIS/backend/.env" ]; then
SOURCES["$PROJECT_ROOT/OMNIS/backend/.env"]="omnis"
fi
if [ -d "$PROJECT_ROOT/omada-api" ]; then
SOURCES["$PROJECT_ROOT/omada-api/.env"]="omada"
fi
if [ -d "$PROJECT_ROOT/phoenix-deploy-api" ]; then
SOURCES["$PROJECT_ROOT/phoenix-deploy-api/.env"]="phoenix"
fi
if [ -d "$PROJECT_ROOT/ProxmoxVE/api" ]; then
SOURCES["$PROJECT_ROOT/ProxmoxVE/api/.env"]="proxmoxve"
fi
# Export from a single file (no spaces around =, no export keyword in value)
export_from() {
local f="$1"
[ -f "$f" ] || return 0
while IFS= read -r line; do
[[ "$line" =~ ^[A-Za-z_][A-Za-z0-9_]*= ]] || continue
key="${line%%=*}"
value="${line#*=}"
printf '%s\n' "$key=$value"
done < "$f"
}
# Collect key=value from all sources (first occurrence wins)
declare -A collected
for path in "${!SOURCES[@]}"; do
while IFS= read -r line; do
key="${line%%=*}"
[ -z "$key" ] && continue
[ -n "${collected[$key]:-}" ] && continue
collected[$key]="${line#*=}"
done < <(export_from "$path")
done
# Build output: header + each KEY from KEYS (use value from collected if present)
{
echo "# =============================================================================
# CONSOLIDATED SECRETS — Filled from local .env files
# Generated: $(date -u +"%Y-%m-%dT%H:%M:%SZ")
# SECURITY: chmod 600 this file; never commit.
# ============================================================================="
for key in "${KEYS[@]}"; do
val="${collected[$key]:-}"
if [ -n "$val" ]; then
echo "${key}=${val}"
else
echo "${key}="
fi
done
} > "$OUTPUT"
chmod 600 "$OUTPUT"
echo "Written to $OUTPUT ($(wc -l < "$OUTPUT") lines). Keep secure; do not commit."

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Convert Database Containers to Privileged Mode
# Recreates PostgreSQL and Redis containers as privileged to enable service startup

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Convert DHCP containers to static IPs
# Converts one container at a time with verification

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Convert Containers to Privileged and Install All Services
# This script converts containers to privileged mode and completes all installations

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Create missing DNS records for all services
set -euo pipefail

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Script to create a Proxmox API token via the Proxmox API

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Deploy token-aggregation service for publication (token lists, CoinGecko/CMC reports).
# Run on explorer VM (VMID 5000) or host that serves explorer.d-bis.org.
#
# Prerequisites: Node 20+, PostgreSQL (for full indexing; report API may work with minimal config)
# Usage: ./scripts/deploy-token-aggregation-for-publication.sh [INSTALL_DIR]
#
# After deploy: Run apply-nginx-token-aggregation-proxy.sh to proxy /api/v1/ to this service.
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Default: user-writable dir in repo (no sudo). Use /opt/token-aggregation with sudo for system install.
INSTALL_DIR="${1:-$REPO_ROOT/token-aggregation-build}"
SVC_DIR="$REPO_ROOT/smom-dbis-138/services/token-aggregation"
if [ ! -d "$SVC_DIR" ]; then
echo "Token-aggregation not found at $SVC_DIR" >&2
exit 1
fi
echo "Deploying token-aggregation to $INSTALL_DIR"
mkdir -p "$INSTALL_DIR"
cp -a "$SVC_DIR"/* "$INSTALL_DIR/"
cd "$INSTALL_DIR"
if [ ! -f .env ]; then
if [ -f .env.example ]; then
cp .env.example .env
echo "Created .env from .env.example — edit with CUSDC_ADDRESS_138, CUSDT_ADDRESS_138, DATABASE_URL"
else
echo "Create .env with at least: CUSDC_ADDRESS_138, CUSDT_ADDRESS_138, CHAIN_138_RPC_URL"
fi
fi
npm install --omit=dev 2>/dev/null || npm install
npm run build 2>/dev/null || true
echo ""
echo "Token-aggregation built. Start with:"
echo " cd $INSTALL_DIR && node dist/index.js"
echo "Or add systemd unit. Default port: 3000"
echo ""
echo "Then apply nginx proxy (on same host):"
echo " TOKEN_AGG_PORT=3000 CONFIG_FILE=/etc/nginx/sites-available/blockscout \\"
echo " bash $REPO_ROOT/explorer-monorepo/scripts/apply-nginx-token-aggregation-proxy.sh"
echo ""
echo "Verify: curl -s https://explorer.d-bis.org/api/v1/report/token-list?chainId=138 | jq '.tokens | length'"

View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Check deployer wallet balances on ChainID 138 (native ETH + ERC-20: WETH, WETH10, LINK, cUSDT, cUSDC).
# Output half of each balance as the funding plan for the three PMM liquidity pools.
#
# Usage:
# RPC_URL_138=https://rpc-core.d-bis.org ./scripts/deployment/check-deployer-balance-chain138-and-funding-plan.sh
# # Or from smom-dbis-138: source .env then run from repo root with RPC_URL_138 set
#
# Requires: cast (foundry), jq (optional). RPC_URL_138 must be set and reachable.
set -euo pipefail
DEPLOYER="${DEPLOYER_ADDRESS:-0x4A666F96fC8764181194447A7dFdb7d471b301C8}"
RPC="${RPC_URL_138:-}"
if [ -z "$RPC" ]; then
echo "ERROR: Set RPC_URL_138 (e.g. https://rpc-core.d-bis.org or http://192.168.11.211:8545)"
exit 1
fi
CHAIN_ID=138
# Chain 138 token addresses (from CHAIN138_TOKEN_ADDRESSES / ADDRESS_MATRIX)
WETH="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
WETH10="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
LINK="0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
CUSDT="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
CUSDC="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
USDT_OFFICIAL="0x15DF1D5BFDD8Aa4b380445D4e3E9B38d34283619"
# PMM pool addresses (from LIQUIDITY_POOLS_MASTER_MAP / ADDRESS_MATRIX)
POOL_CUSDTCUSDC="0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8"
POOL_CUSDTUSDT="0xa3Ee6091696B28e5497b6F491fA1e99047250c59"
POOL_CUSDCUSDC="0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5"
get_balance() {
local addr="$1"
cast call "$addr" "balanceOf(address)(uint256)" "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "0"
}
get_decimals() {
local addr="$1"
cast call "$addr" "decimals()(uint8)" --rpc-url "$RPC" 2>/dev/null | cast --to-dec 2>/dev/null || echo "18"
}
# Native balance
native_wei=$(cast balance "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "0")
native_eth=$(awk "BEGIN { printf \"%.6f\", $native_wei / 1e18 }" 2>/dev/null || echo "0")
half_native_wei=$((native_wei / 2))
echo "============================================"
echo "Deployer wallet — ChainID 138"
echo "Deployer: $DEPLOYER"
echo "RPC: $RPC"
echo "============================================"
echo ""
echo "--- Current balances ---"
echo " Native ETH: $native_eth ETH (raw: $native_wei wei)"
echo ""
RAW_WETH=0; RAW_WETH10=0; RAW_LINK=0; RAW_CUSDT=0; RAW_CUSDC=0
HALF_WETH=0; HALF_WETH10=0; HALF_LINK=0; HALF_CUSDT=0; HALF_CUSDC=0
for entry in "WETH:$WETH:18" "WETH10:$WETH10:18" "LINK:$LINK:18" "cUSDT:$CUSDT:6" "cUSDC:$CUSDC:6"; do
sym="${entry%%:*}"; rest="${entry#*:}"; addr="${rest%%:*}"; dec="${rest#*:}"
raw=$(get_balance "$addr")
half=$((raw / 2))
case "$sym" in
WETH) RAW_WETH=$raw; HALF_WETH=$half ;;
WETH10) RAW_WETH10=$raw; HALF_WETH10=$half ;;
LINK) RAW_LINK=$raw; HALF_LINK=$half ;;
cUSDT) RAW_CUSDT=$raw; HALF_CUSDT=$half ;;
cUSDC) RAW_CUSDC=$raw; HALF_CUSDC=$half ;;
esac
if [ "$dec" = "18" ]; then
disp=$(awk "BEGIN { printf \"%.6f\", $raw / 1e18 }" 2>/dev/null || echo "0")
half_disp=$(awk "BEGIN { printf \"%.6f\", $half / 1e18 }" 2>/dev/null || echo "0")
else
disp=$(awk "BEGIN { printf \"%.2f\", $raw / 1e$dec }" 2>/dev/null || echo "0")
half_disp=$(awk "BEGIN { printf \"%.2f\", $half / 1e$dec }" 2>/dev/null || echo "0")
fi
echo " $sym: $disp (raw: $raw) → half for LP: $half_disp (raw: $half)"
done
echo ""
echo "--- Funding plan: use HALF of each balance for PMM liquidity ---"
echo ""
echo "Pool 1: cUSDT/cUSDC ($POOL_CUSDTCUSDC)"
echo " Base (cUSDT): $HALF_CUSDT (decimals 6)"
echo " Quote (cUSDC): $HALF_CUSDC (decimals 6)"
echo ""
echo "Pool 2: cUSDT/USDT ($POOL_CUSDTUSDT)"
echo " Base (cUSDT): $HALF_CUSDT (decimals 6)"
echo " Quote (USDT): use same amount in USDT (official) — check deployer USDT balance separately if needed"
echo ""
echo "Pool 3: cUSDC/USDC ($POOL_CUSDCUSDC)"
echo " Base (cUSDC): $HALF_CUSDC (decimals 6)"
echo " Quote (USDC): use same amount in USDC (official) — check deployer USDC balance separately if needed"
echo ""
echo "--- Env vars for AddLiquidityPMMPoolsChain138 (half of cUSDT/cUSDC) ---"
echo "# Add to smom-dbis-138/.env and run: forge script script/dex/AddLiquidityPMMPoolsChain138.s.sol:AddLiquidityPMMPoolsChain138 --rpc-url \$RPC_URL_138 --broadcast --private-key \$PRIVATE_KEY"
echo "POOL_CUSDTCUSDC=$POOL_CUSDTCUSDC"
echo "POOL_CUSDTUSDT=$POOL_CUSDTUSDT"
echo "POOL_CUSDCUSDC=$POOL_CUSDCUSDC"
echo "ADD_LIQUIDITY_BASE_AMOUNT=$HALF_CUSDT"
echo "ADD_LIQUIDITY_QUOTE_AMOUNT=$HALF_CUSDC"
echo "# For pool cUSDT/cUSDC only (base=cUSDT, quote=cUSDC). For cUSDT/USDT and cUSDC/USDC use per-pool vars:"
echo "# ADD_LIQUIDITY_CUSDTUSDT_BASE=$HALF_CUSDT ADD_LIQUIDITY_CUSDTUSDT_QUOTE=<deployer USDT balance / 2>"
echo "# ADD_LIQUIDITY_CUSDCUSDC_BASE=$HALF_CUSDC ADD_LIQUIDITY_CUSDCUSDC_QUOTE=<deployer USDC balance / 2>"
echo ""
echo "--- Reserve ---"
echo " Keep half of native ETH for gas. Half for LP (if wrapping to WETH for a pool): $((half_native_wei / 2)) wei."
echo " WETH/LINK: half amounts above can be reserved for other use or future pools."
echo "============================================"

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Check deployer nonce and balance on Mainnet, Cronos, and Arbitrum.
# Use to diagnose "nonce too high" / "invalid nonce" and "insufficient funds" before retrying cW* deploy.
# Usage: ./scripts/deployment/check-deployer-nonce-and-balance.sh
# Requires: smom-dbis-138/.env with PRIVATE_KEY, ETHEREUM_MAINNET_RPC, CRONOS_RPC_URL, ARBITRUM_MAINNET_RPC
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
[[ -f "$SMOM/.env" ]] || { echo "Missing $SMOM/.env" >&2; exit 1; }
set -a
source "$SMOM/.env"
set +a
DEPLOYER=""
if [[ -n "${PRIVATE_KEY:-}" ]]; then
DEPLOYER=$(cast wallet address "$PRIVATE_KEY" 2>/dev/null || true)
fi
[[ -z "$DEPLOYER" ]] && { echo "Could not derive deployer address (set PRIVATE_KEY in $SMOM/.env)" >&2; exit 1; }
echo "Deployer address: $DEPLOYER"
echo ""
for label in "Mainnet (1)" "Cronos (25)" "Arbitrum (42161)"; do
case "$label" in
"Mainnet (1)") RPC="${ETHEREUM_MAINNET_RPC:-$ETH_MAINNET_RPC_URL}"; CHAIN=1 ;;
"Cronos (25)") RPC="${CRONOS_RPC_URL:-$CRONOS_RPC}"; CHAIN=25 ;;
"Arbitrum (42161)") RPC="${ARBITRUM_MAINNET_RPC:-$ARBITRUM_MAINNET_RPC_URL}"; CHAIN=42161 ;;
esac
[[ -z "$RPC" ]] && { echo "$label: no RPC set, skip"; continue; }
NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "?")
BALANCE=$(cast balance "$DEPLOYER" --rpc-url "$RPC" 2>/dev/null || echo "?")
echo "$label"
echo " RPC: ${RPC%%\?*}"
echo " Nonce (next tx): $NONCE"
echo " Balance (wei): $BALANCE"
if [[ "$CHAIN" == "42161" && "$BALANCE" != "?" ]]; then
# ~0.44 ETH needed at 35 gwei for 10 contracts
NEEDED=440872740000000000
if [[ "$BALANCE" -lt "$NEEDED" ]]; then
echo " → Insufficient for cW* deploy (~0.44 ETH at 35 gwei). Send ETH to deployer."
else
echo " → Balance OK for deploy."
fi
fi
echo ""
done
echo "Mainnet/Cronos nonce: If 'nonce too high' or 'invalid nonce', either wait for pending txs to confirm or replace/cancel them (e.g. same nonce, higher gas, 0 value to self)."
echo "Arbitrum: Fund deployer with ~0.5 ETH on Arbitrum One, then run deploy with ARBITRUM_GAS_PRICE=35000000000."

View File

@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Create all three DODO PMM pools on Chain 138: cUSDT/cUSDC, cUSDT/USDT, cUSDC/USDC.
# Requires: DODOPMMIntegration deployed, deployer with POOL_MANAGER_ROLE, PRIVATE_KEY and RPC_URL_138 in smom-dbis-138/.env.
#
# Usage: ./scripts/deployment/create-all-pmm-pools-chain138.sh [--dry-run]
# --dry-run Print commands only; do not broadcast.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
DRY_RUN=""
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=1
done
if [[ ! -f "$SMOM/.env" ]]; then
echo "Missing $SMOM/.env. Abort." >&2
exit 1
fi
set -a
source "$SMOM/.env"
set +a
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
GAS_PRICE="${GAS_PRICE_138:-${GAS_PRICE:-1000000000}}"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D}}"
export RPC_URL_138="$RPC"
cd "$SMOM"
POOLS=(
"script/dex/CreateCUSDTCUSDCPool.s.sol:CreateCUSDTCUSDCPool"
"script/dex/CreateCUSDTUSDTPool.s.sol:CreateCUSDTUSDTPool"
"script/dex/CreateCUSDCUSDCPool.s.sol:CreateCUSDCUSDCPool"
)
if [[ -n "$DRY_RUN" ]]; then
echo "[dry-run] Would run the following (from $SMOM):"
for spec in "${POOLS[@]}"; do
echo " forge script $spec --rpc-url \"$RPC\" --broadcast --private-key \"\$PRIVATE_KEY\" --with-gas-price $GAS_PRICE"
done
exit 0
fi
for spec in "${POOLS[@]}"; do
echo "=== Creating pool: $spec ==="
out=$(forge script $spec --rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$GAS_PRICE" 2>&1) || true
echo "$out"
if echo "$out" | grep -qE "pool exists|Pool already exists"; then
echo "Pool already exists; skipping."
elif echo "$out" | grep -q "Error:" && ! echo "$out" | grep -qE "pool exists|Pool already exists"; then
echo "Pool creation failed (see above)." >&2
exit 1
else
echo "Pool created successfully."
fi
sleep 2
done
echo "Done. Run ./scripts/verify/check-contracts-on-chain-138.sh \"$RPC\" to verify."

View File

@@ -119,7 +119,7 @@ deploy_inside() {
log "Creating startup script and systemd service..."
run_ct "bash -c '
cat > /srv/gov-portals/start-portals.sh << \"SCRIPT\"
#!/bin/bash
#!/usr/bin/env bash
cd /srv/gov-portals
export NODE_ENV=production
PORT=3001 node DBIS/node_modules/next/dist/bin/next start -p 3001 &

View File

@@ -2,12 +2,19 @@
# Deploy TransactionMirror and create DODO cUSDT/cUSDC PMM pool on Chain 138.
# Run after clearing RPC tx pool (./scripts/clear-all-transaction-pools.sh) so deployer nonce is not stuck.
#
# Uses: smom-dbis-138/.env (PRIVATE_KEY, RPC_URL_138, RPC_URL_138_PUBLIC, DODO_PMM_INTEGRATION, GAS_PRICE)
# and config/ip-addresses.conf for RPC fallbacks. Always checks nonce, RPC active, and gas.
# RPC: Contract deployments use ONLY Core RPC (RPC_URL_138 = 192.168.11.211:8545, VMID 2101).
# No Public RPC fallback. If Core is down or read-only, fix it first (see docs/03-deployment/RPC_2101_READONLY_FIX.md).
#
# Usage: ./scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh [--dry-run] [--force]
# --dry-run Check env, RPC, nonce only; no deploy.
# --force Skip RPC reachability check (not recommended).
# Uses: smom-dbis-138/.env (PRIVATE_KEY, RPC_URL_138, DODO_PMM_INTEGRATION, GAS_PRICE)
#
# Usage: ./scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh [--dry-run] [--force] [--skip-mirror]
# --dry-run Check env, RPC, nonce only; no deploy.
# --force Skip RPC reachability check (not recommended).
# --skip-mirror Skip TransactionMirror deploy; only create PMM pool (set TRANSACTION_MIRROR_ADDRESS in .env if mirror already deployed).
#
# Before first deploy: fix Core if read-only, then run health check:
# ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh
# ./scripts/maintenance/health-check-rpc-2101.sh
set -euo pipefail
@@ -17,9 +24,11 @@ SMOM="${PROJECT_ROOT}/smom-dbis-138"
DRY_RUN=""
FORCE=""
SKIP_MIRROR=""
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=1
[[ "$a" == "--force" ]] && FORCE=1
[[ "$a" == "--skip-mirror" ]] && SKIP_MIRROR=1
done
# 1) Load dotenv: project config (RPCs) then smom-dbis-138/.env (PRIVATE_KEY, overrides)
@@ -32,44 +41,32 @@ set -a
source "$SMOM/.env"
set +a
# 2) RPC: prefer .env, fallback to config
# 2) RPC: Core (2101) only — no Public fallback for deployments
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
PUBLIC_RPC="${RPC_URL_138_PUBLIC:-http://192.168.11.221:8545}"
[[ -z "${PRIVATE_KEY:-}" ]] && echo "PRIVATE_KEY not set in $SMOM/.env. Abort." >&2 && exit 1
# Chain 138 gas: min 1 gwei; use GAS_PRICE from .env or default
GAS_PRICE="${GAS_PRICE_138:-${GAS_PRICE:-1000000000}}"
echo "=== TransactionMirror + PMM pool (Chain 138) ==="
echo "RPC: $RPC"
echo "RPC (Core for deploy): $RPC"
echo "Gas price: $GAS_PRICE wei"
echo ""
# 3) Ensure RPC is active (chainId 138)
rpc_ok=""
# 3) Ensure Core RPC is active (chainId 138). No Public fallback.
if [[ -z "$FORCE" ]]; then
chain_id_hex=$(curl -s -m 10 -X POST "$RPC" -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' 2>/dev/null | sed -n 's/.*"result":"\([^"]*\)".*/\1/p') || true
if [[ "$chain_id_hex" == "0x8a" ]]; then
rpc_ok=1
else
if [[ "$chain_id_hex" != "0x8a" ]]; then
if [[ -n "$chain_id_hex" ]]; then
echo "RPC returned chainId $chain_id_hex (expected 0x8a for Chain 138)." >&2
echo "Core RPC returned chainId $chain_id_hex (expected 0x8a for Chain 138)." >&2
else
echo "RPC unreachable or invalid response: $RPC" >&2
fi
if [[ "$RPC" == *"192.168.11.211"* ]] && [[ "$PUBLIC_RPC" != *"192.168.11.211"* ]]; then
pub_hex=$(curl -s -m 5 -X POST "$PUBLIC_RPC" -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' 2>/dev/null | sed -n 's/.*"result":"\([^"]*\)".*/\1/p') || true
if [[ "$pub_hex" == "0x8a" ]]; then
echo "Using Public RPC: $PUBLIC_RPC" >&2
RPC="$PUBLIC_RPC"
rpc_ok=1
fi
fi
if [[ -z "$rpc_ok" ]]; then
echo "Set RPC_URL_138 (and optionally RPC_URL_138_PUBLIC) in $SMOM/.env to a reachable Chain 138 RPC." >&2
exit 1
echo "Core RPC unreachable or invalid response: $RPC" >&2
fi
echo "Contract deployments use Core RPC only (VMID 2101, 192.168.11.211:8545). Fix read-only/storage then re-run:" >&2
echo " 1. ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh" >&2
echo " 2. ./scripts/maintenance/health-check-rpc-2101.sh" >&2
echo " See docs/03-deployment/RPC_2101_READONLY_FIX.md" >&2
exit 1
fi
else
echo "(--force: skipping RPC check)" >&2
@@ -102,12 +99,36 @@ cd "$SMOM"
export RPC_URL_138="$RPC"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D}}"
echo "Deploying TransactionMirror (NEXT_NONCE=$NEXT_NONCE, gas $GAS_PRICE)..."
forge script script/DeployTransactionMirror.s.sol:DeployTransactionMirror \
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$GAS_PRICE"
# Skip TransactionMirror deploy if already deployed at TRANSACTION_MIRROR_ADDRESS or if --skip-mirror
MIRROR_ADDR="${TRANSACTION_MIRROR_ADDRESS:-}"
if [[ -n "$SKIP_MIRROR" ]]; then
echo "Skipping TransactionMirror deploy (--skip-mirror)."
if [[ -z "$MIRROR_ADDR" ]]; then
echo "Set TRANSACTION_MIRROR_ADDRESS in $SMOM/.env to the existing mirror address, then re-run for pool-only." >&2
exit 1
fi
elif [[ -n "$MIRROR_ADDR" ]]; then
CODE_LEN=$(cast code "$MIRROR_ADDR" --rpc-url "$RPC" 2>/dev/null | wc -c)
if [[ -n "$CODE_LEN" && "$CODE_LEN" -gt 10 ]]; then
echo "TransactionMirror already deployed at $MIRROR_ADDR (has code). Skipping mirror deploy."
SKIP_MIRROR=1
fi
fi
# Re-query pending nonce for pool deploy; wait briefly so first tx can be mined (reduces "Replacement transaction underpriced")
sleep 3
if [[ -z "$SKIP_MIRROR" ]]; then
echo "Deploying TransactionMirror (NEXT_NONCE=$NEXT_NONCE, gas $GAS_PRICE)..."
if ! forge script script/DeployTransactionMirror.s.sol:DeployTransactionMirror \
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$GAS_PRICE"; then
echo ""
echo "If the failure was CreateCollision (contract already at expected address), set in $SMOM/.env:" >&2
echo " TRANSACTION_MIRROR_ADDRESS=0xC7f2Cf4845C6db0e1a1e91ED41Bcd0FcC1b0E141" >&2
echo "Then re-run this script to create only the PMM pool, or run with --skip-mirror." >&2
exit 1
fi
sleep 3
fi
# Re-query pending nonce for pool deploy
NONCE_USED_FIRST=$NEXT_NONCE
NEXT_NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC" --block pending 2>/dev/null) || true
[[ -z "${NEXT_NONCE//[0-9a-fA-Fx]/}" && -n "$NEXT_NONCE" ]] || NEXT_NONCE=$((NONCE_USED_FIRST + 1))
@@ -138,6 +159,10 @@ while true; do
if echo "$POOL_OUTPUT" | grep -q "Script ran successfully"; then
break
fi
if echo "$POOL_OUTPUT" | grep -qE "pool exists|DODOPMMIntegration: pool exists"; then
echo "Pool already exists; continuing."
break
fi
echo "Pool deploy failed. Check output above." >&2
exit 1
done

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Deploy TransactionMirror to Chain 138 using next nonce (skip stuck tx).
# Usage: ./scripts/deployment/deploy-transaction-mirror-chain138-nonce-fix.sh [NONCE]
# Default NONCE = 13370 (when current is 13369 stuck). Requires: .env with PRIVATE_KEY; RPC reachable (Public 2201 OK).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
NONCE="${1:-13370}"
[[ -f "${SMOM}/.env" ]] && set -a && source "${SMOM}/.env" 2>/dev/null && set +a
RPC="${RPC_URL_138:-${RPC_URL_138_PUBLIC:-http://192.168.11.221:8545}}"
[[ -z "${PRIVATE_KEY:-}" ]] && echo "PRIVATE_KEY not set." >&2 && exit 1
[[ "${PRIVATE_KEY#0x}" == "$PRIVATE_KEY" ]] && export PRIVATE_KEY="0x$PRIVATE_KEY"
ADMIN="${MIRROR_ADMIN:-$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null)}"
echo "Deploying TransactionMirror (nonce=$NONCE) to $RPC"
cd "$SMOM"
out=$(forge create contracts/mirror/TransactionMirror.sol:TransactionMirror \
--constructor-args "$ADMIN" \
--rpc-url "$RPC" \
--private-key "$PRIVATE_KEY" \
--gas-price 1000000000 \
--nonce "$NONCE" 2>&1) || { echo "$out"; exit 1; }
echo "$out"
addr=$(echo "$out" | grep -oE 'Deployed to: (0x[a-fA-F0-9]{40})' | sed 's/Deployed to: //')
[[ -z "$addr" ]] && addr=$(echo "$out" | grep -oE '0x[a-fA-F0-9]{40}' | head -1)
[[ -z "$addr" ]] && { echo "Could not parse deployed address"; exit 1; }
echo ""
echo "TransactionMirror deployed at: $addr"
echo "Update scripts/verify/check-contracts-on-chain-138.sh and docs to use $addr instead of 0xe363a69273C3471ECaf313f8Ca45bc7C538c5e78"

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# Pre-flight checks before Chain 138 deployment: correct RPC, dotenv, nonce (no stuck txs).
# Optionally run gas/cost estimate for deployment cost.
#
# Usage: ./scripts/deployment/preflight-chain138-deploy.sh [--cost]
# --cost Also run cost estimate (smom-dbis-138/scripts/deployment/calculate-costs-consolidated.sh).
#
# Requires: run from repo root. Uses smom-dbis-138/.env only (PRIVATE_KEY, RPC_URL_138).
# See: docs/03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md § Deployment safety
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
RUN_COST=""
for a in "$@"; do [[ "$a" == "--cost" ]] && RUN_COST=1; done
echo "=== Chain 138 deployment pre-flight ==="
echo ""
# 1) Dotenv: must exist and be the canonical one
if [[ ! -f "$SMOM/.env" ]]; then
echo "FAIL: $SMOM/.env not found. Use smom-dbis-138/.env only for deployment secrets." >&2
exit 1
fi
echo "OK Dotenv: $SMOM/.env exists (canonical for Chain 138 deploy)."
# 2) Env check script (keys only, no values)
if [[ -f "$SMOM/scripts/deployment/check-env-required.sh" ]]; then
echo ""
bash "$SMOM/scripts/deployment/check-env-required.sh" || { echo "FAIL: env check failed. Fix MISS entries in $SMOM/.env." >&2; exit 1; }
else
echo "WARN: $SMOM/scripts/deployment/check-env-required.sh not found; skipping env key check." >&2
fi
# 3) Load env for RPC and nonce checks (no secrets printed)
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
set -a
source "$SMOM/.env"
set +a
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
if [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "FAIL: PRIVATE_KEY not set in $SMOM/.env." >&2
exit 1
fi
# 4) RPC: must be Core (chainId 138 = 0x8a)
echo ""
chain_id_hex=$(curl -s -m 10 -X POST "$RPC" -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' 2>/dev/null | sed -n 's/.*"result":"\([^"]*\)".*/\1/p') || true
if [[ "$chain_id_hex" != "0x8a" ]]; then
if [[ -n "$chain_id_hex" ]]; then
echo "FAIL: RPC returned chainId $chain_id_hex (expected 0x8a for Chain 138). Use Core RPC only (RPC_URL_138 = VMID 2101, e.g. http://192.168.11.211:8545)." >&2
else
echo "FAIL: RPC unreachable: $RPC. Fix Core RPC or network." >&2
fi
echo " See docs/04-configuration/RPC_ENDPOINTS_MASTER.md." >&2
exit 1
fi
echo "OK RPC (Core): $RPC (chainId 138)."
# 5) Nonce: warn if pending > latest (stuck txs)
DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null) || { echo "FAIL: cast wallet address failed. Check PRIVATE_KEY in .env." >&2; exit 1; }
NONCE_PENDING=$(cast nonce "$DEPLOYER" --rpc-url "$RPC" --block pending 2>/dev/null) || true
NONCE_LATEST=$(cast nonce "$DEPLOYER" --rpc-url "$RPC" --block latest 2>/dev/null) || true
# Normalize to decimal (cast may return hex 0xN or decimal N)
[[ -z "${NONCE_LATEST//[0-9a-fA-Fx]/}" && -n "$NONCE_LATEST" ]] || NONCE_LATEST=0
[[ -z "${NONCE_PENDING//[0-9a-fA-Fx]/}" && -n "$NONCE_PENDING" ]] || NONCE_PENDING="$NONCE_LATEST"
NONCE_PENDING=$((NONCE_PENDING))
NONCE_LATEST=$((NONCE_LATEST))
echo " Deployer: $DEPLOYER"
echo " Nonce (pending): $NONCE_PENDING (latest: $NONCE_LATEST)"
if [[ $NONCE_PENDING -gt $NONCE_LATEST ]]; then
echo ""
echo "WARN: Pending nonce ($NONCE_PENDING) > latest ($NONCE_LATEST) — possible stuck transaction(s)." >&2
echo " Do NOT deploy until resolved: run ./scripts/clear-all-transaction-pools.sh then wait ~60s." >&2
echo " See docs/03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md § Do not deploy when stuck." >&2
exit 1
fi
echo "OK No stuck transactions (nonce consistent)."
# 6) Optional: cost estimate
if [[ -n "$RUN_COST" ]] && [[ -f "$SMOM/scripts/deployment/calculate-costs-consolidated.sh" ]]; then
echo ""
echo "=== Cost estimate (--cost) ==="
(cd "$SMOM" && bash scripts/deployment/calculate-costs-consolidated.sh) || true
fi
echo ""
echo "Pre-flight passed. Proceed with deployment using scripts that source $SMOM/.env and use RPC_URL_138 (Core only)."

View File

@@ -0,0 +1,100 @@
#!/usr/bin/env bash
# Run all deployment next steps for Chain 138 in order: preflight → mirror+pool (or pool-only) → register c* as GRU → verify.
#
# Usage: ./scripts/deployment/run-all-next-steps-chain138.sh [--dry-run] [--skip-mirror] [--skip-register-gru] [--skip-verify]
# --dry-run Print steps only; do not run deploy/scripts.
# --skip-mirror Do not deploy TransactionMirror (pool-only; requires TRANSACTION_MIRROR_ADDRESS in smom-dbis-138/.env).
# --skip-register-gru Skip RegisterGRUCompliantTokens (e.g. if already registered).
# --skip-verify Skip final on-chain verification.
#
# Requires: repo root; smom-dbis-138/.env with PRIVATE_KEY, RPC_URL_138; for register-gru: UNIVERSAL_ASSET_REGISTRY, CUSDT_ADDRESS_138, CUSDC_ADDRESS_138 (and optional c* vars).
# See: docs/03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="$PROJECT_ROOT/smom-dbis-138"
DRY_RUN=""
SKIP_MIRROR=""
SKIP_REGISTER_GRU=""
SKIP_VERIFY=""
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=1
[[ "$a" == "--skip-mirror" ]] && SKIP_MIRROR=1
[[ "$a" == "--skip-register-gru" ]] && SKIP_REGISTER_GRU=1
[[ "$a" == "--skip-verify" ]] && SKIP_VERIFY=1
done
echo "=== Chain 138 — run all next steps ==="
echo " dry-run: $DRY_RUN skip-mirror: $SKIP_MIRROR skip-register-gru: $SKIP_REGISTER_GRU skip-verify: $SKIP_VERIFY"
echo ""
# 1) Preflight
echo "--- Step 1: Preflight ---"
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] $PROJECT_ROOT/scripts/deployment/preflight-chain138-deploy.sh"
else
"$SCRIPT_DIR/preflight-chain138-deploy.sh" || { echo "Preflight failed." >&2; exit 1; }
fi
echo ""
# 2) TransactionMirror + PMM pool (or pool-only)
echo "--- Step 2: TransactionMirror + PMM pool ---"
if [[ -n "$DRY_RUN" ]]; then
if [[ -n "$SKIP_MIRROR" ]]; then
echo "[DRY-RUN] $PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh --skip-mirror"
else
echo "[DRY-RUN] $PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh"
fi
else
if [[ -n "$SKIP_MIRROR" ]]; then
"$PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh" --skip-mirror || { echo "Deploy (pool-only) failed." >&2; exit 1; }
else
"$PROJECT_ROOT/scripts/deployment/deploy-transaction-mirror-and-pmm-pool-after-txpool-clear.sh" || { echo "Deploy failed." >&2; exit 1; }
fi
fi
echo ""
# 3) Register c* as GRU (optional)
if [[ -z "$SKIP_REGISTER_GRU" ]]; then
echo "--- Step 3: Register c* as GRU (UniversalAssetRegistry) ---"
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] cd $SMOM && forge script script/deploy/RegisterGRUCompliantTokens.s.sol --rpc-url \$RPC_URL_138 --broadcast --private-key \$PRIVATE_KEY --with-gas-price 1000000000"
else
if [[ -f "$SMOM/.env" ]]; then
set -a; source "$SMOM/.env"; set +a
# Fallback: Register script expects CUSDT_ADDRESS_138/CUSDC_ADDRESS_138; use COMPLIANT_USDT/COMPLIANT_USDC if set
[[ -z "${CUSDT_ADDRESS_138:-}" && -n "${COMPLIANT_USDT:-}" ]] && export CUSDT_ADDRESS_138="$COMPLIANT_USDT"
[[ -z "${CUSDC_ADDRESS_138:-}" && -n "${COMPLIANT_USDC:-}" ]] && export CUSDC_ADDRESS_138="$COMPLIANT_USDC"
if [[ -n "${UNIVERSAL_ASSET_REGISTRY:-}" && ( -n "${CUSDT_ADDRESS_138:-}" || -n "${CUSDC_ADDRESS_138:-}" ) ]]; then
(cd "$SMOM" && forge script script/deploy/RegisterGRUCompliantTokens.s.sol --rpc-url "${RPC_URL_138:-http://192.168.11.211:8545}" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price 1000000000) || echo "RegisterGRUCompliantTokens failed or skipped (check UNIVERSAL_ASSET_REGISTRY and CUSDT_ADDRESS_138/CUSDC_ADDRESS_138)."
else
echo "Skip: set UNIVERSAL_ASSET_REGISTRY and CUSDT_ADDRESS_138 (and optional c* vars) in $SMOM/.env to register c* as GRU."
fi
else
echo "Skip: $SMOM/.env not found."
fi
fi
echo ""
else
echo "--- Step 3: Register c* as GRU (skipped) ---"
echo ""
fi
# 4) Verify
if [[ -z "$SKIP_VERIFY" ]]; then
echo "--- Step 4: On-chain verification ---"
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] $PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh"
else
[[ -f "$SMOM/.env" ]] && set -a && source "$SMOM/.env" && set +a
"$PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "${RPC_URL_138:-}" || true
fi
echo ""
else
echo "--- Step 4: Verify (skipped) ---"
echo ""
fi
echo "=== Next steps run complete. ==="

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env bash
# Run all checkable "before deploy" items from RECOMMENDATIONS_AND_FIXES_BEFORE_DEPLOY.md.
# Usage: ./scripts/deployment/run-before-deploy-checks.sh [--cost] [--alltra] [--on-chain] [--skip-build]
# --cost Run preflight with --cost (gas estimate).
# --alltra Include alltra-lifi-settlement in contract tests (slower).
# --on-chain Run check-contracts-on-chain-138.sh (requires RPC).
# --skip-build Skip forge build; use existing artifacts (step 3 and test script both skip build).
#
# Requires: run from repo root; forge in PATH.
# See: docs/03-deployment/RECOMMENDATIONS_AND_FIXES_BEFORE_DEPLOY.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
RUN_COST=""
RUN_ALLTRA=""
RUN_ONCHAIN=""
SKIP_BUILD=""
FAILED=0
for a in "$@"; do
case "$a" in
--cost) RUN_COST=1 ;;
--alltra) RUN_ALLTRA=1 ;;
--on-chain) RUN_ONCHAIN=1 ;;
--skip-build) SKIP_BUILD=1 ;;
esac
done
echo "=== Before-deploy checks (recommendations and fixes) ==="
echo ""
# 1) Preflight
echo "--- 1. Preflight (RPC, dotenv, nonce) ---"
if [[ -n "$RUN_COST" ]]; then
if "$SCRIPT_DIR/preflight-chain138-deploy.sh" --cost; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
else
if "$SCRIPT_DIR/preflight-chain138-deploy.sh"; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
fi
echo ""
# 2) Env check (smom-dbis-138)
echo "--- 2. Env check (smom-dbis-138) ---"
if (cd "$PROJECT_ROOT/smom-dbis-138" && ./scripts/deployment/check-env-required.sh) >/dev/null 2>&1; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
echo ""
# 3) Forge build (skip if --skip-build)
echo "--- 3. Forge build (smom-dbis-138) ---"
if [[ -n "$SKIP_BUILD" ]]; then
echo " SKIP (--skip-build)"
else
if (cd "$PROJECT_ROOT/smom-dbis-138" && forge build) >/dev/null 2>&1; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
fi
echo ""
# 4) Contract tests (skip build here; use step 3 build or existing artifacts)
echo "--- 4. Contract tests (unit; use --alltra for e2e) ---"
if [[ -n "$RUN_ALLTRA" ]]; then
if "$SCRIPT_DIR/test-all-contracts-before-deploy.sh" --alltra --skip-build 2>/dev/null; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
else
if "$SCRIPT_DIR/test-all-contracts-before-deploy.sh" --no-match "Fork|Mainnet|Integration|e2e" --skip-build 2>/dev/null; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
fi
echo ""
# 5) Config validation
echo "--- 5. Config validation ---"
if (cd "$PROJECT_ROOT" && bash scripts/validation/validate-config-files.sh) >/dev/null 2>&1; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
echo ""
# 6) On-chain verification (optional)
if [[ -n "$RUN_ONCHAIN" ]]; then
echo "--- 6. On-chain verification (Chain 138) ---"
if (cd "$PROJECT_ROOT" && ./scripts/verify/check-contracts-on-chain-138.sh) >/dev/null 2>&1; then echo " PASS"; else echo " FAIL"; FAILED=$((FAILED+1)); fi
echo ""
fi
echo "=== Summary ==="
if [[ $FAILED -eq 0 ]]; then
echo "All checkable items passed. Complete remaining operator items (gas, POOL_MANAGER_ROLE, RPC writable if needed) per RECOMMENDATIONS_AND_FIXES_BEFORE_DEPLOY.md."
exit 0
else
echo "$FAILED check(s) failed. Fix and re-run."
exit 1
fi

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env bash
# Run remaining cW* steps: deploy (or dry-run), update token-mapping from .env, optional verify.
# See docs/07-ccip/CW_DEPLOY_AND_WIRE_RUNBOOK.md and docs/00-meta/CW_BRIDGE_TASK_LIST.md.
#
# Usage:
# ./scripts/deployment/run-cw-remaining-steps.sh [--dry-run] [--deploy] [--update-mapping] [--verify]
# --dry-run Run deploy-cw in dry-run mode (print commands only).
# --deploy Run deploy-cw on all chains (requires RPC/PRIVATE_KEY in smom-dbis-138/.env).
# --update-mapping Update config/token-mapping-multichain.json from CWUSDT_*/CWUSDC_* in .env.
# --verify For each chain with CWUSDT_* set, check MINTER_ROLE/BURNER_ROLE on cW* for CW_BRIDGE_*.
# With no options, runs --dry-run then --update-mapping (if any CWUSDT_* in .env).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
CONFIG="${PROJECT_ROOT}/config/token-mapping-multichain.json"
DRY_RUN=false
DO_DEPLOY=false
DO_UPDATE_MAPPING=false
DO_VERIFY=false
for a in "$@"; do
case "$a" in
--dry-run) DRY_RUN=true ;;
--deploy) DO_DEPLOY=true ;;
--update-mapping) DO_UPDATE_MAPPING=true ;;
--verify) DO_VERIFY=true ;;
esac
done
if ! $DRY_RUN && ! $DO_DEPLOY && ! $DO_UPDATE_MAPPING && ! $DO_VERIFY; then
DRY_RUN=true
DO_UPDATE_MAPPING=true
fi
if [[ ! -f "$SMOM/.env" ]]; then
echo "Missing $SMOM/.env" >&2
exit 1
fi
set -a
source "$SMOM/.env"
set +a
# Chain name (env suffix) -> chainId for 138 -> chain pairs
declare -A CHAIN_NAME_TO_ID=(
[MAINNET]=1 [CRONOS]=25 [BSC]=56 [POLYGON]=137 [GNOSIS]=100
[AVALANCHE]=43114 [BASE]=8453 [ARBITRUM]=42161 [OPTIMISM]=10 [ALL]=651940
)
if $DO_DEPLOY; then
echo "=== Deploy cW* (DeployCWTokens) on all chains ==="
(cd "$SMOM" && ./scripts/deployment/deploy-tokens-and-weth-all-chains-skip-canonical.sh --deploy-cw)
echo "→ Set CWUSDT_<CHAIN> and CWUSDC_<CHAIN> in $SMOM/.env from script output; then run with --update-mapping"
fi
if $DRY_RUN; then
echo "=== Dry-run: deploy cW* (commands only) ==="
(cd "$SMOM" && ./scripts/deployment/deploy-tokens-and-weth-all-chains-skip-canonical.sh --deploy-cw --dry-run)
fi
update_mapping() {
local env_file="$SMOM/.env"
local config_file="$CONFIG"
[[ -f "$config_file" ]] || { echo "Missing $config_file" >&2; return 1; }
node -e "
const fs = require('fs');
const path = require('path');
function loadEnv(f) {
const c = fs.readFileSync(f, 'utf8');
const out = {};
c.split('\n').forEach(line => {
const m = line.match(/^([A-Za-z_0-9]+)=(.*)$/);
if (m) out[m[1]] = m[2].replace(/^[\"']|[\"']\$/g, '').trim();
});
return out;
}
const env = loadEnv('$env_file');
const chainToId = { MAINNET: 1, CRONOS: 25, BSC: 56, POLYGON: 137, GNOSIS: 100, AVALANCHE: 43114, BASE: 8453, ARBITRUM: 42161, OPTIMISM: 10, ALL: 651940 };
const keyToEnv = { Compliant_USDT_cW: 'CWUSDT', Compliant_USDC_cW: 'CWUSDC', Compliant_EURC_cW: 'CWEURC', Compliant_EURT_cW: 'CWEURT', Compliant_GBPC_cW: 'CWGBPC', Compliant_GBPT_cW: 'CWGBPT', Compliant_AUDC_cW: 'CWAUDC', Compliant_JPYC_cW: 'CWJPYC', Compliant_CHFC_cW: 'CWCHFC', Compliant_CADC_cW: 'CWCADC', Compliant_XAUC_cW: 'CWXAUC', Compliant_XAUT_cW: 'CWXAUT' };
const j = JSON.parse(fs.readFileSync('$config_file', 'utf8'));
let updated = 0;
for (const [name, chainId] of Object.entries(chainToId)) {
const pair = j.pairs.find(p => p.fromChainId === 138 && p.toChainId === chainId);
if (!pair) continue;
for (const t of pair.tokens) {
const envKey = keyToEnv[t.key];
if (!envKey) continue;
const addr = env[envKey + '_' + name];
if (addr && t.addressTo !== addr) { t.addressTo = addr; updated++; }
}
}
if (updated) {
fs.writeFileSync('$config_file', JSON.stringify(j, null, 2) + '\n');
console.log('Updated', updated, 'addressTo entries in token-mapping-multichain.json');
} else {
console.log('No cW* addresses set in .env for mapped chains, or already up to date');
}
"
}
if $DO_UPDATE_MAPPING; then
echo "=== Update token-mapping from .env ==="
update_mapping
fi
if $DO_VERIFY; then
echo "=== Verify MINTER/BURNER roles on cW* for each chain ==="
MINTER_ROLE=$(cast keccak "MINTER_ROLE" 2>/dev/null || echo "0x")
BURNER_ROLE=$(cast keccak "BURNER_ROLE" 2>/dev/null || echo "0x")
for name in MAINNET CRONOS BSC POLYGON GNOSIS AVALANCHE BASE ARBITRUM OPTIMISM; do
cwusdt_var="CWUSDT_${name}"
bridge_var="CW_BRIDGE_${name}"
cwusdt="${!cwusdt_var:-}"
bridge="${!bridge_var:-}"
rpc_var="${name}_RPC_URL"
[[ -z "$rpc_var" ]] && rpc_var="${name}_RPC"
rpc="${!rpc_var:-}"
if [[ -z "$cwusdt" || -z "$bridge" ]]; then continue; fi
if [[ -z "$rpc" ]]; then
case "$name" in
MAINNET) rpc="${ETH_MAINNET_RPC_URL:-${ETHEREUM_MAINNET_RPC:-}}";;
CRONOS) rpc="${CRONOS_RPC_URL:-${CRONOS_RPC:-}}";;
BSC) rpc="${BSC_RPC_URL:-}";;
POLYGON) rpc="${POLYGON_MAINNET_RPC:-${POLYGON_RPC_URL:-}}";;
GNOSIS) rpc="${GNOSIS_RPC:-}";;
AVALANCHE) rpc="${AVALANCHE_RPC_URL:-}";;
BASE) rpc="${BASE_MAINNET_RPC:-}";;
ARBITRUM) rpc="${ARBITRUM_MAINNET_RPC:-}";;
OPTIMISM) rpc="${OPTIMISM_MAINNET_RPC:-}";;
esac
fi
if [[ -z "$rpc" ]]; then echo " Skip $name: no RPC"; continue; fi
m=$(cast call "$cwusdt" "hasRole(bytes32,address)(bool)" "$MINTER_ROLE" "$bridge" --rpc-url "$rpc" 2>/dev/null || echo "false")
b=$(cast call "$cwusdt" "hasRole(bytes32,address)(bool)" "$BURNER_ROLE" "$bridge" --rpc-url "$rpc" 2>/dev/null || echo "false")
echo " $name: MINTER=$m BURNER=$b (cWUSDT=$cwusdt bridge=$bridge)"
done
fi
echo "Done. See docs/07-ccip/CW_DEPLOY_AND_WIRE_RUNBOOK.md for Phase E (relay and E2E)."

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Set COMPLIANT_USDT/COMPLIANT_USDC (all c*) and other Chain 138 token addresses in smom-dbis-138/.env,
# then run RegisterGRUCompliantTokens to register all c* as GRU in UniversalAssetRegistry.
#
# Addresses are from docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md and TOKENS_AND_NETWORKS_MINTABLE_TO_DEPLOYER.md.
# Usage: ./scripts/deployment/set-dotenv-c-tokens-and-register-gru.sh [--no-register]
# --no-register Only update .env; do not run RegisterGRUCompliantTokens.
#
# Note: RegisterGRUCompliantTokens requires (1) broadcast account has REGISTRAR_ROLE, and (2) the
# UniversalAssetRegistry *implementation* (not just proxy) exposes registerGRUCompliantAsset.
# Grant REGISTRAR_ROLE (role hash = keccak256("REGISTRAR_ROLE")):
# REGISTRAR_ROLE=0xedcc084d3dcd65a1f7f23c65c46722faca6953d28e43150a467cf43e5c309238
# cast send $UNIVERSAL_ASSET_REGISTRY "grantRole(bytes32,address)" $REGISTRAR_ROLE $DEPLOYER --rpc-url $RPC_URL_138 --private-key $PRIVATE_KEY --gas-limit 100000
# If registration still reverts (empty revert data), the proxy's implementation may be an older
# version without registerGRUCompliantAsset — upgrade the implementation then re-run.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="$PROJECT_ROOT/smom-dbis-138"
ENV_FILE="$SMOM/.env"
RUN_REGISTER=1
for a in "$@"; do [[ "$a" == "--no-register" ]] && RUN_REGISTER=0; done
if [[ ! -f "$ENV_FILE" ]]; then
echo "Missing $ENV_FILE. Create it first (e.g. copy from .env.example)." >&2
exit 1
fi
# Set or update a single variable in .env (value must not contain newline or unescaped specials)
set_env_var() {
local var="$1" val="$2"
if grep -q "^${var}=" "$ENV_FILE" 2>/dev/null; then
sed -i "s|^${var}=.*|${var}=${val}|" "$ENV_FILE"
else
echo "${var}=${val}" >> "$ENV_FILE"
fi
}
echo "=== Setting c* and token addresses in $ENV_FILE (canonical Chain 138) ==="
# Canonical compliant tokens (c*) — CONTRACT_ADDRESSES_REFERENCE + TOKENS_AND_NETWORKS
set_env_var "COMPLIANT_USDT" "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
set_env_var "COMPLIANT_USDC" "0xf22258f57794CC8E06237084b353Ab30fFfa640b"
set_env_var "CUSDT_ADDRESS_138" "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
set_env_var "CUSDC_ADDRESS_138" "0xf22258f57794CC8E06237084b353Ab30fFfa640b"
# cEURC (TOKENS_AND_NETWORKS_MINTABLE_TO_DEPLOYER)
set_env_var "CEURC_ADDRESS_138" "0x8085961F9cF02b4d800A3c6d386D31da4B34266a"
# Remaining c* (Chain 138) — from DeployCompliantFiatTokens / ENV_EXAMPLE_CONTENT.md; register as GRU
set_env_var "CEURT_ADDRESS_138" "0xdf4b71c61E5912712C1Bdd451416B9aC26949d72"
set_env_var "CGBPC_ADDRESS_138" "0x003960f16D9d34F2e98d62723B6721Fb92074aD2"
set_env_var "CGBPT_ADDRESS_138" "0x350f54e4D23795f86A9c03988c7135357CCaD97c"
set_env_var "CAUDC_ADDRESS_138" "0xD51482e567c03899eecE3CAe8a058161FD56069D"
set_env_var "CJPYC_ADDRESS_138" "0xEe269e1226a334182aace90056EE4ee5Cc8A6770"
set_env_var "CCHFC_ADDRESS_138" "0x873990849DDa5117d7C644f0aF24370797C03885"
set_env_var "CCADC_ADDRESS_138" "0x54dBd40cF05e15906A2C21f600937e96787f5679"
set_env_var "CXAUC_ADDRESS_138" "0x290E52a8819A4fbD0714E517225429aA2B70EC6b"
set_env_var "CXAUT_ADDRESS_138" "0x94e408E26c6FD8F4ee00b54dF19082FDA07dC96E"
# UniversalAssetRegistry (required for GRU registration)
set_env_var "UNIVERSAL_ASSET_REGISTRY" "0xAEE4b7fBe82E1F8295951584CBc772b8BBD68575"
# Other key tokens (Chain 138) — CONTRACT_ADDRESSES_REFERENCE canonical
set_env_var "WETH9" "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
set_env_var "WETH10" "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
set_env_var "LINK_TOKEN" "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
set_env_var "CCIP_FEE_TOKEN" "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
echo "Done. Set: COMPLIANT_USDT, COMPLIANT_USDC, all C*_ADDRESS_138 (cUSDT, cUSDC, cEURC, cEURT, cGBPC, cGBPT, cAUDC, cJPYC, cCHFC, cCADC, cXAUC, cXAUT), UNIVERSAL_ASSET_REGISTRY, WETH9, WETH10, LINK_TOKEN, CCIP_FEE_TOKEN."
echo "All c* on explorer.d-bis.org/tokens must be GRU-registered. See docs/04-configuration/EXPLORER_TOKENS_GRU_POLICY.md."
if [[ "$RUN_REGISTER" -eq 0 ]]; then
echo "Skipping GRU registration (--no-register)."
exit 0
fi
echo ""
echo "=== Registering all c* as GRU (RegisterGRUCompliantTokens) ==="
set -a
source "$ENV_FILE"
set +a
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
export RPC_URL_138="$RPC"
(cd "$SMOM" && forge script script/deploy/RegisterGRUCompliantTokens.s.sol \
--rpc-url "$RPC_URL_138" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price 1000000000)
echo "=== Done. ==="

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Append missing Chain 138 / DODO PMM env vars to smom-dbis-138/.env (no overwrite, no secrets).
# Usage: ./scripts/deployment/set-missing-dotenv-chain138.sh
# From smom-dbis-138: ../scripts/deployment/set-missing-dotenv-chain138.sh (if called from there, adjust path)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
ENV_FILE="${SMOM_ENV_FILE:-$PROJECT_ROOT/smom-dbis-138/.env}"
if [[ ! -f "$ENV_FILE" ]]; then
echo "Error: $ENV_FILE not found. Create it from env.additions.example or copy from .env.example." >&2
exit 1
fi
append_if_missing() {
local key="$1"
local value="$2"
if ! grep -qE "^${key}=" "$ENV_FILE" 2>/dev/null; then
echo "${key}=${value}" >> "$ENV_FILE"
echo " Added: $key"
fi
}
echo "=== Set missing dotenv (Chain 138 / DODO PMM) ==="
echo " Target: $ENV_FILE"
echo ""
append_if_missing "DODO_PMM_PROVIDER_ADDRESS" "0x8EF6657D2a86c569F6ffc337EE6b4260Bd2e59d0"
append_if_missing "DODO_PMM_INTEGRATION_ADDRESS" "0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D"
append_if_missing "POOL_CUSDTCUSDC" "0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8"
append_if_missing "POOL_CUSDTUSDT" "0xa3Ee6091696B28e5497b6F491fA1e99047250c59"
append_if_missing "POOL_CUSDCUSDC" "0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5"
echo ""
echo "Done. Verify: grep -E 'DODO_PMM|POOL_' $ENV_FILE"

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Run all contract tests before deploying (Phase 0.8).
# Usage: ./scripts/deployment/test-all-contracts-before-deploy.sh [--dry-run] [--alltra] [--no-match PATTERN] [--skip-build]
#
# --dry-run Print commands only; do not run forge build/test.
# --alltra Also run forge test and npm run test:e2e in alltra-lifi-settlement.
# --no-match Exclude tests matching PATTERN (e.g. "Fork|Mainnet|Integration|e2e" for unit-only).
# --skip-build Skip forge build; use existing artifacts (forge test still compiles changed files incrementally).
#
# Requires: run from repo root; forge in PATH.
# See: docs/03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md § Phase 0.8
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
DRY_RUN=""
ALLTRA=""
NO_MATCH=""
SKIP_BUILD=""
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run) DRY_RUN=1; shift ;;
--skip-build) SKIP_BUILD=1; shift ;;
--alltra) ALLTRA=1; shift ;;
--no-match)
if [[ -n "${2:-}" && "${2:0:1}" != "-" ]]; then
NO_MATCH="$2"; shift 2
else
NO_MATCH="Fork|Mainnet|Integration|e2e"; shift
fi
;;
*) shift ;;
esac
done
echo "=== Test all contracts before deploy ==="
echo ""
# 1) smom-dbis-138: forge build (skip if --skip-build or already built)
if [[ -n "$SKIP_BUILD" ]]; then
echo "--- smom-dbis-138: forge build (skipped, using existing artifacts) ---"
elif [[ -n "$DRY_RUN" ]]; then
echo "--- smom-dbis-138: forge build ---"
echo "[DRY-RUN] cd $PROJECT_ROOT/smom-dbis-138 && forge build"
else
echo "--- smom-dbis-138: forge build ---"
(cd "$PROJECT_ROOT/smom-dbis-138" && forge build) || { echo "FAIL: forge build (smom-dbis-138)" >&2; exit 1; }
fi
echo ""
# 2) smom-dbis-138: forge test (includes GRU c* integration: GRUCompliantTokensRegistryTest)
echo "--- smom-dbis-138: forge test ---"
if [[ -n "$NO_MATCH" ]]; then
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] cd $PROJECT_ROOT/smom-dbis-138 && forge test --no-match-test \"$NO_MATCH\""
else
(cd "$PROJECT_ROOT/smom-dbis-138" && forge test --no-match-test "$NO_MATCH") || { echo "FAIL: forge test (smom-dbis-138)" >&2; exit 1; }
fi
else
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] cd $PROJECT_ROOT/smom-dbis-138 && forge test"
else
(cd "$PROJECT_ROOT/smom-dbis-138" && forge test) || { echo "FAIL: forge test (smom-dbis-138)" >&2; exit 1; }
fi
fi
echo ""
# 3) Optional: alltra-lifi-settlement
if [[ -n "$ALLTRA" ]]; then
echo "--- alltra-lifi-settlement: forge test ---"
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] cd $PROJECT_ROOT/alltra-lifi-settlement && forge test"
echo "[DRY-RUN] cd $PROJECT_ROOT/alltra-lifi-settlement && npm run test:e2e"
else
(cd "$PROJECT_ROOT/alltra-lifi-settlement" && forge test) || { echo "FAIL: forge test (alltra-lifi-settlement)" >&2; exit 1; }
(cd "$PROJECT_ROOT/alltra-lifi-settlement" && npm run test:e2e) || { echo "FAIL: npm run test:e2e (alltra-lifi-settlement)" >&2; exit 1; }
fi
echo ""
fi
if [[ -n "$DRY_RUN" ]]; then
echo "Dry-run complete. Run without --dry-run to execute."
else
echo "All contract tests passed. Proceed with deployment (Phase 1+)."
fi

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Diagnose Proxmox VE issues on ml110, r630-01, r630-02
# Usage: ./scripts/diagnose-proxmox-hosts.sh [ml110|r630-01|r630-02|all]

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Diagnostic Script for VMID 5000 (Blockscout Explorer)
# Checks container status, services, and provides detailed diagnostics

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Enable root SSH login for LXC container (VMID 5000)
# This allows SSH access as root user

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Enable and fix storage configuration on r630-01 and r630-02
# Fixes storage configuration issues and enables LVM thin storage

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Master Script to Execute All Remaining Tasks
# Run this after services are installed

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Export all token reports (CoinGecko, CMC, token-list) for all supported chains.
# Use for CoinGecko/CMC submission and publication.
#
# Prerequisites: Token-aggregation API reachable at API_BASE (e.g. https://explorer.d-bis.org)
# Usage: ./scripts/export-all-token-reports-for-publication.sh [API_BASE]
#
# Output: docs/04-configuration/coingecko/exports/ (created if missing)
set -euo pipefail
API_BASE="${1:-https://explorer.d-bis.org}"
OUTPUT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)/docs/04-configuration/coingecko/exports"
mkdir -p "$OUTPUT_DIR"
# All chains from token-aggregation chains.ts
CHAINS=(138 651940 1 56 137 100 10 42161 8453 43114 25 42220 1111)
echo "Exporting token reports from $API_BASE to $OUTPUT_DIR"
echo ""
for chain in "${CHAINS[@]}"; do
echo "Chain $chain:"
curl -sS -L --connect-timeout 15 --max-time 60 \
"${API_BASE}/api/v1/report/coingecko?chainId=${chain}" \
-o "${OUTPUT_DIR}/report-coingecko-${chain}.json" 2>/dev/null || true
curl -sS -L --connect-timeout 15 --max-time 60 \
"${API_BASE}/api/v1/report/cmc?chainId=${chain}" \
-o "${OUTPUT_DIR}/report-cmc-${chain}.json" 2>/dev/null || true
if [ -s "${OUTPUT_DIR}/report-coingecko-${chain}.json" ]; then
count=$(jq -r '.tokens | length' "${OUTPUT_DIR}/report-coingecko-${chain}.json" 2>/dev/null || echo "?")
echo " coingecko: $count tokens"
else
echo " coingecko: (empty or failed)"
fi
if [ -s "${OUTPUT_DIR}/report-cmc-${chain}.json" ]; then
count=$(jq -r '.tokens | length' "${OUTPUT_DIR}/report-cmc-${chain}.json" 2>/dev/null || echo "?")
echo " cmc: $count tokens"
else
echo " cmc: (empty or failed)"
fi
done
echo ""
echo "Unified token list (all chains):"
curl -sS -L --connect-timeout 15 --max-time 60 \
"${API_BASE}/api/v1/report/token-list" \
-o "${OUTPUT_DIR}/token-list-all.json" 2>/dev/null || true
if [ -s "${OUTPUT_DIR}/token-list-all.json" ]; then
count=$(jq -r '.tokens | length' "${OUTPUT_DIR}/token-list-all.json" 2>/dev/null || echo "?")
echo " token-list: $count tokens"
else
echo " token-list: (empty or failed)"
fi
echo ""
echo "Cross-chain report (Chain 138):"
curl -sS -L --connect-timeout 15 --max-time 60 \
"${API_BASE}/api/v1/report/cross-chain?chainId=138" \
-o "${OUTPUT_DIR}/report-cross-chain-138.json" 2>/dev/null || true
if [ -s "${OUTPUT_DIR}/report-cross-chain-138.json" ]; then
echo " cross-chain: exported"
else
echo " cross-chain: (empty or failed)"
fi
echo ""
echo "Done. Exports in $OUTPUT_DIR"
echo "Use for: CoinGecko/CMC submission, DefiLlama, LiFi, etc. See docs/04-configuration/PUBLICATION_LOCATIONS_MASTER.md"

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Final verification and complete summary of all Blockscout setup
set -euo pipefail

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Fix WSL IP address from 192.168.11.4 to 192.168.11.23
# This script removes the old IP and adds the correct one

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Generate comprehensive Proxmox inventory report
set -euo pipefail

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -10,7 +10,10 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
PROXMOX_USER="${PROXMOX_USER:-root}"
# SSH user for shell (PROXMOX_USER in .env may be root@pam for API)
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
PROXMOX_USER="${PROXMOX_SSH_USER}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"

View File

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Verify every RPC (and optionally other) VM has an LXC config file on its Proxmox host.
# SSHs to each host and checks pct config <vmid> for expected VMIDs on that host.
# Run from project root.
# Usage: ./scripts/health/verify-lxc-configs-on-hosts.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
# SSH user: use root for shell (PROXMOX_USER in .env may be root@pam for API)
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# VMID:host (same mapping as check-rpc-vms-health.sh)
RPC_NODES=(
"2101:$R630_01"
"2201:$R630_02"
"2301:$ML110"
"2303:$R630_02"
"2304:$ML110"
"2305:$ML110"
"2306:$ML110"
"2307:$ML110"
"2308:$ML110"
"2400:$ML110"
"2401:$R630_02"
"2402:$ML110"
"2403:$ML110"
)
SSH_OPTS="-o ConnectTimeout=5 -o StrictHostKeyChecking=no"
RED='\033[0;31m'
GREEN='\033[0;32m'
CYAN='\033[0;36m'
NC='\033[0m'
echo -e "${CYAN}=== Verify LXC config files on Proxmox hosts ===${NC}"
echo ""
ok=0
fail=0
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host <<< "$entry"
ssh_target="${PROXMOX_SSH_USER}@${host}"
out=$(ssh $SSH_OPTS "$ssh_target" "pct config $vmid 2>&1" || true)
if echo "$out" | grep -q "Configuration file.*does not exist\|No such file"; then
echo -e " VMID $vmid on $host: ${RED}no LXC config${NC}"
((fail++)) || true
elif echo "$out" | grep -q "hostname:\|arch:"; then
echo -e " VMID $vmid on $host: ${GREEN}config OK${NC}"
((ok++)) || true
else
echo -e " VMID $vmid on $host: ${RED}unexpected ($out)${NC}"
((fail++)) || true
fi
done
echo ""
echo -e "${CYAN}Summary: ${GREEN}$ok with config${NC}, ${RED}$fail missing/error${NC} (total ${#RPC_NODES[@]} VMs)"
exit $fail

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Identify all containers using DHCP
# Shows current DHCP-assigned IPs

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env bash
# Install wscat (or ensure npx works) inside every RPC LXC container via SSH to Proxmox hosts.
# Run from project root. Containers need network and (for full install) Node.js/npm.
# Usage: ./scripts/install-wscat-in-rpc-containers.sh [--check-only]
# --check-only Only check if wscat/npx already available (fast). Default: install if missing (can take 30+ min for 13 containers).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
PROXMOX_SSH_USER="${PROXMOX_SSH_USER:-${PROXMOX_USER:-root}}"
[[ "$PROXMOX_SSH_USER" == *"@"* ]] && PROXMOX_SSH_USER="root"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
RPC_NODES=(
"2101:$R630_01"
"2201:$R630_02"
"2301:$ML110"
"2303:$R630_02"
"2304:$ML110"
"2305:$ML110"
"2306:$ML110"
"2307:$ML110"
"2308:$ML110"
"2400:$ML110"
"2401:$R630_02"
"2402:$ML110"
"2403:$ML110"
)
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=no"
CHECK_ONLY=false
[[ "${1:-}" == "--check-only" ]] && CHECK_ONLY=true
# Try to ensure wscat or npx is available inside the container (non-interactive, minimal install)
install_inside() {
local vmid="$1" host="$2"
ssh $SSH_OPTS "${PROXMOX_SSH_USER}@${host}" "pct exec $vmid -- bash -s" <<'INNER'
export DEBIAN_FRONTEND=noninteractive
if command -v wscat >/dev/null 2>&1; then
echo "ok:wscat"
exit 0
fi
if command -v npx >/dev/null 2>&1; then
npx -y wscat --version >/dev/null 2>&1 && echo "ok:npx" || echo "fail:npx"
exit 0
fi
# Install Node.js if missing (Debian/Ubuntu)
if ! command -v node >/dev/null 2>&1; then
apt-get update -qq && apt-get install -y -qq nodejs npm 2>/dev/null || true
fi
if command -v npm >/dev/null 2>&1; then
npm install -g wscat 2>/dev/null && echo "ok:installed" || echo "fail:install"
else
echo "fail:no_node"
fi
INNER
}
echo "=== Install wscat in RPC containers (check-only=$CHECK_ONLY) ==="
echo ""
ok=0
fail=0
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host <<< "$entry"
result=""
if "$CHECK_ONLY"; then
result=$(ssh $SSH_OPTS "${PROXMOX_SSH_USER}@${host}" "pct exec $vmid -- bash -c 'command -v wscat >/dev/null && echo ok:wscat || (command -v npx >/dev/null && npx -y wscat --version 2>/dev/null && echo ok:npx || echo fail)' 2>/dev/null" || echo "fail:ssh")
else
result=$(install_inside "$vmid" "$host" 2>/dev/null || echo "fail:ssh")
fi
if echo "$result" | grep -q "^ok:"; then
echo " VMID $vmid ($host): $result"
((ok++)) || true
else
echo " VMID $vmid ($host): $result"
((fail++)) || true
fi
done
echo ""
echo "Summary: $ok OK, $fail failed (total ${#RPC_NODES[@]} containers)"
exit $fail

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Investigate Transaction Failures on All RPC Nodes
# Checks logs, transaction pool, recent transactions, and node status

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Investigate why transactions persist after pool clearing
# Checks if transactions are in blockchain state vs transaction pool

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# List all Proxmox VMs with VMID, Name, IP Address, FQDN, and Description

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# Standardized .env loader function

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -euo pipefail
# MAC Vendor Lookup Script

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# Fix high storage: r630-01 data/local-lvm (72%) and r630-02 thin5 (84.6%).
# - thin5: Only VMID 5000 (Blockscout) uses it; prune logs/Docker inside 5000. Optional: migrate 5000 to thin2/thin6.
# - r630-01 data: Prune logs/journal in CTs on local-lvm to free space.
#
# Usage:
# ./scripts/maintenance/fix-storage-r630-01-and-thin5.sh [--dry-run] [--prune-only] [--migrate-5000 TARGET]
# --dry-run Print actions only.
# --prune-only Only prune logs (no migration).
# --migrate-5000 Migrate VMID 5000 from thin5 to TARGET (e.g. thin2 or thin6). Does backup/restore.
# Requires: SSH key-based access to r630-01 and r630-02. Run from project root (LAN).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=no"
DRY_RUN=false
PRUNE_ONLY=true
MIGRATE_5000_TARGET=""
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run) DRY_RUN=true; shift ;;
--prune-only) PRUNE_ONLY=true; shift ;;
--migrate-5000) MIGRATE_5000_TARGET="${2:-}"; PRUNE_ONLY=false; shift 2 ;;
*) shift ;;
esac
done
run_r630_01() { ssh $SSH_OPTS "root@$R630_01" "$@"; }
run_r630_02() { ssh $SSH_OPTS "root@$R630_02" "$@"; }
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
echo ""
echo "=== Fix storage: r630-01 data (72%) + r630-02 thin5 (84.6%) ==="
echo " dry-run=$DRY_RUN prune-only=$PRUNE_ONLY migrate-5000=${MIGRATE_5000_TARGET:-no}"
echo ""
# ----- Phase 1: Prune VMID 5000 (thin5) on r630-02 -----
echo "-------- Phase 1: Free space in VMID 5000 (thin5 on r630-02) --------"
if $DRY_RUN; then
echo " Would run: vmid5000-free-disk-and-logs.sh (journal vacuum, Docker prune, logrotate)"
else
if bash "${SCRIPT_DIR}/vmid5000-free-disk-and-logs.sh" 2>&1; then
log_ok "VMID 5000 prune done."
else
log_warn "VMID 5000 prune had warnings or failed (check above)."
fi
fi
echo ""
# ----- Phase 2: Prune logs in CTs on r630-01 (data) -----
echo "-------- Phase 2: Prune logs in CTs on r630-01 (local-lvm/data) --------"
VMIDS_R630_01=$(run_r630_01 "pct list 2>/dev/null | awk 'NR>1 && \$2==\"running\" {print \$1}'" 2>/dev/null || true)
if [[ -z "$VMIDS_R630_01" ]]; then
log_warn "Could not list running CTs on r630-01 (SSH failed?)."
else
for vmid in $VMIDS_R630_01; do
if $DRY_RUN; then
echo " Would prune in VMID $vmid: journalctl vacuum-time=3d vacuum-size=200M, logrotate -f"
continue
fi
run_r630_01 "pct exec $vmid -- journalctl --vacuum-time=3d 2>/dev/null; pct exec $vmid -- journalctl --vacuum-size=200M 2>/dev/null" 2>/dev/null || true
run_r630_01 "pct exec $vmid -- logrotate -f /etc/logrotate.conf 2>/dev/null" 2>/dev/null || true
run_r630_01 "pct exec $vmid -- sh -c ': > /var/log/syslog 2>/dev/null'" 2>/dev/null || true
log_ok "Pruned VMID $vmid"
done
fi
echo ""
# ----- Phase 3: Optional migrate 5000 from thin5 to thin2/thin6 -----
if [[ -n "$MIGRATE_5000_TARGET" ]]; then
echo "-------- Phase 3: Migrate VMID 5000 from thin5 to $MIGRATE_5000_TARGET --------"
if $DRY_RUN; then
echo " Would: stop 5000 -> vzdump to local -> destroy -> pct restore 5000 --storage $MIGRATE_5000_TARGET -> start"
echo " Target must be thin2 or thin6 (have free space on r630-02)."
else
run_r630_02 "pct stop 5000" 2>/dev/null || true
sleep 3
BACKUP_OUT=$(run_r630_02 "vzdump 5000 --storage local --compress gzip --mode stop --remove 0 2>&1" || true)
if echo "$BACKUP_OUT" | grep -qE "error|Error|failed|Failed"; then
log_warn "Backup failed: $BACKUP_OUT"
run_r630_02 "pct start 5000" 2>/dev/null || true
else
BACKUP_FILE=$(run_r630_02 "ls -t /var/lib/vz/dump/vzdump-lxc-5000-*.tar.gz 2>/dev/null | head -1" || true)
if [[ -z "$BACKUP_FILE" ]]; then
log_warn "Backup file not found."
run_r630_02 "pct start 5000" 2>/dev/null || true
else
run_r630_02 "pct destroy 5000 --force 2>/dev/null" || true
sleep 2
run_r630_02 "pct restore 5000 $BACKUP_FILE --storage $MIGRATE_5000_TARGET 2>&1" || { log_warn "Restore failed"; run_r630_02 "pct start 5000" 2>/dev/null; exit 1; }
run_r630_02 "rm -f $BACKUP_FILE"
run_r630_02 "pct start 5000"
log_ok "Migrated 5000 to $MIGRATE_5000_TARGET and started."
fi
fi
fi
echo ""
fi
echo "=== Re-check storage (run audit after a few minutes for thin pool reclaim) ==="
if ! $DRY_RUN; then
echo " r630-01: ssh root@$R630_01 'pvesm status | grep -E \"data|local-lvm\"'"
echo " r630-02: ssh root@$R630_02 'pvesm status | grep thin5'"
echo " Or: bash scripts/audit-proxmox-rpc-storage.sh"
fi
echo ""

View File

@@ -77,6 +77,14 @@ else
((FAIL++)) || true
fi
# 6. Database writable (required for deployments — Besu must write to /data/besu/database)
if run_ssh "pct exec $VMID -- sh -c 'mkdir -p /data/besu/database && touch /data/besu/database/.write_test && rm -f /data/besu/database/.write_test'" 2>/dev/null; then
log_ok "Database path writable (/data/besu/database)"
else
log_warn "Database path not writable (read-only?). Run: ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh"
((FAIL++)) || true
fi
echo ""
if [[ "${FAIL:-0}" -gt 0 ]]; then
log_warn "Health check had $FAIL issue(s). Fix: ./scripts/maintenance/fix-core-rpc-2101.sh or see docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md"

View File

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
# Migrate one LXC container from r630-01 data pool to thin1 (same host).
# Use to free space on data (currently 72%). See docs/04-configuration/MIGRATION_PLAN_R630_01_DATA.md
#
# Usage: bash scripts/maintenance/migrate-ct-r630-01-data-to-thin1.sh <VMID> [--dry-run]
# Example: bash scripts/maintenance/migrate-ct-r630-01-data-to-thin1.sh 10232
# Requires: SSH to r630-01. Run from project root (LAN).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
TARGET="thin1"
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=no"
VMID="${1:-}"
DRY_RUN=false
[[ "${2:-}" == "--dry-run" ]] && DRY_RUN=true
if [[ -z "$VMID" ]] || ! [[ "$VMID" =~ ^[0-9]+$ ]]; then
echo "Usage: $0 <VMID> [--dry-run]"
echo " Migrates CT VMID from data to thin1 on r630-01."
echo " Suggested VMIDs (small first): 10232 10233 10120 10100 10101 10235 10236 7804 8640 8642"
exit 1
fi
run() { ssh $SSH_OPTS "root@$R630_01" "$@"; }
echo "=== Migrate CT $VMID from data to $TARGET on r630-01 ==="
echo " dry-run=$DRY_RUN"
echo ""
# Check CT exists and is on data
ROOTFS=$(run "pct config $VMID 2>/dev/null | grep '^rootfs:'" || true)
if [[ -z "$ROOTFS" ]]; then
echo "ERROR: CT $VMID not found on r630-01."
exit 1
fi
if ! echo "$ROOTFS" | grep -q 'local-lvm\|data'; then
echo "WARNING: CT $VMID rootfs does not look like data pool: $ROOTFS"
read -p "Continue anyway? [y/N] " -n 1 -r; echo
[[ "${REPLY:-}" =~ ^[yY]$ ]] || exit 1
fi
if $DRY_RUN; then
echo "Would: stop $VMID -> vzdump -> destroy -> restore --storage $TARGET -> start"
exit 0
fi
echo "1. Stopping CT $VMID..."
run "pct stop $VMID" 2>/dev/null || true
sleep 3
echo "2. Backup (vzdump)..."
run "vzdump $VMID --storage local --compress gzip --mode stop --remove 0 2>&1" || true
BACKUP_FILE=$(run "ls -t /var/lib/vz/dump/vzdump-lxc-$VMID-*.tar.gz 2>/dev/null | head -1" || true)
if [[ -z "$BACKUP_FILE" ]]; then
echo "ERROR: Backup not found. Start CT back: ssh root@$R630_01 'pct start $VMID'"
exit 1
fi
echo " Backup: $BACKUP_FILE"
echo "3. Destroy CT..."
run "pct destroy $VMID --force 2>/dev/null" || true
sleep 2
echo "4. Restore to $TARGET..."
run "pct restore $VMID $BACKUP_FILE --storage $TARGET 2>&1" || {
echo "ERROR: Restore failed. Restore backup manually."
exit 1
}
echo "5. Remove backup..."
run "rm -f $BACKUP_FILE" 2>/dev/null || true
echo "6. Start CT..."
run "pct start $VMID"
echo ""
echo "Done. CT $VMID is now on $TARGET. Verify: ssh root@$R630_01 'pct config $VMID | grep rootfs'"

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Migrate VMID 5000 (Blockscout) from thin5 to thin2 on r630-02. No prune steps.
# Usage: bash scripts/maintenance/migrate-vmid-5000-to-thin2.sh
# Requires: SSH to r630-02. Run from project root (LAN).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
TARGET="thin2"
VMID=5000
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=no"
run() { ssh $SSH_OPTS "root@$R630_02" "$@"; }
echo "=== Migrate VMID $VMID from thin5 to $TARGET on r630-02 ==="
echo ""
echo "1. Stopping CT $VMID..."
run "pct stop $VMID" 2>/dev/null || true
sleep 5
echo "2. Backup (vzdump)..."
run "vzdump $VMID --storage local --compress gzip --mode stop --remove 0 2>&1" || true
BACKUP_FILE=$(run "ls -t /var/lib/vz/dump/vzdump-lxc-$VMID-*.tar.gz 2>/dev/null | head -1" || true)
if [[ -z "$BACKUP_FILE" ]]; then
echo "ERROR: Backup file not found. Starting CT back."
run "pct start $VMID" 2>/dev/null || true
exit 1
fi
echo " Backup: $BACKUP_FILE"
echo "3. Destroy CT..."
run "pct destroy $VMID --force 2>/dev/null" || true
sleep 3
echo "4. Restore to $TARGET..."
run "pct restore $VMID $BACKUP_FILE --storage $TARGET 2>&1" || {
echo "ERROR: Restore failed."
exit 1
}
echo "5. Remove backup..."
run "rm -f $BACKUP_FILE" 2>/dev/null || true
echo "6. Start CT..."
run "pct start $VMID"
echo ""
echo "Done. VMID $VMID is now on $TARGET. Verify: ssh root@$R630_02 'pct config 5000 | grep rootfs'"

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Create Snapshots Before Making Changes
# Usage: ./snapshot-before-change.sh <VMID> [snapshot-name]
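# The env form resolves bash via PATH rather than hard-coding /bin/bash, which helps on
# hosts where bash lives elsewhere. Quick sanity check (a sketch):
#   env bash -c 'echo "bash $BASH_VERSION at $(command -v bash)"'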

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Migrate hostnames on Proxmox hosts: pve → r630-01, pve2 → r630-02
# This script properly renames hostnames and updates all necessary configurations

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Migrate All Discovered Secrets to Admin Vault
# Migrates all secrets from MASTER_SECRETS_INVENTORY.md to Sankofa Admin Vault

Some files were not shown because too many files have changed in this diff