merge origin/main: sync upstream with Mission Control

This commit is contained in:
2026-03-29 10:11:51 +08:00
1861 changed files with 21945 additions and 48242 deletions

View File

@@ -17,7 +17,7 @@ echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="${DBIS_POSTGRES_PRIMARY:-192.168.11.105}"
REDIS_IP="192.168.11.120"
REDIS_IP="192.168.11.125"
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
for vmid in 10150 10151; do

View File

@@ -11,7 +11,7 @@ echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="192.168.11.105"
REDIS_IP="192.168.11.120"
REDIS_IP="192.168.11.125"
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
for vmid in 10150 10151; do

View File

@@ -24,7 +24,7 @@ done
for vmid in 10150 10151; do
ssh root@${NODE_IP} "pct enter $vmid -- bash -c '
find /opt -name \".env\" -exec sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771@${DBIS_POSTGRES_PRIMARY:-192.168.11.105}:5432/dbis_core|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${DBIS_REDIS_IP:-192.168.11.120}:6379|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${DBIS_REDIS_IP:-192.168.11.125}:6379|g\" {} \;
echo \"Dependencies configured for CT $vmid\"
'"
done

View File

@@ -196,7 +196,7 @@ done
for vmid in 10150 10151; do
ssh root@${NODE_IP} "pct enter $vmid -- bash -c '
find /opt -name \".env\" -exec sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771@${DBIS_POSTGRES_PRIMARY:-192.168.11.105}:5432/dbis_core|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://192.168.11.120:6379|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://192.168.11.125:6379|g\" {} \;
echo \"Dependencies configured for CT $vmid\"
'"
done

View File

@@ -196,7 +196,7 @@ done
for vmid in 10150 10151; do
ssh root@${NODE_IP} "pct enter $vmid -- bash -c '
find /opt -name \".env\" -exec sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771@192.168.11.105:5432/dbis_core|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://192.168.11.120:6379|g\" {} \;
find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://192.168.11.125:6379|g\" {} \;
echo \"Dependencies configured for CT $vmid\"
'"
done

View File

@@ -180,7 +180,7 @@ declare -a ENDPOINTS=(
# Application Services - DBIS Core Services
"10100|${DBIS_POSTGRES_PRIMARY:-192.168.11.105}|dbis-postgres-primary|PostgreSQL|tcp|5432|||Running|Primary database"
"10101|${DBIS_POSTGRES_REPLICA:-192.168.11.106}|dbis-postgres-replica-1|PostgreSQL|tcp|5432|||Running|Database replica"
"10120|192.168.11.120|dbis-redis|Redis|tcp|6379|||Running|Cache layer"
"10120|192.168.11.125|dbis-redis|Redis|tcp|6379|||Running|Cache layer"
"10130|${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}|dbis-frontend|Web|http|80|dbis-admin.d-bis.org,secure.d-bis.org|Running|Frontend admin console"
"10130|${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}|dbis-frontend|Web|https|443|||Running|Frontend admin console"
"10150|${IP_DBIS_API:-192.168.11.155}|dbis-api-primary|API|http|3000|dbis-api.d-bis.org|Running|Primary API server"

View File

@@ -174,7 +174,7 @@ declare -a ENDPOINTS=(
# Application Services - DBIS Core Services
"10100|192.168.11.105|dbis-postgres-primary|PostgreSQL|tcp|5432|||Running|Primary database"
"10101|192.168.11.106|dbis-postgres-replica-1|PostgreSQL|tcp|5432|||Running|Database replica"
"10120|192.168.11.120|dbis-redis|Redis|tcp|6379|||Running|Cache layer"
"10120|192.168.11.125|dbis-redis|Redis|tcp|6379|||Running|Cache layer"
"10130|192.168.11.130|dbis-frontend|Web|http|80|dbis-admin.d-bis.org,secure.d-bis.org|Running|Frontend admin console"
"10130|192.168.11.130|dbis-frontend|Web|https|443|||Running|Frontend admin console"
"10150|192.168.11.155|dbis-api-primary|API|http|3000|dbis-api.d-bis.org|Running|Primary API server"

View File

@@ -83,7 +83,7 @@ IP_TO_VMID["${IP_VALIDATOR_3:-${IP_VALIDATOR_3:-${IP_VALIDATOR_3:-${IP_VALIDATOR
IP_TO_VMID["${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-192.168.11.104}}}}"]="1004"
IP_TO_VMID["${DBIS_POSTGRES_PRIMARY:-192.168.11.105}"]="10100"
IP_TO_VMID["${DBIS_POSTGRES_REPLICA:-192.168.11.106}"]="10101"
IP_TO_VMID["192.168.11.120"]="10120"
IP_TO_VMID["192.168.11.125"]="10120"
IP_TO_VMID["${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}"]="10130"
IP_TO_VMID["${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}"]="5000"
IP_TO_VMID["${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-192.168.11.150}}}}"]="1500"

View File

@@ -77,7 +77,7 @@ IP_TO_VMID["192.168.11.103"]="1003"
IP_TO_VMID["192.168.11.104"]="1004"
IP_TO_VMID["192.168.11.105"]="10100"
IP_TO_VMID["192.168.11.106"]="10101"
IP_TO_VMID["192.168.11.120"]="10120"
IP_TO_VMID["192.168.11.125"]="10120"
IP_TO_VMID["192.168.11.130"]="10130"
IP_TO_VMID["192.168.11.140"]="5000"
IP_TO_VMID["192.168.11.150"]="1500"

View File

@@ -40,7 +40,7 @@ IP_TO_VMID["${DBIS_POSTGRES_REPLICA:-192.168.11.106}"]="10101"
IP_TO_VMID["192.168.11.110"]="106"
IP_TO_VMID["192.168.11.111"]="107"
IP_TO_VMID["192.168.11.112"]="108"
IP_TO_VMID["192.168.11.120"]="10120"
IP_TO_VMID["192.168.11.125"]="10120"
IP_TO_VMID["${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}"]="10130"
IP_TO_VMID["${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}"]="5000"
IP_TO_VMID["${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-192.168.11.150}}}}"]="1500"
@@ -94,7 +94,7 @@ IP_TO_HOSTNAME["${DBIS_POSTGRES_REPLICA:-192.168.11.106}"]="dbis-postgres-replic
IP_TO_HOSTNAME["192.168.11.110"]="redis-rpc-translator"
IP_TO_HOSTNAME["192.168.11.111"]="web3signer-rpc-translator"
IP_TO_HOSTNAME["192.168.11.112"]="vault-rpc-translator"
IP_TO_HOSTNAME["192.168.11.120"]="dbis-redis"
IP_TO_HOSTNAME["192.168.11.125"]="dbis-redis"
IP_TO_HOSTNAME["${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}"]="dbis-frontend"
IP_TO_HOSTNAME["${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}"]="blockscout-1"
IP_TO_HOSTNAME["${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-192.168.11.150}}}}"]="besu-sentry-1"

View File

@@ -34,7 +34,7 @@ IP_TO_VMID["192.168.11.106"]="10101"
IP_TO_VMID["192.168.11.110"]="106"
IP_TO_VMID["192.168.11.111"]="107"
IP_TO_VMID["192.168.11.112"]="108"
IP_TO_VMID["192.168.11.120"]="10120"
IP_TO_VMID["192.168.11.125"]="10120"
IP_TO_VMID["192.168.11.130"]="10130"
IP_TO_VMID["192.168.11.140"]="5000"
IP_TO_VMID["192.168.11.150"]="1500"
@@ -88,7 +88,7 @@ IP_TO_HOSTNAME["192.168.11.106"]="dbis-postgres-replica-1"
IP_TO_HOSTNAME["192.168.11.110"]="redis-rpc-translator"
IP_TO_HOSTNAME["192.168.11.111"]="web3signer-rpc-translator"
IP_TO_HOSTNAME["192.168.11.112"]="vault-rpc-translator"
IP_TO_HOSTNAME["192.168.11.120"]="dbis-redis"
IP_TO_HOSTNAME["192.168.11.125"]="dbis-redis"
IP_TO_HOSTNAME["192.168.11.130"]="dbis-frontend"
IP_TO_HOSTNAME["192.168.11.140"]="blockscout-1"
IP_TO_HOSTNAME["192.168.11.150"]="besu-sentry-1"

View File

@@ -11,18 +11,18 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SSH_OPTS="-o ConnectTimeout=20 -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
# Same VMID -> host as deploy-besu-node-lists-to-all.sh
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
for v in 1000 1001 1002 1500 1501 1502 2101; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403)
echo "Restarting Besu on all nodes (to reload static-nodes.json and permissions-nodes.toml)"
if $DRY_RUN; then echo " [dry-run]"; fi
@@ -46,7 +46,7 @@ for vmid in "${BESU_VMIDS[@]}"; do
continue
fi
# Detect Besu unit: besu-validator, besu-sentry, besu-rpc, or generic besu.service (1505-1508, 2500-2505)
result=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- bash -c 'svc=\$(systemctl list-units --type=service --no-legend 2>/dev/null | grep -iE \"besu-validator|besu-sentry|besu-rpc|besu\\.service\" | head -1 | awk \"{print \\\$1}\"); if [ -n \"\$svc\" ]; then systemctl restart \"\$svc\" && echo \"OK:\$svc\"; else echo \"NONE\"; fi'" 2>/dev/null || echo "FAIL")
result=$(ssh $SSH_OPTS "root@$host" "timeout 180 pct exec $vmid -- bash -c 'svc=\$(systemctl list-units --type=service --no-legend 2>/dev/null | grep -iE \"besu-validator|besu-sentry|besu-rpc|besu\\.service\" | head -1 | awk \"{print \\\$1}\"); if [ -n \"\$svc\" ]; then systemctl restart \"\$svc\" && echo \"OK:\$svc\"; else echo \"NONE\"; fi'" 2>/dev/null || echo "FAIL")
if [[ "$result" == OK:* ]]; then
echo "VMID $vmid @ $host: restarted (${result#OK:})"
((ok++)) || true

View File

@@ -121,7 +121,7 @@ echo "$ACTIVE_CERTS" | while IFS='|' read -r cert_id domain_names; do
done
# Check for duplicates in sankofa.nexus domains
SANKOFA_DOMAINS="sankofa.nexus,www.sankofa.nexus,phoenix.sankofa.nexus,www.phoenix.sankofa.nexus,the-order.sankofa.nexus"
SANKOFA_DOMAINS="sankofa.nexus,www.sankofa.nexus,phoenix.sankofa.nexus,www.phoenix.sankofa.nexus,the-order.sankofa.nexus,www.the-order.sankofa.nexus"
SANKOFA_CERTS=$(echo "$CERT_JSON" | jq -r ".[] | select(.is_deleted == 0) | select(.domain_names | tostring | test(\"sankofa.nexus\")) | .id" 2>/dev/null || echo "")
if [ -n "$SANKOFA_CERTS" ]; then

View File

@@ -162,7 +162,7 @@ echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="${DBIS_POSTGRES_PRIMARY:-192.168.11.105}"
REDIS_IP="${DBIS_REDIS_IP:-192.168.11.120}"
REDIS_IP="${DBIS_REDIS_IP:-192.168.11.125}"
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
for vmid in 10150 10151; do

View File

@@ -162,7 +162,7 @@ echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="192.168.11.105"
REDIS_IP="192.168.11.120"
REDIS_IP="192.168.11.125"
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
for vmid in 10150 10151; do

View File

@@ -26,7 +26,7 @@ CONTAINER_CONFIGS[10001]="order-postgres-replica:${ORDER_POSTGRES_REPLICA:-${ORD
CONTAINER_CONFIGS[10100]="dbis-postgres:${DBIS_POSTGRES_PRIMARY:-192.168.11.105}/24:${NETWORK_GATEWAY:-192.168.11.1}:2:2048:8"
CONTAINER_CONFIGS[10101]="dbis-postgres-replica:${DBIS_POSTGRES_REPLICA:-192.168.11.106}/24:${NETWORK_GATEWAY:-192.168.11.1}:2:2048:8"
# Redis containers
CONTAINER_CONFIGS[10020]="order-redis:${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-192.168.11.46}}}/24:${NETWORK_GATEWAY:-192.168.11.1}:1:1024:4"
CONTAINER_CONFIGS[10020]="order-redis:${ORDER_REDIS_IP:-192.168.11.38}/24:${NETWORK_GATEWAY:-192.168.11.1}:1:1024:4"
CONTAINER_CONFIGS[10120]="dbis-redis:${DBIS_REDIS_IP:-192.168.11.125}/24:${NETWORK_GATEWAY:-192.168.11.1}:1:1024:4"
backup_container() {

View File

@@ -11,6 +11,8 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SSH_OPTS=(-o ConnectTimeout=20 -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new)
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
@@ -27,14 +29,14 @@ fi
# VMID -> Proxmox host (per BESU_VMIDS_FROM_PROXMOX / list-besu-vmids-from-proxmox.sh)
declare -A HOST_BY_VMID
# r630-01 (192.168.11.11)
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
# r630-01 (192.168.11.11) — 2500-2505 removed (destroyed; see ALL_VMIDS_ENDPOINTS.md)
for v in 1000 1001 1002 1500 1501 1502 2101; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
# r630-02 (192.168.11.12)
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
# ml110 (192.168.11.10)
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403)
echo "Deploying Besu node lists from config/besu-node-lists/ to all nodes"
echo " static-nodes.json -> /etc/besu/static-nodes.json"
@@ -56,15 +58,16 @@ for host in "${!VMIDS_ON_HOST[@]}"; do
echo " [dry-run] would scp and pct push to:${vmids}"
continue
fi
scp -o StrictHostKeyChecking=accept-new -q "$STATIC" "$PERMS" "root@${host}:/tmp/" || { echo " Failed to scp to $host"; continue; }
scp "${SSH_OPTS[@]}" -q "$STATIC" "$PERMS" "root@${host}:/tmp/" || { echo " Failed to scp to $host"; continue; }
for vmid in $vmids; do
if ssh -o StrictHostKeyChecking=accept-new "root@${host}" "pct status $vmid 2>/dev/null | grep -q running" 2>/dev/null; then
ssh -o StrictHostKeyChecking=accept-new "root@${host}" "pct push $vmid /tmp/static-nodes.json /etc/besu/static-nodes.json && pct push $vmid /tmp/permissions-nodes.toml /etc/besu/permissions-nodes.toml && pct exec $vmid -- chown besu:besu /etc/besu/static-nodes.json /etc/besu/permissions-nodes.toml 2>/dev/null || pct exec $vmid -- chown root:root /etc/besu/static-nodes.json /etc/besu/permissions-nodes.toml 2>/dev/null" 2>/dev/null && echo " OK VMID $vmid" || echo " Skip/fail VMID $vmid"
if ssh "${SSH_OPTS[@]}" "root@${host}" "pct status $vmid 2>/dev/null | grep -q running" 2>/dev/null; then
# timeout: pct push can hang on slow storage; do not block the whole fleet deploy
ssh "${SSH_OPTS[@]}" "root@${host}" "timeout 180 bash -c 'pct push $vmid /tmp/static-nodes.json /etc/besu/static-nodes.json && pct push $vmid /tmp/permissions-nodes.toml /etc/besu/permissions-nodes.toml && (pct exec $vmid -- chown besu:besu /etc/besu/static-nodes.json /etc/besu/permissions-nodes.toml 2>/dev/null || pct exec $vmid -- chown root:root /etc/besu/static-nodes.json /etc/besu/permissions-nodes.toml 2>/dev/null)'" 2>/dev/null && echo " OK VMID $vmid" || echo " Skip/fail VMID $vmid"
else
echo " Skip VMID $vmid (not running)"
fi
done
ssh -o StrictHostKeyChecking=accept-new "root@${host}" "rm -f /tmp/static-nodes.json /tmp/permissions-nodes.toml" 2>/dev/null || true
ssh "${SSH_OPTS[@]}" "root@${host}" "rm -f /tmp/static-nodes.json /tmp/permissions-nodes.toml" 2>/dev/null || true
done
echo ""

View File

@@ -25,11 +25,11 @@ WETH10="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
LINK="0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
CUSDT="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
CUSDC="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
USDT_OFFICIAL="0x15DF1D5BFDD8Aa4b380445D4e3E9B38d34283619"
USDT_OFFICIAL="0x004b63A7B5b0E06f6bB6adb4a5F9f590BF3182D1"
# PMM pool addresses (from LIQUIDITY_POOLS_MASTER_MAP / ADDRESS_MATRIX)
POOL_CUSDTCUSDC="0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8"
POOL_CUSDTUSDT="0xa3Ee6091696B28e5497b6F491fA1e99047250c59"
POOL_CUSDTUSDT="0x6fc60DEDc92a2047062294488539992710b99D71"
POOL_CUSDCUSDC="0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5"
get_balance() {

View File

@@ -26,7 +26,7 @@ set +a
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
GAS_PRICE="${GAS_PRICE_138:-${GAS_PRICE:-1000000000}}"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D}}"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x5BDc62f1ae7D630c37A8B363a1d49845356Ee72d}}"
export RPC_URL_138="$RPC"
cd "$SMOM"

View File

@@ -97,7 +97,7 @@ fi
cd "$SMOM"
export RPC_URL_138="$RPC"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D}}"
export DODO_PMM_INTEGRATION="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-0x5BDc62f1ae7D630c37A8B363a1d49845356Ee72d}}"
# Skip TransactionMirror deploy if already deployed at TRANSACTION_MIRROR_ADDRESS or if --skip-mirror
MIRROR_ADDR="${TRANSACTION_MIRROR_ADDRESS:-}"

View File

@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Enable working login on https://sankofa.nexus:
# - Fix Keycloak systemd (JAVA_HOME line; hostname + proxy headers for NPM).
# - Remove .env.local on CT 7801; install .env with PORTAL_LOCAL_LOGIN_* + NEXTAUTH_SECRET.
# - Run sync-sankofa-portal-7801.sh (rebuild portal with updated auth.ts).
#
# Usage: ./scripts/deployment/enable-sankofa-portal-login-7801.sh [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional LAN IP map; fall back to the hard-coded defaults below when absent.
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Proxmox node hosting both CTs — presumably the same node for 7801 and 7802
# (a single PROXMOX_HOST is used for every pct call); override via env if not.
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID_PORTAL="${SANKOFA_PORTAL_VMID:-7801}"  # CT running the portal app
VMID_KC="${SANKOFA_KEYCLOAK_VMID:-7802}"    # CT running Keycloak
# NOTE(review): kept as a plain string (not an array) on purpose — it is
# expanded unquoted below so the words split into separate ssh/scp options.
SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=accept-new -o ConnectTimeout=15"
LOCAL_EMAIL="${PORTAL_LOCAL_LOGIN_EMAIL:-portal@sankofa.nexus}"
# Dry-run exits before any secret is generated or any remote host is touched.
if [[ "${1:-}" == "--dry-run" ]]; then
echo "[DRY-RUN] Would patch Keycloak ${VMID_KC}, write .env on ${VMID_PORTAL}, sync portal"
exit 0
fi
# Fresh credentials every run: a 24-char alphanumeric login password
# (base64 with /+= stripped, truncated to 24) and a NextAuth secret.
# Both are printed for the operator at the end of the script.
GEN_PASS="$(openssl rand -base64 24 | tr -d '/+=' | cut -c1-24)"
NEXTAUTH_SEC="$(openssl rand -base64 32)"
ENV_TMP="$(mktemp)"
trap 'rm -f "$ENV_TMP"' EXIT
# Portal .env, written locally then staged on the Proxmox host and pct-pushed
# into the CT. Unquoted EOF: ${NEXTAUTH_SEC}/${LOCAL_EMAIL}/${GEN_PASS} expand
# here, on this machine. KEYCLOAK_CLIENT_SECRET is intentionally left blank
# until SSO is configured (see the final instructions printed below).
cat > "$ENV_TMP" <<EOF
NEXT_PUBLIC_GRAPHQL_ENDPOINT=http://192.168.11.50:4000/graphql
NEXT_PUBLIC_GRAPHQL_WS_ENDPOINT=ws://192.168.11.50:4000/graphql-ws
NEXTAUTH_URL=https://sankofa.nexus
NEXTAUTH_SECRET=${NEXTAUTH_SEC}
KEYCLOAK_URL=https://keycloak.sankofa.nexus
KEYCLOAK_REALM=master
KEYCLOAK_CLIENT_ID=sankofa-portal
KEYCLOAK_CLIENT_SECRET=
PORTAL_LOCAL_LOGIN_EMAIL=${LOCAL_EMAIL}
PORTAL_LOCAL_LOGIN_PASSWORD=${GEN_PASS}
PORT=3000
NODE_ENV=production
EOF
# Stage the env file on the Proxmox host ($SSH_OPTS unquoted — see note above).
scp $SSH_OPTS "$ENV_TMP" "root@${PROXMOX_HOST}:/tmp/sankofa-portal.env"
# Remote phase. Quoted 'REMOTE' delimiter: nothing below expands locally; the
# two VMIDs are handed over as positional args ($1/$2). The remote script:
#   1. pct-pushes the staged .env into the portal CT and removes the staging copy,
#   2. patches keycloak.service inside the Keycloak CT with an idempotent
#      Python edit (guards with "not in raw" checks so re-runs are no-ops):
#      closes an unterminated JAVA_HOME quote and adds KC_HOSTNAME /
#      KC_HOSTNAME_PORT / KC_PROXY_HEADERS for operation behind NPM,
#   3. reloads systemd + restarts Keycloak,
#   4. deletes the stale .env.local that shadowed the new .env.
ssh $SSH_OPTS "root@${PROXMOX_HOST}" bash -s "$VMID_KC" "$VMID_PORTAL" <<'REMOTE'
set -euo pipefail
VMID_KC="$1"
VMID_PORTAL="$2"
pct push "${VMID_PORTAL}" /tmp/sankofa-portal.env /opt/sankofa-portal/.env
rm -f /tmp/sankofa-portal.env
pct exec "${VMID_KC}" -- python3 <<'PY'
from pathlib import Path
p = Path("/etc/systemd/system/keycloak.service")
raw = p.read_text()
if 'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64"' not in raw:
raw = raw.replace(
'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64',
'Environment="JAVA_HOME=/usr/lib/jvm/java-21-openjdk-amd64"',
1,
)
if "KC_HOSTNAME=keycloak.sankofa.nexus" not in raw:
raw = raw.replace(
'Environment="KC_HTTP_PORT=8080"',
'Environment="KC_HTTP_PORT=8080"\nEnvironment="KC_HOSTNAME=keycloak.sankofa.nexus"\nEnvironment="KC_HOSTNAME_PORT=443"\nEnvironment="KC_PROXY_HEADERS=xforwarded"',
1,
)
p.write_text(raw)
PY
pct exec "${VMID_KC}" -- systemctl daemon-reload
pct exec "${VMID_KC}" -- systemctl restart keycloak
pct exec "${VMID_PORTAL}" -- rm -f /opt/sankofa-portal/.env.local
REMOTE
echo ""
echo "📤 Syncing portal source + rebuild…"
# Rebuild/redeploy the portal so the freshly installed .env takes effect.
bash "${SCRIPT_DIR}/sync-sankofa-portal-7801.sh"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Sign in at https://sankofa.nexus"
echo " Email: ${LOCAL_EMAIL}"
echo " Password: ${GEN_PASS}"
echo ""
echo "SSO: Add NPM host keycloak.sankofa.nexus → ${IP_KEYCLOAK:-192.168.11.52}:8080, then create Keycloak"
echo " confidential client sankofa-portal; set KEYCLOAK_CLIENT_SECRET in .env and re-sync."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
# Push Chain 138 PMM mesh into Proxmox LXC and enable systemd.
# Copies: pmm-mesh-6s-automation.sh, update-oracle-price.sh, smom-dbis-138/.env, and this host's cast binary.
#
# Run from repo root (LAN + SSH root@Proxmox BatchMode). Requires: cast in PATH, smom-dbis-138/.env.
#
# Usage:
# ./scripts/deployment/install-pmm-mesh-systemd-on-proxmox-lxc.sh [--dry-run]
# Env:
# PMM_MESH_LXC_TARGETS="192.168.11.11:3500 192.168.11.12:5700"
#
# Note: Running the full mesh on multiple hosts repeats performUpkeep / oracle ticks (extra gas).
# Set ENABLE_MESH_KEEPER_TICK=0 in a drop-in if you want only one keeper driver.
#
# Hardened LXCs (e.g. unprivileged) may forbid writing /etc/systemd/system inside the guest.
# In that case this script installs chain138-pmm-mesh-pct-<VMID>.service on the Proxmox host
# so systemd runs: pct exec <VMID> -- bash …/pmm-mesh-6s-automation.sh
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="$PROJECT_ROOT/smom-dbis-138"
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; done
# Space-separated "proxmox-host:vmid" pairs; each pair gets a full install pass.
TARGETS="${PMM_MESH_LXC_TARGETS:-192.168.11.11:3500 192.168.11.12:5700}"
# Ship this workstation's cast binary into each CT — the guests need no
# Foundry toolchain of their own.
CAST_SRC="$(command -v cast || true)"
[[ -x "$CAST_SRC" ]] || { echo "ERROR: cast not in PATH" >&2; exit 1; }
[[ -f "$SMOM/.env" ]] || { echo "ERROR: missing $SMOM/.env" >&2; exit 1; }
MESH_TGZ="$(mktemp /tmp/c138-mesh-XXXXXX.tgz)"
cleanup() { rm -f "$MESH_TGZ" 2>/dev/null || true; }
trap cleanup EXIT
# Bundle the two mesh scripts plus .env (contains private key material —
# transferred only via scp/pct push, never echoed).
tar czf "$MESH_TGZ" -C "$PROJECT_ROOT" \
smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh \
smom-dbis-138/scripts/update-oracle-price.sh \
smom-dbis-138/.env
log() { echo "[install-pmm-mesh] $*"; }
for pair in $TARGETS; do
# "host:vmid" — host is everything before the first ':', vmid after the last.
host="${pair%%:*}"
vmid="${pair##*:}"
[[ -n "$host" && -n "$vmid" ]] || { log "skip bad target: $pair"; continue; }
log "root@$host VMID $vmid"
if [[ "$DRY_RUN" == true ]]; then
log "DRY-RUN: would scp + pct push $vmid + systemctl enable --now"
continue
fi
# Stage tarball + cast on the Proxmox host, then run the quoted REMOTE
# heredoc there (VMID handed over via the environment). Remote phases:
#   1. stop any running mesh unit (host wrapper or guest) before upgrading,
#   2. pct-push the bundle + cast into the CT and unpack under /var/tmp
#      (writable as CT root even on unprivileged CTs),
#   3. install curl via apt, or fall back to a static curl binary when apt
#      is unusable inside the CT,
#   4. probe whether the guest's /etc/systemd/system is writable; if so,
#      install + enable the guest unit chain138-pmm-mesh-automation.service,
#      otherwise install a host-side pct-exec wrapper unit
#      chain138-pmm-mesh-pct-<VMID>.service — and disable whichever of the
#      two variants is not being used,
#   5. verify the chosen unit is active (dumping journal on failure),
#      then clean up the staged files on the Proxmox host.
scp -o BatchMode=yes -o ConnectTimeout=20 "$MESH_TGZ" "root@${host}:/tmp/c138-mesh-install.tgz"
scp -o BatchMode=yes -o ConnectTimeout=20 "$CAST_SRC" "root@${host}:/tmp/cast-bin-lxc"
ssh -o BatchMode=yes -o ConnectTimeout=25 "root@${host}" \
"VMID=${vmid} bash -s" <<'REMOTE'
set -euo pipefail
[[ -n "${VMID:-}" ]] || exit 1
[[ -f /tmp/c138-mesh-install.tgz ]] || { echo "missing /tmp/c138-mesh-install.tgz"; exit 1; }
[[ -f /tmp/cast-bin-lxc ]] || { echo "missing /tmp/cast-bin-lxc"; exit 1; }
# Stop mesh before rm/tar so host pct unit or guest loop does not hit a missing script mid-upgrade.
systemctl stop "chain138-pmm-mesh-pct-${VMID}.service" 2>/dev/null || true
pct exec "$VMID" -- systemctl stop chain138-pmm-mesh-automation.service 2>/dev/null || true
sleep 1
pct push "$VMID" /tmp/c138-mesh-install.tgz /var/tmp/c138-mesh.tgz
pct push "$VMID" /tmp/cast-bin-lxc /var/tmp/cast-bin
# Unprivileged LXCs may have /opt and /var/lib root-owned on host as nobody: use /var/tmp (writable as CT root).
BASE=/var/tmp/chain138-mesh
pct exec "$VMID" -- mkdir -p "$BASE/bin"
pct exec "$VMID" -- rm -rf "$BASE/smom-dbis-138"
pct exec "$VMID" -- tar xzf /var/tmp/c138-mesh.tgz -C "$BASE"
if pct exec "$VMID" -- install -m 755 /var/tmp/cast-bin "$BASE/bin/cast" 2>/dev/null; then
:
else
pct exec "$VMID" -- cp /var/tmp/cast-bin "$BASE/bin/cast"
pct exec "$VMID" -- chmod 755 "$BASE/bin/cast"
fi
set +e
pct exec "$VMID" -- env DEBIAN_FRONTEND=noninteractive apt-get update -qq
A1=$?
pct exec "$VMID" -- env DEBIAN_FRONTEND=noninteractive apt-get install -y -qq curl ca-certificates >/dev/null
A2=$?
set -e
if [[ "$A1" != 0 || "$A2" != 0 ]]; then
echo "apt not usable in VMID $VMID; installing static curl into $BASE/bin/curl"
curl -fsSL "https://github.com/moparisthebest/static-curl/releases/latest/download/curl-amd64" -o "/tmp/curl-static-$VMID"
chmod 755 "/tmp/curl-static-$VMID"
pct push "$VMID" "/tmp/curl-static-$VMID" "$BASE/bin/curl"
rm -f "/tmp/curl-static-$VMID"
fi
pct exec "$VMID" -- chmod 755 "$BASE/bin/cast" 2>/dev/null || true
if pct exec "$VMID" -- test -f "$BASE/bin/curl"; then
pct exec "$VMID" -- chmod 755 "$BASE/bin/curl"
fi
pct exec "$VMID" -- env -i PATH="$BASE/bin:/usr/local/bin:/usr/bin:/bin" HOME=/tmp bash --noprofile --norc -lc 'cast --version | head -1; command -v curl >/dev/null && curl --version | head -1 || true'
HOST_UNIT="chain138-pmm-mesh-pct-${VMID}.service"
GUEST_UNIT="chain138-pmm-mesh-automation.service"
PCT_BIN="$(command -v pct)"
can_guest_systemd=false
if pct exec "$VMID" -- bash -c 't=/etc/systemd/system/.c138mesh_w; rm -f "$t"; touch "$t" && rm -f "$t"'; then
can_guest_systemd=true
fi
if [[ "$can_guest_systemd" == true ]]; then
systemctl disable --now "$HOST_UNIT" 2>/dev/null || true
rm -f "/etc/systemd/system/$HOST_UNIT"
systemctl daemon-reload 2>/dev/null || true
pct exec "$VMID" -- bash -c 'cat > /etc/systemd/system/chain138-pmm-mesh-automation.service' <<'UNITEOF'
[Unit]
Description=Chain 138 PMM mesh — oracle/keeper/WETH poll
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
Environment=PATH=/var/tmp/chain138-mesh/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
WorkingDirectory=/var/tmp/chain138-mesh/smom-dbis-138
Environment=PMM_MESH_INTERVAL_SEC=6
Environment=MESH_CAST_GAS_PRICE=2gwei
Environment=ENABLE_MESH_ORACLE_TICK=1
Environment=ENABLE_MESH_KEEPER_TICK=1
Environment=ENABLE_MESH_PMM_READS=1
Environment=ENABLE_MESH_WETH_READS=1
EnvironmentFile=-/var/tmp/chain138-mesh/smom-dbis-138/.env
ExecStart=/bin/bash /var/tmp/chain138-mesh/smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
UNITEOF
pct exec "$VMID" -- systemctl daemon-reload
pct exec "$VMID" -- systemctl enable "$GUEST_UNIT"
pct exec "$VMID" -- systemctl restart "$GUEST_UNIT"
sleep 2
pct exec "$VMID" -- systemctl is-active "$GUEST_UNIT" || {
pct exec "$VMID" -- journalctl -u chain138-pmm-mesh-automation -n 40 --no-pager || true
exit 1
}
else
pct exec "$VMID" -- systemctl disable --now "$GUEST_UNIT" 2>/dev/null || true
pct exec "$VMID" -- rm -f "/etc/systemd/system/$GUEST_UNIT" 2>/dev/null || true
cat > "/etc/systemd/system/$HOST_UNIT" <<UNIT_HOST
[Unit]
Description=Chain 138 PMM mesh via pct into CT ${VMID}
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=${PCT_BIN} exec ${VMID} -- env PATH=${BASE}/bin:/usr/bin:/bin HOME=/tmp PMM_MESH_INTERVAL_SEC=6 MESH_CAST_GAS_PRICE=2gwei ENABLE_MESH_ORACLE_TICK=1 ENABLE_MESH_KEEPER_TICK=1 ENABLE_MESH_PMM_READS=1 ENABLE_MESH_WETH_READS=1 /bin/bash --noprofile --norc ${BASE}/smom-dbis-138/scripts/reserve/pmm-mesh-6s-automation.sh
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
UNIT_HOST
systemctl daemon-reload
systemctl enable "$HOST_UNIT"
systemctl restart "$HOST_UNIT"
sleep 2
systemctl is-active "$HOST_UNIT" || {
journalctl -u "$HOST_UNIT" -n 40 --no-pager || true
exit 1
}
fi
rm -f /tmp/c138-mesh-install.tgz /tmp/cast-bin-lxc
REMOTE
done
log "done. Guest logs: ssh root@<proxmox> \"pct exec <VMID> -- journalctl -u chain138-pmm-mesh-automation -f\""
log " Host-wrapped (hardened CT): ssh root@<proxmox> \"journalctl -u chain138-pmm-mesh-pct-<VMID> -f\""

View File

@@ -0,0 +1,136 @@
#!/usr/bin/env bash
# Install Oracle Publisher on LXC 3500 (fresh Ubuntu template). Run from project root on LAN.
# Sources scripts/lib/load-project-env.sh for PRIVATE_KEY, AGGREGATOR_ADDRESS, COINGECKO_API_KEY, etc.
#
# Usage: ./scripts/deployment/provision-oracle-publisher-lxc-3500.sh
# Env: ORACLE_LXC_PROXMOX_HOST (default 192.168.11.12 — node where VMID 3500 runs; do not use root PROXMOX_HOST)
#      ORACLE_VMID (default 3500)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
PROXMOX_HOST="${ORACLE_LXC_PROXMOX_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
ORACLE_VMID="${ORACLE_VMID:-3500}"
ORACLE_HOME="/opt/oracle-publisher"
ORACLE_USER="${ORACLE_USER:-oracle}"
RPC_URL="${RPC_URL:-http://192.168.11.211:8545}"
AGGREGATOR_ADDRESS="${AGGREGATOR_ADDRESS:-${ORACLE_AGGREGATOR_ADDRESS:-0x99b3511a2d315a497c8112c1fdd8d508d4b1e506}}"
ORACLE_PROXY_ADDRESS="${ORACLE_PROXY_ADDRESS:-0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6}"
SSH_OPTS=(-o ConnectTimeout=25 -o StrictHostKeyChecking=accept-new)
# PRIVATE_KEY is the publisher signing key — required, and never echoed anywhere below.
if [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "ERROR: PRIVATE_KEY not set. Source smom-dbis-138/.env or export PRIVATE_KEY before running." >&2
exit 1
fi
PY_SRC="${PROJECT_ROOT}/smom-dbis-138/services/oracle-publisher/oracle_publisher.py"
REQ="${PROJECT_ROOT}/smom-dbis-138/services/oracle-publisher/requirements.txt"
[[ -f "$PY_SRC" ]] || { echo "ERROR: missing $PY_SRC" >&2; exit 1; }
[[ -f "$REQ" ]] || { echo "ERROR: missing $REQ" >&2; exit 1; }
# Run an arbitrary command on the Proxmox node that hosts the CT.
remote() { ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" "$@"; }
echo "=== Provisioning Oracle Publisher: host=${PROXMOX_HOST} vmid=${ORACLE_VMID} ==="
# Fail fast if the CT does not exist on this node (set -e aborts on non-zero).
remote "pct status ${ORACLE_VMID}" >/dev/null
echo "[1/6] OS packages + oracle user..."
# Unquoted heredoc: ${ORACLE_USER}/${ORACLE_HOME} expand LOCALLY before the
# script body is piped to `bash -es` inside the CT.
remote "pct exec ${ORACLE_VMID} -- bash -es" <<EOS
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get install -y -qq python3 python3-pip python3-venv ca-certificates curl
if ! id -u ${ORACLE_USER} &>/dev/null; then
useradd -r -s /bin/bash -d ${ORACLE_HOME} -m ${ORACLE_USER}
fi
mkdir -p ${ORACLE_HOME}
chown -R ${ORACLE_USER}:${ORACLE_USER} ${ORACLE_HOME}
EOS
echo "[2/6] Push Python app + requirements..."
# Two-hop copy: workstation → Proxmox host /tmp → (pct push) → inside the CT.
scp "${SSH_OPTS[@]}" "$PY_SRC" "root@${PROXMOX_HOST}:/tmp/oracle_publisher.py"
scp "${SSH_OPTS[@]}" "$REQ" "root@${PROXMOX_HOST}:/tmp/oracle-requirements.txt"
remote "pct push ${ORACLE_VMID} /tmp/oracle_publisher.py ${ORACLE_HOME}/oracle_publisher.py"
remote "pct push ${ORACLE_VMID} /tmp/oracle-requirements.txt ${ORACLE_HOME}/requirements.txt"
remote "pct exec ${ORACLE_VMID} -- chown ${ORACLE_USER}:${ORACLE_USER} ${ORACLE_HOME}/oracle_publisher.py ${ORACLE_HOME}/requirements.txt"
remote "pct exec ${ORACLE_VMID} -- chmod 755 ${ORACLE_HOME}/oracle_publisher.py"
echo "[3/6] Python venv + pip..."
remote "pct exec ${ORACLE_VMID} -- bash -es" <<EOS
sudo -u ${ORACLE_USER} python3 -m venv ${ORACLE_HOME}/venv
sudo -u ${ORACLE_USER} ${ORACLE_HOME}/venv/bin/pip install -q --upgrade pip
sudo -u ${ORACLE_USER} ${ORACLE_HOME}/venv/bin/pip install -q -r ${ORACLE_HOME}/requirements.txt || true
# Minimal set if optional OTEL packages fail; web3 v7 breaks geth_poa_middleware — pin v6
sudo -u ${ORACLE_USER} ${ORACLE_HOME}/venv/bin/pip install -q 'web3>=6.15,<7' eth-account requests python-dotenv prometheus-client || true
EOS
echo "[4/6] Write .env (no stdout of secrets)..."
# Build the env file in a mode-600 local temp file; it is never printed.
ENV_TMP="$(mktemp)"
chmod 600 "$ENV_TMP"
# Quote URLs for systemd EnvironmentFile: unquoted "&" can break parsing / concatenation.
DS1_URL="https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd"
if [[ -n "${COINGECKO_API_KEY:-}" ]]; then
DS1_URL="${DS1_URL}&x_cg_demo_api_key=${COINGECKO_API_KEY}"
fi
{
echo "RPC_URL=${RPC_URL}"
echo "AGGREGATOR_ADDRESS=${AGGREGATOR_ADDRESS}"
echo "PRIVATE_KEY=${PRIVATE_KEY}"
echo "HEARTBEAT=60"
echo "DEVIATION_THRESHOLD=0.5"
echo "ORACLE_ADDRESS=${ORACLE_PROXY_ADDRESS}"
echo "CHAIN_ID=138"
echo "COINGECKO_API_KEY=${COINGECKO_API_KEY:-}"
echo "DATA_SOURCE_1_URL=\"${DS1_URL}\""
echo "DATA_SOURCE_1_PARSER=ethereum.usd"
echo "DATA_SOURCE_2_URL=\"https://api.coinbase.com/v2/prices/ETH-USD/spot\""
echo "DATA_SOURCE_2_PARSER=data.amount"
# Match smom-dbis-138/scripts/update-oracle-price.sh (100k was OOG on aggregator)
echo "GAS_LIMIT=400000"
echo "GAS_PRICE=1000000000"
} > "$ENV_TMP"
# Secrets travel via scp + pct push only; temp copies removed on both hops below.
scp "${SSH_OPTS[@]}" "$ENV_TMP" "root@${PROXMOX_HOST}:/tmp/oracle-publisher.env"
rm -f "$ENV_TMP"
remote "pct push ${ORACLE_VMID} /tmp/oracle-publisher.env ${ORACLE_HOME}/.env"
remote "pct exec ${ORACLE_VMID} -- chown ${ORACLE_USER}:${ORACLE_USER} ${ORACLE_HOME}/.env"
remote "pct exec ${ORACLE_VMID} -- chmod 600 ${ORACLE_HOME}/.env"
remote "rm -f /tmp/oracle-publisher.env"
echo "[5/6] systemd unit..."
# Outer heredoc (EOF, unquoted) expands ORACLE_* locally; the inner heredoc
# delimiter is quoted ('UNIT') so the remote shell writes the already-expanded
# text literally into the unit file without a second round of expansion.
remote "pct exec ${ORACLE_VMID} -- bash -es" <<EOF
cat > /etc/systemd/system/oracle-publisher.service <<'UNIT'
[Unit]
Description=Oracle Publisher Service (Chain 138)
After=network.target
Wants=network-online.target
[Service]
Type=simple
User=${ORACLE_USER}
Group=${ORACLE_USER}
WorkingDirectory=${ORACLE_HOME}
Environment="PATH=${ORACLE_HOME}/venv/bin:/usr/local/bin:/usr/bin:/bin"
EnvironmentFile=-${ORACLE_HOME}/.env
ExecStart=${ORACLE_HOME}/venv/bin/python ${ORACLE_HOME}/oracle_publisher.py
Restart=always
RestartSec=15
NoNewPrivileges=true
[Install]
WantedBy=multi-user.target
UNIT
systemctl daemon-reload
systemctl enable oracle-publisher.service
EOF
echo "[6/6] Start service..."
remote "pct exec ${ORACLE_VMID} -- systemctl restart oracle-publisher.service"
sleep 3
remote "pct exec ${ORACLE_VMID} -- systemctl is-active oracle-publisher.service"
echo ""
echo "OK: Oracle Publisher on VMID ${ORACLE_VMID} (${PROXMOX_HOST})."
echo "Logs: ssh root@${PROXMOX_HOST} \"pct exec ${ORACLE_VMID} -- journalctl -u oracle-publisher -n 40 --no-pager\""

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env bash
# Install HAProxy in LXC 10210 (order-haproxy) and proxy :80 → Sankofa/Order portal (Next.js).
# Requires SSH to Proxmox host that runs CT 10210 (default: r630-01). See config/ip-addresses.conf.
# Usage: ./scripts/deployment/provision-order-haproxy-10210.sh [--dry-run]
#
# One-time repair (unprivileged CT with host uid 0 on disk → "nobody" inside, apt broken): on Proxmox host,
# pct stop 10210 && pct mount 10210 && chown -R 100000:100000 /var/lib/lxc/10210/rootfs && pct unmount 10210 && pct start 10210
# (Default Proxmox idmap: container root = 100000 on host.)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "$PROJECT_ROOT/config/ip-addresses.conf"
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; done
PROXMOX="${PROXMOX_ORDER_HAPROXY_NODE:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${ORDER_HAPROXY_VMID:-10210}"
BACKEND_HOST="${ORDER_HAPROXY_BACKEND_HOST:-${IP_SANKOFA_PORTAL:-192.168.11.51}}"
BACKEND_PORT="${ORDER_HAPROXY_BACKEND_PORT:-${SANKOFA_PORTAL_PORT:-3000}}"
TEMPLATE="$PROJECT_ROOT/config/haproxy/order-haproxy-10210.cfg.template"
if [[ ! -r "$TEMPLATE" ]]; then
echo "❌ Missing template: $TEMPLATE"
exit 1
fi
# Render the config in memory from the __BACKEND_HOST__/__BACKEND_PORT__ placeholders.
CFG=$(sed -e "s/__BACKEND_HOST__/${BACKEND_HOST}/g" -e "s/__BACKEND_PORT__/${BACKEND_PORT}/g" "$TEMPLATE")
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Provision order-haproxy (CT $VMID on $PROXMOX)"
echo " Backend: http://${BACKEND_HOST}:${BACKEND_PORT}"
echo " Dry-run: $DRY_RUN"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Dry-run prints the rendered config and exits without touching the CT.
if [[ "$DRY_RUN" == true ]]; then
echo "$CFG"
exit 0
fi
# Run a command on the Proxmox node that hosts the CT.
remote_run() {
ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new \
"${PROXMOX_SSH_USER:-root}@$PROXMOX" "$@"
}
if ! remote_run "pct status $VMID" 2>/dev/null | grep -q running; then
echo "❌ CT $VMID is not running on $PROXMOX"
exit 1
fi
# Install haproxy inside the CT only when missing (idempotent re-runs).
remote_run "pct exec $VMID -- bash -c '
set -e
export DEBIAN_FRONTEND=noninteractive
if ! dpkg -s haproxy >/dev/null 2>&1; then
apt-get update -qq
apt-get install -y -qq haproxy
fi
'"
# Stream the rendered config over stdin, through ssh and pct exec, into the CT.
echo "$CFG" | remote_run "pct exec $VMID -- bash -c 'cat > /etc/haproxy/haproxy.cfg'"
# Validate the config first (haproxy -c), then enable/restart and show listeners.
remote_run "pct exec $VMID -- bash -c '
set -e
haproxy -c -f /etc/haproxy/haproxy.cfg
systemctl enable haproxy
systemctl restart haproxy
sleep 1
systemctl is-active --quiet haproxy
echo OK: haproxy active
command -v ss >/dev/null && ss -lntp | grep -E \":80|:443\" || true
'"
IP_ORDER="${IP_ORDER_HAPROXY:-192.168.11.39}"
echo ""
echo "✅ Done. From LAN: curl -sS -o /dev/null -w '%{http_code}\\n' http://${IP_ORDER}:80/"
echo " Then NPM: THE_ORDER_UPSTREAM_IP=${IP_ORDER} THE_ORDER_UPSTREAM_PORT=80 bash scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh"

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env bash
# Ensure CT 7801 (or VMID) has NEXTAUTH_URL (public NPM host) and NEXTAUTH_SECRET.
# Does not print secret values. Safe to run after every portal sync.
#
# Env: PROXMOX_HOST, SANKOFA_PORTAL_VMID, SANKOFA_PORTAL_CT_DIR, SANKOFA_PORTAL_NEXTAUTH_URL
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${SANKOFA_PORTAL_VMID:-7801}"
CT_APP_DIR="${SANKOFA_PORTAL_CT_DIR:-/opt/sankofa-portal}"
SERVICE_NAME="${SANKOFA_PORTAL_SERVICE:-sankofa-portal}"
NEXTAUTH_PUBLIC_URL="${SANKOFA_PORTAL_NEXTAUTH_URL:-https://sankofa.nexus}"
# SSH_OPTS is a plain string on purpose — expanded unquoted below to word-split into options.
SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
# Unquoted heredoc: ${CT_APP_DIR}/${NEXTAUTH_PUBLIC_URL} expand locally, while the
# escaped \$ENV_LOCAL and \$(openssl …) expand INSIDE the CT — the secret is
# generated on the container and never passes through this shell or its logs.
ssh $SSH_OPTS "root@${PROXMOX_HOST}" "pct exec ${VMID} -- bash -s" <<EOF
set -euo pipefail
mkdir -p "${CT_APP_DIR}"
cd "${CT_APP_DIR}"
# .env.local (preferred for secrets / overrides)
ENV_LOCAL=".env.local"
touch "\$ENV_LOCAL"
if grep -q '^NEXTAUTH_URL=' "\$ENV_LOCAL" 2>/dev/null; then
sed -i "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=${NEXTAUTH_PUBLIC_URL}|" "\$ENV_LOCAL"
else
printf '%s\n' "NEXTAUTH_URL=${NEXTAUTH_PUBLIC_URL}" >> "\$ENV_LOCAL"
fi
if ! grep -q '^NEXTAUTH_SECRET=' "\$ENV_LOCAL" 2>/dev/null; then
printf '%s\n' "NEXTAUTH_SECRET=\$(openssl rand -hex 32)" >> "\$ENV_LOCAL"
fi
# .env on CT often ships with LAN NEXTAUTH_URL; Next merges both — align to public URL.
if [[ -f .env ]] && grep -q '^NEXTAUTH_URL=' .env 2>/dev/null; then
sed -i "s|^NEXTAUTH_URL=.*|NEXTAUTH_URL=${NEXTAUTH_PUBLIC_URL}|" .env
fi
EOF
# Restart so the Next.js process picks up the (possibly new) env values.
ssh $SSH_OPTS "root@${PROXMOX_HOST}" "pct exec ${VMID} -- systemctl restart ${SERVICE_NAME}"
ssh $SSH_OPTS "root@${PROXMOX_HOST}" "pct exec ${VMID} -- systemctl is-active ${SERVICE_NAME}"
echo "NextAuth env ensured on CT ${VMID} (NEXTAUTH_URL=${NEXTAUTH_PUBLIC_URL}; secret added only if missing). Service restarted."

View File

@@ -27,11 +27,12 @@ echo "=== Set missing dotenv (Chain 138 / DODO PMM) ==="
echo " Target: $ENV_FILE"
echo ""
append_if_missing "DODO_PMM_PROVIDER_ADDRESS" "0x8EF6657D2a86c569F6ffc337EE6b4260Bd2e59d0"
append_if_missing "DODO_PMM_INTEGRATION_ADDRESS" "0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D"
append_if_missing "DODO_PMM_PROVIDER_ADDRESS" "0x5CAe6Ce155b7f08D3a956F5Dc82fC9945f29B381"
append_if_missing "DODO_PMM_INTEGRATION_ADDRESS" "0x5BDc62f1ae7D630c37A8B363a1d49845356Ee72d"
append_if_missing "POOL_CUSDTCUSDC" "0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8"
append_if_missing "POOL_CUSDTUSDT" "0xa3Ee6091696B28e5497b6F491fA1e99047250c59"
append_if_missing "POOL_CUSDCUSDC" "0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5"
append_if_missing "POOL_CUSDTUSDT" "0x6fc60DEDc92a2047062294488539992710b99D71"
append_if_missing "POOL_CUSDCUSDC" "0x9f74Be42725f2Aa072a9E0CdCce0E7203C510263"
append_if_missing "CHAIN_138_DODO_PMM_INTEGRATION" "0x5BDc62f1ae7D630c37A8B363a1d49845356Ee72d"
echo ""
echo "Done. Verify: grep -E 'DODO_PMM|POOL_' $ENV_FILE"

View File

@@ -0,0 +1,110 @@
#!/usr/bin/env bash
# Sync Sankofa Next.js portal source to LXC 7801, install deps, production build, restart systemd.
# Prerequisites: SSH root@PROXMOX_HOST; portal tree at SANKOFA_PORTAL_SRC (default: sibling ../Sankofa/portal).
#
# Usage:
# ./scripts/deployment/sync-sankofa-portal-7801.sh [--dry-run]
# Env:
# PROXMOX_HOST (default 192.168.11.11), SANKOFA_PORTAL_VMID (7801), SANKOFA_PORTAL_SRC, IP_SANKOFA_PORTAL (for post-check only)
# SANKOFA_PORTAL_NEXTAUTH_URL (default https://sankofa.nexus) — applied on CT after build
#
# See: docs/03-deployment/PUBLIC_SECTOR_LIVE_DEPLOYMENT_CHECKLIST.md (Phoenix CT 7801)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${SANKOFA_PORTAL_VMID:-7801}"
CT_APP_DIR="${SANKOFA_PORTAL_CT_DIR:-/opt/sankofa-portal}"
SERVICE_NAME="${SANKOFA_PORTAL_SERVICE:-sankofa-portal}"
# Plain string on purpose — expanded unquoted below to word-split into ssh options.
SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
# Prefer the sibling checkout when present; otherwise the caller must export SANKOFA_PORTAL_SRC.
DEFAULT_SRC="${PROJECT_ROOT}/../Sankofa/portal"
if [[ -d "$DEFAULT_SRC" ]]; then
SANKOFA_PORTAL_SRC="${SANKOFA_PORTAL_SRC:-$DEFAULT_SRC}"
else
SANKOFA_PORTAL_SRC="${SANKOFA_PORTAL_SRC:-}"
fi
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
echo "=== Sync Sankofa portal → CT ${VMID} (${CT_APP_DIR}) ==="
echo "Proxmox: ${PROXMOX_HOST}"
echo "Source: ${SANKOFA_PORTAL_SRC:-<unset>}"
echo ""
if [[ -z "$SANKOFA_PORTAL_SRC" || ! -d "$SANKOFA_PORTAL_SRC" ]]; then
echo "ERROR: Set SANKOFA_PORTAL_SRC to the portal directory (clone of Sankofa/portal)."
echo "Example: SANKOFA_PORTAL_SRC=/path/to/Sankofa/portal $0"
exit 1
fi
if ! command -v tar >/dev/null; then
echo "ERROR: tar required"
exit 1
fi
# Three tarball locations: local temp → Proxmox host temp → inside the CT.
TMP_TGZ="${TMPDIR:-/tmp}/sankofa-portal-sync-$$.tgz"
REMOTE_TGZ="/tmp/sankofa-portal-sync-$$.tgz"
CT_TGZ="/tmp/sankofa-portal-sync.tgz"
# Always remove the local tarball, on success or failure.
cleanup() { rm -f "$TMP_TGZ"; }
trap cleanup EXIT
if $DRY_RUN; then
echo "[DRY-RUN] tar (exclude node_modules,.next,.git) → $TMP_TGZ"
echo "[DRY-RUN] scp → root@${PROXMOX_HOST}:${REMOTE_TGZ}"
echo "[DRY-RUN] ssh pct push ${VMID} … && pct exec ${VMID} systemctl stop ${SERVICE_NAME}"
echo "[DRY-RUN] pct exec: tar xf into ${CT_APP_DIR}; pnpm install; pnpm build; systemctl start ${SERVICE_NAME}"
exit 0
fi
echo "📦 Archiving portal (excluding node_modules, .next, .git, .env / .env.local)…"
# .env/.env.local are excluded so CT-local secrets are never overwritten by the sync.
tar czf "$TMP_TGZ" \
--exclude=node_modules \
--exclude=.next \
--exclude=.git \
--exclude=.env.local \
--exclude=.env \
-C "$SANKOFA_PORTAL_SRC" .
echo "📤 Copy to Proxmox host…"
scp $SSH_OPTS "$TMP_TGZ" "root@${PROXMOX_HOST}:${REMOTE_TGZ}"
echo "📥 Push into CT ${VMID} and build…"
# Unquoted heredoc: ${VMID}/${CT_APP_DIR}/… expand locally before the script runs
# on the Proxmox host; the single-quoted `bash -lc` body is passed as one argument
# into the CT, where it stops the service, unpacks, and rebuilds with pnpm.
ssh $SSH_OPTS "root@${PROXMOX_HOST}" bash -s <<REMOTE_EOF
set -euo pipefail
pct push ${VMID} ${REMOTE_TGZ} ${CT_TGZ}
rm -f ${REMOTE_TGZ}
pct exec ${VMID} -- systemctl stop ${SERVICE_NAME} || true
pct exec ${VMID} -- bash -lc 'set -euo pipefail
mkdir -p ${CT_APP_DIR}
cd ${CT_APP_DIR}
tar xzf ${CT_TGZ}
rm -f ${CT_TGZ}
command -v pnpm >/dev/null || { echo "ERROR: pnpm missing in CT"; exit 1; }
pnpm install
pnpm build
'
pct exec ${VMID} -- systemctl start ${SERVICE_NAME}
pct exec ${VMID} -- systemctl is-active ${SERVICE_NAME}
REMOTE_EOF
echo ""
echo "🔐 Ensuring NextAuth URL/secret on CT (see sankofa-portal-ensure-nextauth-on-ct.sh)…"
SANKOFA_PORTAL_NEXTAUTH_URL="${SANKOFA_PORTAL_NEXTAUTH_URL:-https://sankofa.nexus}"
export SANKOFA_PORTAL_VMID SANKOFA_PORTAL_CT_DIR SANKOFA_PORTAL_SERVICE SANKOFA_PORTAL_NEXTAUTH_URL PROXMOX_HOST
bash "${SCRIPT_DIR}/sankofa-portal-ensure-nextauth-on-ct.sh"
echo ""
echo "✅ Done. Verify:"
echo " curl -sS http://${IP_SANKOFA_PORTAL:-192.168.11.51}:3000/ | head -c 120"
echo " curl -sSI https://sankofa.nexus/api/auth/signin | head -n 15"
echo " https://sankofa.nexus/ (via NPM)"
echo ""
echo "Override public auth URL: SANKOFA_PORTAL_NEXTAUTH_URL=https://portal.sankofa.nexus $0"

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# TsunamiSwap VM 5010 — inventory check + example qm lines (always informational).
# Reports whether the VMID already exists on the target Proxmox node and, if not,
# prints the checklist sizing for creating it. Never modifies anything; exits 0 either way.
# Ref: docs/00-meta/OPERATOR_READY_CHECKLIST.md section 5c.
#
# Usage: ./scripts/deployment/tsunamiswap-vm-5010-provision.sh
# Env: PROXMOX_HOST (default 192.168.11.11), TSUNAMI_VMID (5010)
#
set -euo pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
project_root="$(cd "$script_dir/../.." && pwd)"
# shellcheck source=/dev/null
source "${project_root}/config/ip-addresses.conf" 2>/dev/null || true

PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${TSUNAMI_VMID:-5010}"
ssh_opts=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)

echo "=== TsunamiSwap VM ${VMID} on ${PROXMOX_HOST} ==="
# `qm status` prints nothing on stdout for an unknown VMID (errors go to stderr,
# discarded remotely), so any output at all means the VM exists.
if ssh "${ssh_opts[@]}" "root@${PROXMOX_HOST}" "qm status ${VMID} 2>/dev/null" | grep -q .; then
  echo "Status: VMID ${VMID} exists."
  ssh "${ssh_opts[@]}" "root@${PROXMOX_HOST}" "qm status ${VMID}"
  exit 0
fi
echo "Status: VMID ${VMID} not found (still to provision)."
echo "Checklist target: r630-01, 8 vCPU, 16 GB RAM, ~160 GB, IP e.g. 192.168.11.91."
echo "Create with your Proxmox template/ISO, then run setup/deploy scripts if available."
exit 0

View File

@@ -0,0 +1,100 @@
#!/usr/bin/env node
// Flatten config/aggregator-route-matrix.json into a spreadsheet-friendly CSV
// (config/aggregator-route-matrix.csv). Run from the project root with `node`.
import fs from 'fs';
import path from 'path';
// Paths resolve against the current working directory — expected to be the repo root.
const projectRoot = process.cwd();
const inputPath = path.resolve(projectRoot, 'config/aggregator-route-matrix.json');
const outputPath = path.resolve(projectRoot, 'config/aggregator-route-matrix.csv');
const matrix = JSON.parse(fs.readFileSync(inputPath, 'utf8'));
// Accumulator for every CSV data row; the header is prepended at write time.
const rows = [];
// Escape one CSV field per RFC 4180: values containing a double quote, comma,
// or newline are wrapped in double quotes with embedded quotes doubled.
// null/undefined serialize as the empty string; everything else via String().
function csvEscape(value) {
  if (value == null) {
    return '';
  }
  const text = String(value);
  if (!/["\n,]/.test(text)) {
    return text;
  }
  return `"${text.split('"').join('""')}"`;
}
// Append one flattened route record to the module-level `rows` accumulator.
// `kind` tags which section of the matrix the route came from. Swap routes
// carry tokenIn*/tokenOut* fields while bridge routes carry asset* fields,
// so every token column falls back to the asset equivalent.
function pushRouteRow(kind, route) {
  const joined = (list) => (list || []).join('|');
  const legRef = (leg) => leg.poolAddress || leg.executorAddress || leg.protocol || leg.kind;
  rows.push([
    kind,
    route.routeId,
    route.status,
    route.routeType,
    route.fromChainId,
    route.toChainId,
    route.tokenInSymbol || route.assetSymbol || '',
    route.tokenInAddress || route.assetAddress || '',
    route.tokenOutSymbol || route.assetSymbol || '',
    route.tokenOutAddress || route.assetAddress || '',
    route.hopCount || '',
    route.bridgeType || '',
    route.bridgeAddress || '',
    joined(route.aggregatorFamilies),
    joined(route.tags),
    joined(route.intermediateSymbols),
    (route.legs || []).map(legRef).join('|'),
    (route.notes || []).join(' | '),
  ]);
}
// Emit one CSV row per live swap / bridge route using the shared column mapping.
for (const route of matrix.liveSwapRoutes || []) {
pushRouteRow('liveSwapRoute', route);
}
for (const route of matrix.liveBridgeRoutes || []) {
pushRouteRow('liveBridgeRoute', route);
}
// Blocked/planned routes have a sparser schema (a tokenInSymbols list plus a
// `reason`), so most token/bridge columns stay blank and the reason lands in
// the final notesOrReason column.
for (const route of matrix.blockedOrPlannedRoutes || []) {
rows.push([
'blockedOrPlannedRoute',
route.routeId,
route.status,
route.routeType,
route.fromChainId,
route.toChainId,
(route.tokenInSymbols || []).join('|'),
'',
'',
'',
'',
'',
'',
'',
'',
'',
'',
route.reason || '',
]);
}
// Column names — positional; keep in sync with pushRouteRow's pushes above.
const header = [
'kind',
'routeId',
'status',
'routeType',
'fromChainId',
'toChainId',
'tokenInSymbol',
'tokenInAddress',
'tokenOutSymbol',
'tokenOutAddress',
'hopCount',
'bridgeType',
'bridgeAddress',
'aggregatorFamilies',
'tags',
'intermediateSymbols',
'legRefs',
'notesOrReason',
];
// Serialize with a trailing newline and overwrite the output CSV in place.
const csv = [header, ...rows].map((row) => row.map(csvEscape).join(',')).join('\n') + '\n';
fs.writeFileSync(outputPath, csv, 'utf8');
console.log(`Wrote ${rows.length} rows to ${outputPath}`);

View File

@@ -40,10 +40,17 @@ done
command -v cast &>/dev/null || { echo "cast (foundry) required"; exit 1; }
command -v jq &>/dev/null || { echo "jq required"; exit 1; }
# Convert a raw hex word (as printed by `cast call` for an address return) into
# a 0x-prefixed 20-byte address.
#   $1 - hex string, with or without 0x prefix (usually a 64-char ABI word)
# Prints the address (last 40 hex chars, case preserved) on stdout.
# Returns 1 when the input is too short OR contains non-hex characters — e.g.
# an RPC error message captured by the caller's `|| true` — so garbage is never
# silently converted into a plausible-looking address.
hex_to_addr() {
  local value="${1#0x}"
  [[ ${#value} -ge 40 ]] || return 1
  # Reject non-hex residue (error text, whitespace) before slicing.
  [[ "$value" =~ ^[0-9a-fA-F]+$ ]] || return 1
  printf '0x%s\n' "${value: -40}"
}
# Fetch pool count: call allPools(length) by trying 0..MAX_POOLS (contract has allPools(uint256))
pools=()
for ((i=0; i<MAX_POOLS; i++)); do
addr=$(cast call "$INT" "allPools(uint256)(address)" "$i" --rpc-url "$RPC" 2>/dev/null | cast --to-addr 2>/dev/null || true)
raw=$(cast call "$INT" "allPools(uint256)(address)" "$i" --rpc-url "$RPC" 2>/dev/null || true)
addr=$(hex_to_addr "$raw" 2>/dev/null || true)
[[ -n "$addr" && "$addr" != "0x0000000000000000000000000000000000000000" ]] || break
pools+=("$addr")
done
@@ -53,6 +60,11 @@ echo "Found ${#pools[@]} pools on Chain 138" >&2
# Build JSON array of pool entries
entries="[]"
for pool in "${pools[@]}"; do
registered=$(cast call "$INT" "isRegisteredPool(address)(bool)" "$pool" --rpc-url "$RPC" 2>/dev/null || true)
if [[ "$registered" != "true" ]]; then
echo " Skip $pool (not registered in integration)" >&2
continue
fi
# poolConfigs(pool) -> (pool, baseToken, quoteToken, lpFeeRate, i, k, isOpenTWAP, createdAt)
config=$(cast call "$INT" "poolConfigs(address)(address,address,address,uint256,uint256,uint256,bool,uint256)" "$pool" --rpc-url "$RPC" 2>/dev/null || true)
if [[ -z "$config" ]]; then

View File

@@ -37,7 +37,7 @@ IP_TO_VMID = {
"192.168.11.110": "106",
"192.168.11.111": "107",
"192.168.11.112": "108",
"192.168.11.120": "10120",
"192.168.11.125": "10120",
"192.168.11.130": "10130",
"192.168.11.140": "5000",
"192.168.11.150": "1500",
@@ -92,7 +92,7 @@ IP_TO_HOSTNAME = {
"192.168.11.110": "redis-rpc-translator",
"192.168.11.111": "web3signer-rpc-translator",
"192.168.11.112": "vault-rpc-translator",
"192.168.11.120": "dbis-redis",
"192.168.11.125": "dbis-redis",
"192.168.11.130": "dbis-frontend",
"192.168.11.140": "blockscout-1",
"192.168.11.150": "besu-sentry-1",

View File

@@ -32,6 +32,20 @@ err_exit() { echo "ERROR: $1" >&2; exit 1; }
# 4. dbis_core config if present
[[ -f "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" ]] && source "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" 2>/dev/null || true
# 4b. Strip trailing CR/LF from RPC URL vars (editor mistakes; breaks cast/curl)
for _lpr_k in RPC_URL_138 RPC_URL CHAIN138_RPC CHAIN138_RPC_URL ETHEREUM_MAINNET_RPC \
RPC_URL_138_PUBLIC GNOSIS_MAINNET_RPC GNOSIS_RPC CRONOS_RPC_URL CRONOS_RPC \
CELO_MAINNET_RPC CELO_RPC WEMIX_RPC WEMIX_MAINNET_RPC BSC_RPC_URL \
POLYGON_MAINNET_RPC BASE_MAINNET_RPC OPTIMISM_MAINNET_RPC ARBITRUM_MAINNET_RPC \
AVALANCHE_RPC_URL AVALANCHE_RPC; do
_lpr_v="${!_lpr_k:-}"
[[ -z "$_lpr_v" ]] && continue
_lpr_v="${_lpr_v%$'\r'}"
_lpr_v="${_lpr_v%$'\n'}"
export "$_lpr_k=$_lpr_v"
done
unset _lpr_k _lpr_v 2>/dev/null || true
# 5. Contract addresses from master JSON (config/smart-contracts-master.json) when not set by .env
[[ -f "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" 2>/dev/null || true

View File

@@ -3,6 +3,10 @@
# Usage: ./scripts/maintenance/fstrim-all-running-ct.sh [--dry-run]
# Requires: SSH key-based access to ml110, r630-01, r630-02.
# See: docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md
#
# Environment (optional):
# FSTRIM_TIMEOUT_SEC Seconds per CT (default 180). Use 4560 for faster fleet passes when many CTs hang on FITRIM.
# FSTRIM_HOSTS Space-separated host keys: ml110 r630-01 r630-02 (default: all three).
set -euo pipefail
@@ -14,10 +18,14 @@ ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
FSTRIM_TIMEOUT_SEC="${FSTRIM_TIMEOUT_SEC:-180}"
# shellcheck disable=SC2206
FSTRIM_HOSTS_ARR=(${FSTRIM_HOSTS:-ml110 r630-01 r630-02})
DRY_RUN=0
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=1
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$1" "$2" 2>/dev/null || true; }
run_ssh() { ssh -o ConnectTimeout=15 -o ServerAliveInterval=10 -o StrictHostKeyChecking=accept-new root@"$1" "$2" 2>/dev/null || true; }
fstrim_host() {
local host_ip="$1" host_name="$2"
@@ -29,21 +37,30 @@ fstrim_host() {
fi
for vmid in $vmids; do
if [[ $DRY_RUN -eq 1 ]]; then
echo " [dry-run] $host_name VMID $vmid: would run fstrim -v /"
echo " [dry-run] $host_name VMID $vmid: would run fstrim -v / (timeout ${FSTRIM_TIMEOUT_SEC}s)"
else
out=$(run_ssh "$host_ip" "pct exec $vmid -- fstrim -v / 2>&1" || true)
# timeout: some CTs hang on FITRIM or slow storage; do not block entire fleet
out=$(run_ssh "$host_ip" "timeout \"${FSTRIM_TIMEOUT_SEC}\" pct exec $vmid -- fstrim -v / 2>&1" || true)
echo " $host_name VMID $vmid: ${out:-done}"
fi
done
}
echo "=== fstrim all running CTs (reclaim thin pool space) ==="
echo " timeout_per_ct=${FSTRIM_TIMEOUT_SEC}s hosts=${FSTRIM_HOSTS_ARR[*]}"
[[ $DRY_RUN -eq 1 ]] && echo "(dry-run: no changes)"
echo ""
fstrim_host "$ML110" "ml110"
fstrim_host "$R630_01" "r630-01"
fstrim_host "$R630_02" "r630-02"
for key in "${FSTRIM_HOSTS_ARR[@]}"; do
case "$key" in
ml110) fstrim_host "$ML110" "ml110" ;;
r630-01) fstrim_host "$R630_01" "r630-01" ;;
r630-02) fstrim_host "$R630_02" "r630-02" ;;
*)
echo " Unknown FSTRIM_HOSTS entry: $key (use ml110, r630-01, r630-02)"
;;
esac
done
echo ""
echo "Done. Schedule weekly via cron or run with daily-weekly-checks weekly."

View File

@@ -1,8 +1,9 @@
#!/usr/bin/env bash
# Make RPC VMIDs (2101, 2500-2505) writable by running e2fsck on their rootfs (fixes read-only remount after ext4 errors).
# Make Besu CT rootfs writable by running e2fsck on their root LV (fixes read-only / emergency_ro after ext4 errors).
# SSHs to the Proxmox host (r630-01), stops each CT, runs e2fsck -f -y on the LV, starts the CT.
#
# Usage: ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh [--dry-run]
# Optional: BESU_WRITABLE_VMIDS="1500 1501 1502" to add sentries or other CTs (default: Core RPC 2101 only).
# Run from project root. Requires: SSH to r630-01 (root, key-based).
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md §Read-only CT
@@ -13,9 +14,14 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# RPC VMIDs on r630-01: Core (2101) + Alltra/HYBX (2500-2505)
RPC_VMIDS=(2101 2500 2501 2502 2503 2504 2505)
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
# Default: Core RPC on r630-01 (2101). 2500-2505 removed — destroyed; see ALL_VMIDS_ENDPOINTS.md.
# Add sentries with: BESU_WRITABLE_VMIDS="1500 1501 1502 2101" ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh
if [[ -n "${BESU_WRITABLE_VMIDS:-}" ]]; then
read -r -a RPC_VMIDS <<< "${BESU_WRITABLE_VMIDS}"
else
RPC_VMIDS=(2101)
fi
SSH_OPTS="-o ConnectTimeout=20 -o ServerAliveInterval=15 -o StrictHostKeyChecking=accept-new"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true

View File

@@ -2,6 +2,14 @@
# Migrate one LXC container from r630-01 to r630-02 (backup → copy → restore).
# Use to free space on r630-01's thin pool. Run from project root (LAN); needs SSH to both hosts.
#
# IMPORTANT — unprivileged CTs: vzdump often fails with tar "Permission denied" inside the guest.
# Prefer cluster migration via API (maps source storage to target), e.g.:
# ssh root@192.168.11.11 "pvesh create /nodes/r630-01/lxc/<VMID>/migrate --target r630-02 --target-storage thin5 --restart 1"
# See docs/03-deployment/MIGRATE_CT_R630_01_TO_R630_02.md
#
# NEVER run `pct set <vmid> --delete unused0` if unused0 and rootfs reference the same disk name
# on different storages (e.g. local-lvm:vm-N-disk-0 vs thin1:vm-N-disk-0) — Proxmox may remove the only root LV.
#
# Usage:
# ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh <VMID> [target_storage]
# ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh 5200 thin1

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Probe the NPMplus admin API on loopback :81 inside CT 10233 (r630-01).
# Exits 0 when the first-hop HTTP status looks reachable (2xx/3xx/401/403), 1 otherwise.
# Ref: docs/04-configuration/NPMPLUS_QUICK_REF.md
#
# Env: NPMPLUS_SSH_HOST (default r630-01), NPMPLUS_VMID (default 10233)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=../config
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SSH_HOST="${NPMPLUS_SSH_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${NPMPLUS_VMID:-10233}"
# Plain string on purpose — expanded unquoted below to word-split into ssh options.
SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new"
echo "NPMplus :81 check via ssh root@${SSH_HOST} pct exec ${VMID}"
# First hop only (no -L): NPM often 301/308 to HTTPS; following redirects breaks on localhost TLS.
raw="$(timeout 45 ssh $SSH_OPTS "root@${SSH_HOST}" "pct exec ${VMID} -- sh -c 'curl -s -o /dev/null -w \"%{http_code}\" --connect-timeout 5 http://127.0.0.1:81/ 2>/dev/null'" 2>/dev/null || true)"
# Extract the last 3-digit group from whatever came back (banners, CR/LF, etc.).
# The trailing `|| true` is required: under `set -euo pipefail`, grep exiting 1
# on no match would abort the script inside the command substitution, making the
# `code="000"` fallback on the next line unreachable.
code="$(printf '%s' "$raw" | tr -d '\r\n' | grep -oE '[0-9]{3}' | tail -1 || true)"
[[ -n "$code" ]] || code="000"
echo "HTTP ${code}"
# 401/403 still mean the listener answered — the check is reachability, not auth.
[[ "$code" =~ ^(2[0-9]{2}|3[0-9]{2}|401|403)$ ]] || { echo "Unexpected code (want 2xx/3xx/401/403 = reachable)"; exit 1; }
echo "OK"

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Bring up static networking inside unprivileged LXC 3501 (ccip-monitor) when eth0 stays DOWN.
# Run on the Proxmox node that hosts VMID 3501 (r630-02). Optional: @reboot cron on the host.
#
# After `pct reboot 3501` (or stop/start), eth0 may be DOWN until you run this script again —
# host @reboot cron does not run on container-only reboots.
#
# Usage (on r630-02 as root): /usr/local/sbin/pct-lxc-3501-net-up.sh
# Install: scp to r630-02 /usr/local/sbin/ && chmod +x
set -euo pipefail

VMID="${CCIP_MONITOR_VMID:-3501}"
IP="${CCIP_MONITOR_IP:-192.168.11.28/24}"
GW="${CCIP_MONITOR_GW:-192.168.11.1}"
BCAST="${CCIP_MONITOR_BCAST:-192.168.11.255}"

# Run a command inside the container.
in_ct() { pct exec "$VMID" -- "$@"; }

# Nothing to do (and nothing to report) unless the CT is actually running.
pct status "$VMID" 2>/dev/null | grep -q running || exit 0

in_ct ip link set eth0 up
# `replace` is idempotent on re-runs; fall back to plain `add` when it is rejected.
if ! in_ct ip addr replace "$IP" dev eth0 broadcast "$BCAST" 2>/dev/null; then
  in_ct ip addr add "$IP" dev eth0 broadcast "$BCAST"
fi
if ! in_ct ip route replace default via "$GW" dev eth0 2>/dev/null; then
  in_ct ip route add default via "$GW" dev eth0
fi

View File

@@ -0,0 +1,115 @@
#!/usr/bin/env bash
# Additional pass: diagnose I/O + load on Proxmox nodes, then apply safe host-level optimizations.
# - Reports: load, PSI, zpool, pvesm, scrub, vzdump, running CT count
# - Applies (idempotent): vm.swappiness on ml110; sysstat; host fstrim where supported
#
# Usage: ./scripts/maintenance/proxmox-host-io-optimize-pass.sh [--diagnose-only]
# Requires: SSH key root@ ml110, r630-01, r630-02 (see config/ip-addresses.conf)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
ML="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"
R1="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
R2="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
SSH_OPTS=(-o ConnectTimeout=20 -o ServerAliveInterval=15 -o StrictHostKeyChecking=accept-new)
DIAG_ONLY=false
[[ "${1:-}" == "--diagnose-only" ]] && DIAG_ONLY=true
remote() { ssh "${SSH_OPTS[@]}" "root@$1" bash -s; }
echo "=== Proxmox host I/O optimize pass ($(date -Is)) ==="
echo " ml110=$ML r630-01=$R1 r630-02=$R2 diagnose-only=$DIAG_ONLY"
echo ""
for H in "$ML" "$R1" "$R2"; do
echo "########## DIAGNOSTIC: $H ##########"
remote "$H" <<'EOS'
set +e
hostname
uptime
echo "--- PSI ---"
cat /proc/pressure/cpu 2>/dev/null | head -2
cat /proc/pressure/io 2>/dev/null | head -2
echo "--- pvesm ---"
pvesm status 2>/dev/null | head -25
echo "--- running workloads ---"
echo -n "LXC running: "; pct list 2>/dev/null | awk 'NR>1 && $2=="running"' | wc -l
echo -n "VM running: "; qm list 2>/dev/null | awk 'NR>1 && $3=="running"' | wc -l
echo "--- vzdump ---"
ps aux 2>/dev/null | grep -E '[v]zdump|[p]bs-|proxmox-backup' | head -5 || echo "(none visible)"
echo "--- ZFS ---"
zpool status 2>/dev/null | head -20 || echo "no zfs"
echo "--- scrub ---"
zpool status 2>/dev/null | grep -E 'scan|scrub' || true
EOS
echo ""
done
if $DIAG_ONLY; then
echo "Diagnose-only: done."
exit 0
fi
echo "########## OPTIMIZE: ml110 swappiness ##########"
remote "$ML" <<'EOS'
set -e
F=/etc/sysctl.d/99-proxmox-ml110-swappiness.conf
if ! grep -q '^vm.swappiness=10$' "$F" 2>/dev/null; then
printf '%s\n' '# Prefer RAM over swap when plenty of memory free (operator pass)' 'vm.swappiness=10' > "$F"
sysctl -p "$F"
echo "Wrote and applied $F"
else
echo "Already vm.swappiness=10 in $F"
sysctl vm.swappiness=10 2>/dev/null || true
fi
EOS
echo ""
echo "########## OPTIMIZE: sysstat (all hosts) ##########"
for H in "$ML" "$R1" "$R2"; do
echo "--- $H ---"
remote "$H" <<'EOS'
set -e
export DEBIAN_FRONTEND=noninteractive
if command -v sar >/dev/null 2>&1; then
echo "sysstat already present"
else
apt-get update -qq && apt-get install -y -qq sysstat
fi
sed -i 's/^ENABLED="false"/ENABLED="true"/' /etc/default/sysstat 2>/dev/null || true
systemctl enable sysstat 2>/dev/null || true
systemctl restart sysstat 2>/dev/null || true
echo "sar: $(command -v sar || echo missing)"
EOS
done
echo ""
echo "########## OPTIMIZE: host fstrim (hypervisor root / and /var/lib/vz if supported) ##########"
for H in "$ML" "$R1" "$R2"; do
echo "--- $H ---"
remote "$H" <<'EOS'
set +e
for m in / /var/lib/vz; do
if mountpoint -q "$m" 2>/dev/null; then
out=$(fstrim -v "$m" 2>&1)
echo "$m: $out"
fi
done
EOS
done
echo ""
echo "########## POST: quick load snapshot ##########"
for H in "$ML" "$R1" "$R2"; do
echo -n "$H "
ssh "${SSH_OPTS[@]}" "root@$H" "cat /proc/loadavg | cut -d' ' -f1-3" 2>/dev/null || echo "unreachable"
done
echo ""
echo "Done. Optional: run ./scripts/maintenance/fstrim-all-running-ct.sh during a quiet window (can be I/O heavy)."

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# List or remove old E2E report dirs under docs/04-configuration/verification-evidence/e2e-verification-*
# Default: dry-run only. Deletes dirs older than KEEP_DAYS (default 45). Never deletes the newest
# MIN_KEEP (default 2) directories by mtime, regardless of age.
#
# Usage: <script> [--apply]   (any other argument keeps the dry-run default)
# Env:   KEEP_DAYS  age threshold in days (default 45)
#        MIN_KEEP   number of newest dirs always protected (default 2)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
EVIDENCE="$PROJECT_ROOT/docs/04-configuration/verification-evidence"
# --apply switches from listing to actual deletion.
DRY_RUN=true
for a in "$@"; do
  [[ "$a" == "--apply" ]] && DRY_RUN=false
done
KEEP_DAYS="${KEEP_DAYS:-45}"
MIN_KEEP="${MIN_KEEP:-2}"
if [[ ! -d "$EVIDENCE" ]]; then
  echo "No directory: $EVIDENCE"
  exit 1
fi
# Oldest-first list of report dirs ("<epoch> <path>" lines, sorted numerically by epoch).
# BUGFIX: take field 2 onward with cut, not awk '{print $2}' — awk truncated any path
# containing a space at its first space, which could feed a wrong path to rm -rf below.
mapfile -t ALL < <(find "$EVIDENCE" -maxdepth 1 -type d -name 'e2e-verification-*' -printf '%T@ %p\n' 2>/dev/null | sort -n | cut -d' ' -f2-)
if [[ ${#ALL[@]} -eq 0 ]]; then
  echo "No e2e-verification-* directories under $EVIDENCE"
  exit 0
fi
# Newest MIN_KEEP paths (never prune)
declare -A PROTECT
for ((i = ${#ALL[@]} - MIN_KEEP; i < ${#ALL[@]}; i++)); do
  [[ $i -ge 0 ]] || continue   # MIN_KEEP may exceed list size
  PROTECT["${ALL[$i]}"]=1
done
now=$(date +%s)
cutoff=$((now - KEEP_DAYS * 86400))
removed=0
checked=0
for dir in "${ALL[@]}"; do
  [[ -n "${PROTECT[$dir]:-}" ]] && continue
  # stat failure (dir vanished mid-run) yields mtime 0 → treated as old; acceptable for pruning.
  mt=$(stat -c %Y "$dir" 2>/dev/null || echo 0)
  (( checked++ )) || true   # '|| true': (( )) exits 1 when the result is 0, which would trip set -e
  if (( mt < cutoff )); then
    if [[ "$DRY_RUN" == true ]]; then
      echo "Would remove (older than ${KEEP_DAYS}d): $dir"
    else
      rm -rf "$dir"
      echo "Removed: $dir"
    fi
    (( removed++ )) || true
  fi
done
if [[ "$DRY_RUN" == true ]]; then
  echo ""
  echo "Dry-run. Protected newest $MIN_KEEP dir(s). Set KEEP_DAYS=$KEEP_DAYS."
  echo "To delete: KEEP_DAYS=$KEEP_DAYS bash $0 --apply"
else
  echo "Done. Removed $removed director(y/ies); checked $checked (excluding protected)."
fi

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Staggered restart of Besu RPC services on ML110 (192.168.11.10) only.
# Use after fleet restarts or when multiple RPC CTs compete for disk — avoids all nodes stuck in RocksDB open/compact.
#
# Usage: ./scripts/maintenance/restart-ml110-besu-rpc-staggered.sh [--dry-run]
# Env: ML110_WAIT_SEC between restarts (default 75), PROXMOX_HOST_ML110 (default 192.168.11.10)
set -euo pipefail

# Resolve repo root and best-effort load IP overrides; defaults below cover a missing conf.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

HOST="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"
WAIT="${ML110_WAIT_SEC:-75}"
SSH_OPTS=(-o ConnectTimeout=25 -o ServerAliveInterval=15 -o StrictHostKeyChecking=accept-new)
# RPC-only CTs on ML110 (see ALL_VMIDS_ENDPOINTS.md)
RPC_VMIDS=(2102 2301 2304 2305 2306 2307 2308 2400 2402 2403)

# --dry-run: print the plan without restarting anything.
case "${1:-}" in
  --dry-run) DRY_RUN=true ;;
  *)         DRY_RUN=false ;;
esac

echo "=== Staggered besu-rpc restart on $HOST ==="
echo "  VMIDs: ${RPC_VMIDS[*]}"
echo "  Wait between: ${WAIT}s  dry-run=$DRY_RUN"
echo ""

# Fail fast when the hypervisor is unreachable (the remote "OK" is intentionally printed).
ssh "${SSH_OPTS[@]}" "root@$HOST" "echo OK" 2>/dev/null || {
  echo "Cannot SSH to root@$HOST" >&2
  exit 1
}

last="${RPC_VMIDS[${#RPC_VMIDS[@]}-1]}"
for vmid in "${RPC_VMIDS[@]}"; do
  if ! $DRY_RUN; then
    echo "$(date -Is) restarting VMID $vmid ..."
    if ssh "${SSH_OPTS[@]}" "root@$HOST" "timeout 180 pct exec $vmid -- systemctl restart besu-rpc.service"; then
      echo "  OK"
    else
      echo "  FAIL (timeout or error)" >&2
    fi
    # Stagger: pause after every CT except the final one.
    if [[ "$vmid" != "$last" ]]; then
      echo "  waiting ${WAIT}s ..."
      sleep "$WAIT"
    fi
  else
    echo "[dry-run] would restart VMID $vmid"
  fi
done

echo ""
echo "Done. Wait 25 minutes for 2402/2403 if RocksDB compaction runs; then:"
echo "  ./scripts/verify/check-chain138-rpc-health.sh"

View File

@@ -184,7 +184,7 @@ echo ""
# Test matrix: from_container -> to_container
test_pairs=(
"10100:192.168.11.105:10000:192.168.11.44:DBIS PostgreSQL:Order PostgreSQL"
"10100:192.168.11.105:10120:192.168.11.120:DBIS PostgreSQL:DBIS Redis"
"10100:192.168.11.105:10120:192.168.11.125:DBIS PostgreSQL:DBIS Redis"
"10000:192.168.11.44:10001:192.168.11.45:Order PostgreSQL Primary:Order PostgreSQL Replica"
"10000:192.168.11.44:10020:192.168.11.38:Order PostgreSQL:Order Redis"
"10130:192.168.11.130:10150:192.168.11.155:DBIS Frontend:DBIS API"

View File

@@ -69,9 +69,8 @@ const DOMAINS = [
// www.* domains that redirect to parent domains
const REDIRECT_DOMAINS = [
// REMOVED: Sankofa redirects - services not deployed
// { domain: 'www.sankofa.nexus', redirectTo: 'sankofa.nexus' },
// { domain: 'www.phoenix.sankofa.nexus', redirectTo: 'phoenix.sankofa.nexus' },
// Sankofa www → apex: use scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh (301 via proxy host advanced_config).
// Do not add duplicate NPM "Redirection Host" rows for www.sankofa / www.phoenix here while those names are proxy hosts with LE certs.
{ domain: 'www.mim4u.org', redirectTo: 'mim4u.org' },
];

View File

@@ -3,7 +3,7 @@ set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
@@ -38,12 +38,12 @@ echo "📝 Domains to Configure (19 total):"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
cat > "$DOMAINS_FILE" << 'DOMAINS'
sankofa.nexus → http://${IP_BLOCKSCOUT}:80
www.sankofa.nexus → http://${IP_BLOCKSCOUT}:80
phoenix.sankofa.nexus → http://${IP_BLOCKSCOUT}:80
www.phoenix.sankofa.nexus → http://${IP_BLOCKSCOUT}:80
the-order.sankofa.nexus → http://${IP_BLOCKSCOUT}:80
cat > "$DOMAINS_FILE" <<DOMAINS
sankofa.nexus → http://${IP_SANKOFA_PORTAL}:${SANKOFA_PORTAL_PORT} (portal CT 7801)
www.sankofa.nexus → http://${IP_SANKOFA_PORTAL}:${SANKOFA_PORTAL_PORT}
phoenix.sankofa.nexus → http://${IP_SANKOFA_PHOENIX_API}:${SANKOFA_PHOENIX_API_PORT} (Phoenix API CT 7800)
www.phoenix.sankofa.nexus → http://${IP_SANKOFA_PHOENIX_API}:${SANKOFA_PHOENIX_API_PORT}
the-order.sankofa.nexus → http://${IP_ORDER_HAPROXY}:80
explorer.d-bis.org → http://${IP_BLOCKSCOUT}:80
rpc-http-pub.d-bis.org → https://${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}}}}:443 (WebSocket)
rpc-ws-pub.d-bis.org → https://${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}}}}:443 (WebSocket)

View File

@@ -3,7 +3,7 @@ set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
@@ -164,12 +164,12 @@ echo ""
SUCCESS=0
FAILED=0
# sankofa.nexus (5 domains)
create_proxy_host "sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "the-order.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
# sankofa.nexus (5 domains) — portal :3000 / Phoenix API :4000 (not Blockscout)
create_proxy_host "sankofa.nexus" "http" "192.168.11.51" "3000" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.sankofa.nexus" "http" "192.168.11.51" "3000" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "phoenix.sankofa.nexus" "http" "192.168.11.50" "4000" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.phoenix.sankofa.nexus" "http" "192.168.11.50" "4000" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "the-order.sankofa.nexus" "http" "192.168.11.39" "80" "false" && ((SUCCESS++)) || ((FAILED++))
# d-bis.org (9 domains)
create_proxy_host "explorer.d-bis.org" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))

View File

@@ -3,7 +3,7 @@ set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
@@ -291,12 +291,17 @@ echo ""
SUCCESS=0
FAILED=0
# sankofa.nexus (5 domains)
create_proxy_host "sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "the-order.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
# sankofa.nexus (5 domains) — portal :3000 / Phoenix API :4000 (not Blockscout; explorer is IP_BLOCKSCOUT:80)
IP_SANKOFA_PORTAL="${IP_SANKOFA_PORTAL:-${IP_SERVICE_51:-192.168.11.51}}"
IP_SANKOFA_PHOENIX_API="${IP_SANKOFA_PHOENIX_API:-${IP_SERVICE_50:-192.168.11.50}}"
SANKOFA_PORTAL_PORT="${SANKOFA_PORTAL_PORT:-3000}"
SANKOFA_PHOENIX_API_PORT="${SANKOFA_PHOENIX_API_PORT:-4000}"
IP_ORDER_HAPROXY="${IP_ORDER_HAPROXY:-192.168.11.39}"
create_proxy_host "sankofa.nexus" "http" "${IP_SANKOFA_PORTAL}" "${SANKOFA_PORTAL_PORT}" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.sankofa.nexus" "http" "${IP_SANKOFA_PORTAL}" "${SANKOFA_PORTAL_PORT}" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "phoenix.sankofa.nexus" "http" "${IP_SANKOFA_PHOENIX_API}" "${SANKOFA_PHOENIX_API_PORT}" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "www.phoenix.sankofa.nexus" "http" "${IP_SANKOFA_PHOENIX_API}" "${SANKOFA_PHOENIX_API_PORT}" "false" && ((SUCCESS++)) || ((FAILED++))
create_proxy_host "the-order.sankofa.nexus" "http" "${IP_ORDER_HAPROXY}" "80" "false" && ((SUCCESS++)) || ((FAILED++))
# d-bis.org (9 domains)
create_proxy_host "explorer.d-bis.org" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}" "80" "false" && ((SUCCESS++)) || ((FAILED++))

View File

@@ -3,21 +3,17 @@
# Auth failures: only a short error message is printed by default. For a redacted JSON snippet set NPM_DEBUG_AUTH=1.
set -euo pipefail
# Load IP configuration
# Repo root (…/proxmox) — same as second block below; load IPs once from the right path
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Update existing NPMplus proxy hosts via API with correct VMIDs and IPs
# This script updates existing proxy hosts, not creates new ones.
# PUT payload includes only forward_* / websocket / block_exploits — existing certificate_id and ssl_forced are preserved by NPMplus.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Preserve NPM credentials from environment so "export NPM_PASSWORD=...; ./script" works
_orig_npm_url="${NPM_URL:-}"
_orig_npm_email="${NPM_EMAIL:-}"
@@ -58,11 +54,12 @@ echo ""
# NPMplus API can stall indefinitely without --max-time (override e.g. NPM_CURL_MAX_TIME=300)
NPM_CURL_MAX_TIME="${NPM_CURL_MAX_TIME:-120}"
curl_npm() { curl -s -k --connect-timeout 10 --max-time "$NPM_CURL_MAX_TIME" "$@"; }
# -L: port 81 often 301s HTTP→HTTPS; POST /api/tokens without -L returns 400 "Payload is undefined"
curl_npm() { curl -s -k -L --connect-timeout 10 --max-time "$NPM_CURL_MAX_TIME" "$@"; }
# Connection check (NPMplus is on LAN 192.168.11.x). Try HTTP if HTTPS fails; try alternate IP .166/.167 if unreachable.
echo "🔐 Authenticating to NPMplus..."
try_connect() { curl -s -k -o /dev/null --connect-timeout 5 --max-time 15 "$1" 2>/dev/null; }
try_connect() { curl -s -k -L -o /dev/null --connect-timeout 5 --max-time 15 "$1" 2>/dev/null; }
if ! try_connect "$NPM_URL/"; then
# Try HTTP instead of HTTPS (NPM admin often listens on HTTP only on port 81)
http_url="${NPM_URL/https:/http:}"
@@ -73,7 +70,7 @@ if ! try_connect "$NPM_URL/"; then
alt_url=""
if [[ "$NPM_URL" == *"${IP_NPMPLUS_ETH0}"* ]]; then
alt_url="https://${IP_NPMPLUS}:81"
elif [[ "$NPM_URL" == *"${IP_NPMPLUS}"* ]] || [[ "$NPM_URL" == *"${IP_NPMPLUS_ETH1}"* ]]; then
elif [[ "$NPM_URL" == *"${IP_NPMPLUS}"* ]] || [[ -n "${IP_NPMPLUS_ETH1:-}" && "$NPM_URL" == *"${IP_NPMPLUS_ETH1}"* ]]; then
alt_url="https://${IP_NPMPLUS_ETH0}:81"
fi
connected=""
@@ -134,13 +131,46 @@ resolve_proxy_host_id() {
.id' 2>/dev/null | head -n1
}
# www → apex redirect: only https://hostname[:port] (no path/query); rejects characters that could break nginx advanced_config.
validate_canonical_https_redirect() {
local url="$1"
local ctx="${2:-canonical_https}"
if [[ "$url" != https://* ]]; then
echo "$ctx: canonical_https must start with https:// (got: $url)"
return 1
fi
if [[ "$url" == *$'\n'* || "$url" == *$'\r'* || "$url" == *' '* || "$url" == *';'* || "$url" == *'$'* || "$url" == *'`'* ]]; then
echo "$ctx: canonical_https contains forbidden characters (no spaces, semicolons, dollar, backticks)"
return 1
fi
local rest="${url#https://}"
if [[ "$rest" == */* ]]; then
echo "$ctx: canonical_https must not include a path (got: $url)"
return 1
fi
if ! [[ "$rest" =~ ^[a-zA-Z0-9._-]+(:[0-9]{1,5})?$ ]]; then
echo "$ctx: canonical_https must be https://hostname or https://hostname:port (got: $url)"
return 1
fi
return 0
}
# Function to add proxy host (POST) when domain does not exist
# Optional 6th arg: canonical HTTPS apex for www-style hosts (sets advanced_config 301 → apex$request_uri)
add_proxy_host() {
local domain=$1
local forward_host=$2
local forward_port=$3
local websocket=$4
local block_exploits=${5:-false}
local canonical_https="${6:-}"
local adv_line=""
if [ -n "$canonical_https" ] && ! validate_canonical_https_redirect "$canonical_https" "add_proxy_host($domain)"; then
return 1
fi
if [ -n "$canonical_https" ]; then
adv_line="return 301 ${canonical_https}\$request_uri;"
fi
local payload
payload=$(jq -n \
--arg domain "$domain" \
@@ -148,6 +178,7 @@ add_proxy_host() {
--argjson port "$forward_port" \
--argjson ws "$websocket" \
--argjson block_exploits "$([ "$block_exploits" = "true" ] && echo true || echo false)" \
--arg adv "$adv_line" \
'{
domain_names: [$domain],
forward_scheme: "http",
@@ -157,7 +188,7 @@ add_proxy_host() {
block_exploits: $block_exploits,
certificate_id: null,
ssl_forced: false
}' 2>/dev/null)
} + (if $adv != "" then {advanced_config: $adv} else {} end)' 2>/dev/null)
if [ -z "$payload" ]; then
echo " ❌ Failed to build payload for $domain"
return 1
@@ -170,7 +201,11 @@ add_proxy_host() {
local id
id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
if [ -n "$id" ] && [ "$id" != "null" ]; then
echo " ✅ Added: $domain -> http://${forward_host}:${forward_port} (WebSocket: $websocket)"
if [ -n "$canonical_https" ]; then
echo " ✅ Added: $domain -> http://${forward_host}:${forward_port} (WebSocket: $websocket) + 301 → ${canonical_https}\$request_uri"
else
echo " ✅ Added: $domain -> http://${forward_host}:${forward_port} (WebSocket: $websocket)"
fi
return 0
else
local err
@@ -180,7 +215,7 @@ add_proxy_host() {
echo " ↪ Host likely exists; refreshing list and attempting PUT update..."
PROXY_HOSTS_JSON=$(curl_npm -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN")
if update_proxy_host "$domain" "http://${forward_host}:${forward_port}" "$websocket" "$block_exploits"; then
if update_proxy_host "$domain" "http://${forward_host}:${forward_port}" "$websocket" "$block_exploits" "$canonical_https"; then
echo " ✅ Updated after duplicate-create error: $domain"
return 0
fi
@@ -191,12 +226,17 @@ add_proxy_host() {
# Function to update proxy host
# block_exploits: set false for RPC hosts (JSON-RPC uses POST to /; block_exploits can cause 405)
# Optional 5th arg: canonical HTTPS URL (no path) — sets advanced_config to 301 redirect (www → apex)
update_proxy_host() {
local domain=$1
local target=$2
local websocket=$3
local block_exploits=${4:-true}
local canonical_https="${5:-}"
if [ -n "$canonical_https" ] && ! validate_canonical_https_redirect "$canonical_https" "update_proxy_host($domain)"; then
return 1
fi
# Parse target URL
local scheme=$(echo "$target" | sed -E 's|^([^:]+):.*|\1|')
local hostname=$(echo "$target" | sed -E 's|^[^/]+//([^:]+):.*|\1|')
@@ -208,6 +248,17 @@ update_proxy_host() {
hostname=$(echo "$target" | sed -E 's|^https://([^:]+):.*|\1|')
port=$(echo "$target" | sed -E 's|^https://[^:]+:([0-9]+).*|\1|' || echo "443")
fi
# Reject bad parses (e.g. https://:443 when forward IP env is empty) — NPM returns errors without .id and jq message is empty.
if [[ -z "$hostname" || "$hostname" == *"://"* || "$hostname" == *"/"* ]]; then
echo " ❌ Invalid forward target for $domain (check env / ip-addresses.conf): $target → host=[$hostname]"
return 1
fi
port="${port//[^0-9]/}"
if [[ -z "$port" || "$port" -lt 1 || "$port" -gt 65535 ]]; then
echo " ❌ Invalid forward port for $domain: $target (parsed port=$port)"
return 1
fi
# Get host ID (case-insensitive); refresh once if missing (stale list / race with other writers)
HOST_ID=$(resolve_proxy_host_id "$domain" "$PROXY_HOSTS_JSON")
@@ -228,19 +279,24 @@ update_proxy_host() {
# block_exploits must be false for RPC so POST to / is allowed (JSON-RPC); explicit false fixes 405
local be_json="false"
[ "$block_exploits" = "true" ] && be_json="true"
local adv_line=""
if [ -n "$canonical_https" ]; then
adv_line="return 301 ${canonical_https}\$request_uri;"
fi
UPDATE_PAYLOAD=$(jq -n \
--arg scheme "$scheme" \
--arg hostname "$hostname" \
--argjson port "$(echo "$port" | sed 's/[^0-9]//g' || echo "80")" \
--argjson websocket "$websocket" \
--argjson block_exploits "$be_json" \
--arg adv "$adv_line" \
'{
forward_scheme: $scheme,
forward_host: $hostname,
forward_port: $port,
allow_websocket_upgrade: $websocket,
block_exploits: $block_exploits
}' 2>/dev/null || echo "")
} + (if $adv != "" then {advanced_config: $adv} else {} end)' 2>/dev/null || echo "")
UPDATE_RESPONSE=$(curl_npm -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" \
-H "Authorization: Bearer $TOKEN" \
@@ -250,10 +306,16 @@ update_proxy_host() {
UPDATE_ID=$(echo "$UPDATE_RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
if [ -n "$UPDATE_ID" ] && [ "$UPDATE_ID" != "null" ]; then
echo " ✅ Updated: $scheme://$hostname:$port (WebSocket: $websocket)"
if [ -n "$canonical_https" ]; then
echo " ✅ Updated: $scheme://$hostname:$port (WebSocket: $websocket) + 301 → ${canonical_https}\$request_uri"
else
echo " ✅ Updated: $scheme://$hostname:$port (WebSocket: $websocket)"
fi
return 0
else
ERROR=$(echo "$UPDATE_RESPONSE" | jq -r '.error.message // .error // "Unknown error"' 2>/dev/null || echo "$UPDATE_RESPONSE")
ERROR=$(echo "$UPDATE_RESPONSE" | jq -r '.error.message // .message // .error // empty' 2>/dev/null || echo "")
[ -z "$ERROR" ] && ERROR=$(echo "$UPDATE_RESPONSE" | head -c 400 | tr -d '\r\n')
[ -z "$ERROR" ] && ERROR="(empty API response — timeout or connection error; try NPM_CURL_MAX_TIME=300)"
echo " ❌ Failed: $ERROR"
return 1
fi
@@ -280,7 +342,9 @@ update_proxy_host "wss.tw-core.d-bis.org" "http://${RPC_THIRDWEB_ADMIN_CORE}:854
# Catch-all for foo.tw-core.d-bis.org → Besu HTTP JSON-RPC :8545 (exact rpc./wss. hosts above take precedence for nginx server_name)
update_proxy_host '*.tw-core.d-bis.org' "http://${RPC_THIRDWEB_ADMIN_CORE}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host '*.tw-core.d-bis.org' "${RPC_THIRDWEB_ADMIN_CORE}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# RPC Core-2 (Nathan) is on the THIRD NPMplus (192.168.11.169) — use add-rpc-core-2-npmplus-proxy.sh and update-npmplus-alltra-hybx-proxy-hosts.sh
update_proxy_host "rpc.public-0138.defi-oracle.io" "https://${RPC_THIRDWEB_PRIMARY}:443" true false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
# ThirdWeb / public-0138 edge (VMID 2400 nginx HTTPS) — default IP must match ALL_VMIDS_ENDPOINTS if env is unset
RPC_THIRDWEB_PRIMARY="${RPC_THIRDWEB_PRIMARY:-192.168.11.240}"
update_proxy_host "rpc.public-0138.defi-oracle.io" "https://${RPC_THIRDWEB_PRIMARY}:443" true false && updated_count=$((updated_count + 1)) || { sleep 2; echo " ↪ Retry rpc.public-0138.defi-oracle.io after transient NPM/API error..."; update_proxy_host "rpc.public-0138.defi-oracle.io" "https://${RPC_THIRDWEB_PRIMARY}:443" true false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1)); }
# rpc.defi-oracle.io / wss.defi-oracle.io → same backend as rpc-http-pub / rpc-ws-pub (VMID 2201)
update_proxy_host "rpc.defi-oracle.io" "http://${RPC_PUBLIC_1}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc.defi-oracle.io" "${RPC_PUBLIC_1}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "wss.defi-oracle.io" "http://${RPC_PUBLIC_1}:8546" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "wss.defi-oracle.io" "${RPC_PUBLIC_1}" 8546 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
@@ -309,6 +373,36 @@ update_proxy_host "dbis.xom-dev.phoenix.sankofa.nexus" "http://${IP_GOV_PORTALS_
update_proxy_host "iccc.xom-dev.phoenix.sankofa.nexus" "http://${IP_GOV_PORTALS_DEV}:3002" false && updated_count=$((updated_count + 1)) || { add_proxy_host "iccc.xom-dev.phoenix.sankofa.nexus" "${IP_GOV_PORTALS_DEV}" 3002 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "omnl.xom-dev.phoenix.sankofa.nexus" "http://${IP_GOV_PORTALS_DEV}:3003" false && updated_count=$((updated_count + 1)) || { add_proxy_host "omnl.xom-dev.phoenix.sankofa.nexus" "${IP_GOV_PORTALS_DEV}" 3003 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "xom.xom-dev.phoenix.sankofa.nexus" "http://${IP_GOV_PORTALS_DEV}:3004" false && updated_count=$((updated_count + 1)) || { add_proxy_host "xom.xom-dev.phoenix.sankofa.nexus" "${IP_GOV_PORTALS_DEV}" 3004 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# Sankofa portal (Next.js CT 7801) and Phoenix API (Fastify CT 7800) — not Blockscout / SolaceScanScout (that is explorer.d-bis.org / IP_BLOCKSCOUT:80)
# Public web intent: sankofa.nexus = Sankofa Sovereign Technologies; phoenix.sankofa.nexus = Phoenix Cloud Services (division). Client SSO: admin / portal + keycloak IdP. Operator: dash (IP+MFA). See docs/02-architecture/EXPECTED_WEB_CONTENT.md.
# www.sankofa.nexus → 301 https://sankofa.nexus$request_uri; www.phoenix → phoenix; www.the-order → the-order (NPM advanced_config).
IP_SANKOFA_PORTAL="${IP_SANKOFA_PORTAL:-${IP_SERVICE_51:-192.168.11.51}}"
IP_SANKOFA_PHOENIX_API="${IP_SANKOFA_PHOENIX_API:-${IP_SERVICE_50:-192.168.11.50}}"
SANKOFA_PORTAL_PORT="${SANKOFA_PORTAL_PORT:-3000}"
SANKOFA_PHOENIX_API_PORT="${SANKOFA_PHOENIX_API_PORT:-4000}"
update_proxy_host "sankofa.nexus" "http://${IP_SANKOFA_PORTAL}:${SANKOFA_PORTAL_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "sankofa.nexus" "${IP_SANKOFA_PORTAL}" "${SANKOFA_PORTAL_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "www.sankofa.nexus" "http://${IP_SANKOFA_PORTAL}:${SANKOFA_PORTAL_PORT}" false false "https://sankofa.nexus" && updated_count=$((updated_count + 1)) || { add_proxy_host "www.sankofa.nexus" "${IP_SANKOFA_PORTAL}" "${SANKOFA_PORTAL_PORT}" false false "https://sankofa.nexus" && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "phoenix.sankofa.nexus" "http://${IP_SANKOFA_PHOENIX_API}:${SANKOFA_PHOENIX_API_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "phoenix.sankofa.nexus" "${IP_SANKOFA_PHOENIX_API}" "${SANKOFA_PHOENIX_API_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "www.phoenix.sankofa.nexus" "http://${IP_SANKOFA_PHOENIX_API}:${SANKOFA_PHOENIX_API_PORT}" false false "https://phoenix.sankofa.nexus" && updated_count=$((updated_count + 1)) || { add_proxy_host "www.phoenix.sankofa.nexus" "${IP_SANKOFA_PHOENIX_API}" "${SANKOFA_PHOENIX_API_PORT}" false false "https://phoenix.sankofa.nexus" && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# Keycloak (CT 7802) — portal SSO; NPM must forward X-Forwarded-* (Keycloak KC_PROXY_HEADERS=xforwarded on upstream)
IP_KEYCLOAK="${IP_KEYCLOAK:-192.168.11.52}"
update_proxy_host "keycloak.sankofa.nexus" "http://${IP_KEYCLOAK}:8080" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "keycloak.sankofa.nexus" "${IP_KEYCLOAK}" 8080 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# the-order.sankofa.nexus — public hostname for the Sovereign Military Order of Malta (OSJ) management portal (secure auth).
# Application source (operator workstation): repo the_order at ~/projects/the_order (e.g. /home/intlc/projects/the_order).
# Default upstream: VMID 10210 order-haproxy @ IP_ORDER_HAPROXY:80 (provision: scripts/deployment/provision-order-haproxy-10210.sh).
# If 10210 is down: THE_ORDER_UPSTREAM_IP=${IP_SANKOFA_PORTAL} THE_ORDER_UPSTREAM_PORT=${SANKOFA_PORTAL_PORT} (direct portal 7801).
# www.the-order.sankofa.nexus → 301 https://the-order.sankofa.nexus$request_uri (same pattern as www.sankofa / www.phoenix).
IP_ORDER_HAPROXY="${IP_ORDER_HAPROXY:-192.168.11.39}"
THE_ORDER_UPSTREAM_IP="${THE_ORDER_UPSTREAM_IP:-${IP_ORDER_HAPROXY}}"
THE_ORDER_UPSTREAM_PORT="${THE_ORDER_UPSTREAM_PORT:-80}"
# block_exploits false — same policy as sankofa.nexus portal (Next/API-friendly; avoid 405 on some POST paths)
update_proxy_host "the-order.sankofa.nexus" "http://${THE_ORDER_UPSTREAM_IP}:${THE_ORDER_UPSTREAM_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "the-order.sankofa.nexus" "${THE_ORDER_UPSTREAM_IP}" "${THE_ORDER_UPSTREAM_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "www.the-order.sankofa.nexus" "http://${THE_ORDER_UPSTREAM_IP}:${THE_ORDER_UPSTREAM_PORT}" false false "https://the-order.sankofa.nexus" && updated_count=$((updated_count + 1)) || { add_proxy_host "www.the-order.sankofa.nexus" "${THE_ORDER_UPSTREAM_IP}" "${THE_ORDER_UPSTREAM_PORT}" false false "https://the-order.sankofa.nexus" && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# Sankofa Studio (FusionAI) — VMID 7805; UI at /studio/ on same origin (port 8000). Prefer IP_SANKOFA_STUDIO from ip-addresses.conf / .env
IP_SANKOFA_STUDIO="${IP_SANKOFA_STUDIO:-192.168.11.72}"
SANKOFA_STUDIO_PORT="${SANKOFA_STUDIO_PORT:-8000}"
# block_exploits false — studio UI/API may POST; align with portal policy (avoid spurious 405 from NPM WAF)
update_proxy_host "studio.sankofa.nexus" "http://${IP_SANKOFA_STUDIO}:${SANKOFA_STUDIO_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "studio.sankofa.nexus" "${IP_SANKOFA_STUDIO}" "${SANKOFA_STUDIO_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -4,7 +4,12 @@
set -uo pipefail
NODE_IP="${PROXMOX_HOST_R630_01}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# VLAN 200 containers that need reassignment
declare -A vlan200_containers=(
@@ -42,7 +47,7 @@ available_ips=(
"${IP_SERVICE_43:-${IP_SERVICE_43:-${IP_SERVICE_43:-192.168.11.43}}}"
"${ORDER_POSTGRES_PRIMARY:-${ORDER_POSTGRES_PRIMARY:-192.168.11.44}}"
"${ORDER_POSTGRES_REPLICA:-${ORDER_POSTGRES_REPLICA:-192.168.11.45}}"
"${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-192.168.11.46}}}"
"${IP_ORDER_PROMETHEUS:-${ORDER_REDIS_REPLICA:-192.168.11.46}}"
"${IP_SERVICE_47:-${IP_SERVICE_47:-${IP_SERVICE_47:-192.168.11.47}}}"
"${IP_ORDER_OPENSEARCH:-${IP_ORDER_OPENSEARCH:-${IP_ORDER_OPENSEARCH:-192.168.11.48}}}"
"${IP_SERVICE_49:-${IP_SERVICE_49:-${IP_SERVICE_49:-192.168.11.49}}}"
@@ -50,7 +55,7 @@ available_ips=(
"${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}}"
"${IP_SERVICE_52:-${IP_SERVICE_52:-192.168.11.52}}"
"${DB_HOST:-192.168.11.53}"
"${IP_SERVICE_54:-${IP_SERVICE_54:-192.168.11.54}}"
"${IP_ORDER_LEGAL:-192.168.11.87}"
"${IP_SERVICE_55:-${IP_SERVICE_55:-192.168.11.55}}"
"${IP_SERVICE_56:-${IP_SERVICE_56:-192.168.11.56}}"
"${IP_SERVICE_57:-${IP_SERVICE_57:-192.168.11.57}}"

View File

@@ -4,7 +4,12 @@
set -uo pipefail
NODE_IP="${PROXMOX_HOST_R630_01}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
BACKUP_DIR="/root/container-backups-$(date +%Y%m%d-%H%M%S)"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
@@ -143,7 +148,7 @@ declare -A CONTAINERS=(
["10040"]="order-intake:${IP_SERVICE_41:-${IP_SERVICE_41:-${IP_SERVICE_41:-192.168.11.41}}}:2048:2:20"
["10050"]="order-finance:${IP_SERVICE_49:-${IP_SERVICE_49:-${IP_SERVICE_49:-192.168.11.49}}}:2048:2:20"
["10060"]="order-dataroom:${IP_SERVICE_42:-${IP_SERVICE_42:-${IP_SERVICE_42:-192.168.11.42}}}:2048:2:20"
["10070"]="order-legal:${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}:2048:2:20"
["10070"]="order-legal:${IP_ORDER_LEGAL:-192.168.11.87}:2048:2:20"
["10080"]="order-eresidency:${IP_SERVICE_43:-${IP_SERVICE_43:-${IP_SERVICE_43:-192.168.11.43}}}:2048:2:20"
["10090"]="order-portal-public:${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}}}:2048:2:20"
["10091"]="order-portal-internal:${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-192.168.11.35}}}}}}:2048:2:20"

View File

@@ -150,7 +150,7 @@ declare -A CONTAINERS=(
["10092"]="order-mcp-legal:192.168.11.37:2048:2:20"
["10100"]="dbis-postgres-primary:192.168.11.105:4096:4:50"
["10101"]="dbis-postgres-replica-1:192.168.11.106:4096:4:50"
["10120"]="dbis-redis:192.168.11.120:2048:2:20"
["10120"]="dbis-redis:192.168.11.125:2048:2:20"
["10130"]="dbis-frontend:192.168.11.130:2048:2:20"
["10150"]="dbis-api-primary:192.168.11.155:4096:4:30"
["10151"]="dbis-api-secondary:192.168.11.156:4096:4:30"

View File

@@ -1,8 +1,9 @@
#!/usr/bin/env bash
# IP Conflict Resolution Script
# Resolves 3 verified IP conflicts on r630-01
# Date: 2026-01-20
# Resolves IP conflicts on r630-01 (Sankofa vs Order vs vault CTs).
# 2026-03-25: VMID 7804 (gov-portals) owns 192.168.11.54. VMID 10070 (order-legal) must use IP_ORDER_LEGAL (default 192.168.11.87), not .54.
# Original date: 2026-01-20
set -euo pipefail
@@ -25,8 +26,17 @@ log_error() {
verify_ip_available() {
local ip=$1
local vmid_if_mine=$2
log "Verifying IP $ip is available..."
if ping -c 1 -W 1 "$ip" > /dev/null 2>&1; then
if [ -n "${vmid_if_mine:-}" ]; then
local cur
cur=$(ssh root@$PROXMOX_HOST "pct config $vmid_if_mine 2>/dev/null | grep -oE 'ip=[0-9.]+' | head -1 | cut -d= -f2" | tr -d '\r')
if [ "$cur" = "$ip" ]; then
log "IP $ip already assigned to VMID $vmid_if_mine (ok) ✓"
return 0
fi
fi
log_error "IP $ip is already in use!"
return 1
fi
@@ -44,8 +54,8 @@ resolve_conflict() {
log "Current IP: $old_ip"
log "New IP: $new_ip"
# Verify new IP is available
if ! verify_ip_available "$new_ip"; then
# Verify new IP is available (or already ours on this VMID)
if ! verify_ip_available "$new_ip" "$vmid"; then
log_error "Cannot proceed - IP $new_ip is in use"
return 1
fi
@@ -129,9 +139,9 @@ main() {
log "SSH access verified ✓"
log ""
# Verify IP availability
# Verify target IPs are free (ping) — not .54 (in use by gov-portals 7804)
log "=== Step 1: Verifying IP Availability ==="
for ip in ${IP_SERVICE_54:-${IP_SERVICE_54:-192.168.11.54}} ${IP_SERVICE_55:-${IP_SERVICE_55:-192.168.11.55}} ${IP_SERVICE_56:-${IP_SERVICE_56:-192.168.11.56}}; do
for ip in ${IP_SERVICE_55:-192.168.11.55} ${IP_SERVICE_56:-192.168.11.56}; do
if ! verify_ip_available "$ip"; then
log_error "Required IP $ip is not available. Cannot proceed."
exit 1
@@ -141,7 +151,7 @@ main() {
if [ "${DRY_RUN:-false}" = "true" ]; then
log "DRY RUN - Would resolve conflicts:"
log " VMID 10070 (order-legal): ${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}${IP_SERVICE_54:-${IP_SERVICE_54:-192.168.11.54}}"
log " VMID 10070 (order-legal): if still on ${IP_GOV_PORTALS_DEV:-192.168.11.54}$IP_ORDER_LEGAL"
log " VMID 10230 (order-vault): ${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}}${IP_SERVICE_55:-${IP_SERVICE_55:-192.168.11.55}}"
log " VMID 10232 (CT10232): ${IP_SERVICE_52:-${IP_SERVICE_52:-192.168.11.52}}${IP_SERVICE_56:-${IP_SERVICE_56:-192.168.11.56}}"
exit 0
@@ -151,9 +161,22 @@ main() {
log "=== Step 2: Resolving IP Conflicts ==="
log ""
# Conflict 1: VMID 10070 (order-legal)
resolve_conflict 10070 "${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}" "${IP_SERVICE_54:-${IP_SERVICE_54:-192.168.11.54}}" "order-legal"
local result1=$?
# Conflict 1: VMID 10070 (order-legal) must not share IP with VMID 7804 (gov-portals on .54)
IP_GOV="${IP_GOV_PORTALS_DEV:-192.168.11.54}"
IP_ORDER_LEGAL="${IP_ORDER_LEGAL:-192.168.11.87}"
CURRENT_LEGAL=$(ssh root@$PROXMOX_HOST "pct config 10070 2>/dev/null | grep -oE 'ip=[0-9.]+' | head -1 | cut -d= -f2" | tr -d '\r' || echo "")
result1=0
if [ "$CURRENT_LEGAL" = "$IP_GOV" ]; then
log "VMID 10070 shares gov-portals IP $IP_GOV — moving to $IP_ORDER_LEGAL"
resolve_conflict 10070 "$IP_GOV" "$IP_ORDER_LEGAL" "order-legal"
result1=$?
elif [ "$CURRENT_LEGAL" = "$IP_ORDER_LEGAL" ]; then
log "VMID 10070 already on $IP_ORDER_LEGAL — skip"
result1=0
else
log "VMID 10070 is $CURRENT_LEGAL (expected $IP_ORDER_LEGAL or conflict with $IP_GOV); no automatic change"
result1=0
fi
log ""
# Conflict 2: VMID 10230 (order-vault)
@@ -175,7 +198,8 @@ main() {
log " ${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}} → VMID 7800 (sankofa-api-1) only"
log " ${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}} → VMID 7801 (sankofa-portal-1) only"
log " ${IP_SERVICE_52:-${IP_SERVICE_52:-192.168.11.52}} → VMID 7802 (sankofa-keycloak-1) only"
log " ${IP_SERVICE_54:-${IP_SERVICE_54:-192.168.11.54}} → VMID 10070 (order-legal)"
log " ${IP_GOV_PORTALS_DEV:-192.168.11.54} → VMID 7804 (gov-portals-dev) only"
log " ${IP_ORDER_LEGAL:-192.168.11.87} → VMID 10070 (order-legal)"
log " ${IP_SERVICE_55:-${IP_SERVICE_55:-192.168.11.55}} → VMID 10230 (order-vault)"
log " ${IP_SERVICE_56:-${IP_SERVICE_56:-192.168.11.56}} → VMID 10232 (CT10232)"
exit 0

View File

@@ -3,6 +3,16 @@
# Sets all records to DNS only mode (gray cloud) for direct NAT routing
# Supports multiple zones: sankofa.nexus, d-bis.org, mim4u.org, defi-oracle.io
# UDM Pro port forwarding: 76.53.10.36:80/443 → ${IP_NPMPLUS:-${IP_NPMPLUS:-192.168.11.167}}:80/443 (NPMplus)
#
# WARNING: For each hostname, existing CNAME records are deleted before an A record is created.
# By default all configured zones are processed. Use --zone-only to limit scope.
#
# Usage:
# ./scripts/update-all-dns-to-public-ip.sh
# ./scripts/update-all-dns-to-public-ip.sh --dry-run
# ./scripts/update-all-dns-to-public-ip.sh --zone-only=sankofa.nexus
# ./scripts/update-all-dns-to-public-ip.sh --dry-run --zone-only=d-bis.org
# Env (optional): CLOUDFLARE_DNS_DRY_RUN=1, DNS_ZONE_ONLY=sankofa.nexus (same as flags)
set -euo pipefail
@@ -11,6 +21,22 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# --help before .env (so operators can read usage without credentials)
for _arg in "$@"; do
if [[ "$_arg" == "--help" || "$_arg" == "-h" ]]; then
cat <<'EOF'
Cloudflare DNS → PUBLIC_IP (A records, DNS-only / gray cloud).
Options:
--dry-run Log intended changes only; no Cloudflare API calls.
--zone-only=ZONE ZONE one of: sankofa.nexus | d-bis.org | mim4u.org | defi-oracle.io
-h, --help This message.
Requires repo .env with Cloudflare auth and zone IDs (see script header).
EOF
exit 0
fi
done
# Colors
RED='\033[0;31m'
@@ -63,6 +89,23 @@ ZONE_D_BIS_ORG="${CLOUDFLARE_ZONE_ID_D_BIS_ORG:-${CLOUDFLARE_ZONE_ID:-}}"
ZONE_MIM4U_ORG="${CLOUDFLARE_ZONE_ID_MIM4U_ORG:-}"
ZONE_DEFI_ORACLE_IO="${CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO:-}"
# CLI / env: dry-run and single-zone scope
CLOUDFLARE_DNS_DRY_RUN="${CLOUDFLARE_DNS_DRY_RUN:-0}"
DNS_ZONE_ONLY="${DNS_ZONE_ONLY:-}"
parse_dns_update_cli_args() {
  # Scan CLI args for the two supported flags; unknown args are ignored.
  # Writes globals: CLOUDFLARE_DNS_DRY_RUN (set to 1), DNS_ZONE_ONLY (zone name).
  local cli_arg
  for cli_arg in "$@"; do
    if [[ "$cli_arg" == "--dry-run" ]]; then
      CLOUDFLARE_DNS_DRY_RUN=1
    elif [[ "$cli_arg" == --zone-only=* ]]; then
      DNS_ZONE_ONLY="${cli_arg#*=}"
    fi
  done
}
parse_dns_update_cli_args "$@"
if [ "$CLOUDFLARE_DNS_DRY_RUN" = "1" ]; then
log_warn "DRY-RUN: no Cloudflare list/create/update/delete API calls will be made."
fi
# Function to make Cloudflare API request
cf_api_request() {
local method="$1"
@@ -152,6 +195,11 @@ create_or_update_dns_record() {
fi
log_info "Processing: $full_name$ip (proxied: $proxied)"
if [ "$CLOUDFLARE_DNS_DRY_RUN" = "1" ]; then
log_success "[dry-run] Would remove CNAME(s) on $full_name if any, then upsert A → $ip (proxied=$proxied)"
return 0
fi
# Check for existing CNAME records (must delete before creating A record)
local all_records=$(get_all_dns_records "$zone_id" "$full_name")
@@ -256,28 +304,46 @@ main() {
echo ""
log_info "Public IP: $PUBLIC_IP"
log_info "Proxy Mode: DNS Only (gray cloud)"
if [ -n "$DNS_ZONE_ONLY" ]; then
log_info "Zone filter: only $DNS_ZONE_ONLY"
fi
echo ""
local total_failures=0
local run_sankofa=1 run_dbis=1 run_mim4u=1 run_defi=1
if [ -n "$DNS_ZONE_ONLY" ]; then
run_sankofa=0 run_dbis=0 run_mim4u=0 run_defi=0
case "$DNS_ZONE_ONLY" in
sankofa.nexus) run_sankofa=1 ;;
d-bis.org) run_dbis=1 ;;
mim4u.org) run_mim4u=1 ;;
defi-oracle.io) run_defi=1 ;;
*)
log_error "Unknown --zone-only=$DNS_ZONE_ONLY (expected: sankofa.nexus | d-bis.org | mim4u.org | defi-oracle.io)"
return 2
;;
esac
fi
# sankofa.nexus domain records
if [ -n "$ZONE_SANKOFA_NEXUS" ]; then
if [ "$run_sankofa" = 1 ] && [ -n "$ZONE_SANKOFA_NEXUS" ]; then
SANKOFA_RECORDS=(
"@" # sankofa.nexus
"www" # www.sankofa.nexus
"phoenix" # phoenix.sankofa.nexus
"www.phoenix" # www.phoenix.sankofa.nexus
"the-order" # the-order.sankofa.nexus
"www.the-order" # www.the-order.sankofa.nexus
)
if ! process_zone "$ZONE_SANKOFA_NEXUS" "sankofa.nexus" "${SANKOFA_RECORDS[@]}"; then
((total_failures++))
fi
else
elif [ "$run_sankofa" = 1 ]; then
log_warn "Skipping sankofa.nexus (no zone ID configured)"
fi
# d-bis.org domain records
if [ -n "$ZONE_D_BIS_ORG" ]; then
if [ "$run_dbis" = 1 ] && [ -n "$ZONE_D_BIS_ORG" ]; then
DBIS_RECORDS=(
"rpc-http-pub" # rpc-http-pub.d-bis.org
"rpc-ws-pub" # rpc-ws-pub.d-bis.org
@@ -296,12 +362,12 @@ main() {
if ! process_zone "$ZONE_D_BIS_ORG" "d-bis.org" "${DBIS_RECORDS[@]}"; then
((total_failures++))
fi
else
elif [ "$run_dbis" = 1 ]; then
log_warn "Skipping d-bis.org (no zone ID configured)"
fi
# mim4u.org domain records
if [ -n "$ZONE_MIM4U_ORG" ]; then
if [ "$run_mim4u" = 1 ] && [ -n "$ZONE_MIM4U_ORG" ]; then
MIM4U_RECORDS=(
"@" # mim4u.org
"www" # www.mim4u.org
@@ -311,12 +377,12 @@ main() {
if ! process_zone "$ZONE_MIM4U_ORG" "mim4u.org" "${MIM4U_RECORDS[@]}"; then
((total_failures++))
fi
else
elif [ "$run_mim4u" = 1 ]; then
log_warn "Skipping mim4u.org (no zone ID configured)"
fi
# defi-oracle.io domain records
if [ -n "$ZONE_DEFI_ORACLE_IO" ]; then
if [ "$run_defi" = 1 ] && [ -n "$ZONE_DEFI_ORACLE_IO" ]; then
DEFI_ORACLE_RECORDS=(
"explorer" # explorer.defi-oracle.io (Blockscout - same as explorer.d-bis.org)
"rpc.public-0138" # rpc.public-0138.defi-oracle.io
@@ -326,7 +392,7 @@ main() {
if ! process_zone "$ZONE_DEFI_ORACLE_IO" "defi-oracle.io" "${DEFI_ORACLE_RECORDS[@]}"; then
((total_failures++))
fi
else
elif [ "$run_defi" = 1 ]; then
log_warn "Skipping defi-oracle.io (no zone ID configured)"
fi
@@ -353,5 +419,5 @@ main() {
return $total_failures
}
# Run main function
main "$@"
# Run (CLI already parsed above)
main

View File

@@ -4,15 +4,11 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -25,8 +21,8 @@ log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# Oracle Publisher LXC 3500 is on r630-02 (2026-03-28+)
PROXMOX_HOST="${PROXMOX_ORACLE_PUBLISHER_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
ORACLE_VMID="${ORACLE_VMID:-3500}"
COINGECKO_API_KEY="${COINGECKO_API_KEY:?COINGECKO_API_KEY must be set. Export from .env or use: export COINGECKO_API_KEY=your-key}"
@@ -71,20 +67,12 @@ else
fi
# Update DATA_SOURCE_1_URL to include API key
# NOTE: Do not use sed with the URL in the replacement string — query params contain "&", which sed treats as "matched text".
log_info "Updating DATA_SOURCE_1_URL with API key..."
# Check if DATA_SOURCE_1_URL exists
if echo "$CURRENT_ENV" | grep -q "^DATA_SOURCE_1_URL="; then
# Update existing URL
NEW_URL="https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd&x_cg_demo_api_key=$COINGECKO_API_KEY"
ssh "root@$PROXMOX_HOST" "pct exec $ORACLE_VMID -- bash -c 'sed -i \"s|^DATA_SOURCE_1_URL=.*|DATA_SOURCE_1_URL=$NEW_URL|\" /opt/oracle-publisher/.env'"
log_success "Updated DATA_SOURCE_1_URL"
else
# Add new URL
NEW_URL="https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd&x_cg_demo_api_key=$COINGECKO_API_KEY"
ssh "root@$PROXMOX_HOST" "pct exec $ORACLE_VMID -- bash -c 'echo \"DATA_SOURCE_1_URL=$NEW_URL\" >> /opt/oracle-publisher/.env'"
log_success "Added DATA_SOURCE_1_URL"
fi
NEW_URL="https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd&x_cg_demo_api_key=$COINGECKO_API_KEY"
# Double-quote value for systemd EnvironmentFile (ampersands in URL).
ssh "root@$PROXMOX_HOST" "pct exec $ORACLE_VMID -- bash -c 'grep -v \"^DATA_SOURCE_1_URL=\" /opt/oracle-publisher/.env > /tmp/op.env.$$ && mv /tmp/op.env.$$ /opt/oracle-publisher/.env && printf \"%s\\n\" \"DATA_SOURCE_1_URL=\\\"$NEW_URL\\\"\" >> /opt/oracle-publisher/.env'"
log_success "DATA_SOURCE_1_URL set (grep+append, quoted for systemd)"
# Ensure DATA_SOURCE_1_PARSER is set correctly
log_info "Ensuring DATA_SOURCE_1_PARSER is set..."
@@ -106,7 +94,7 @@ VERIFIED_KEY=$(ssh "root@$PROXMOX_HOST" "pct exec $ORACLE_VMID -- grep '^COINGEC
VERIFIED_URL=$(ssh "root@$PROXMOX_HOST" "pct exec $ORACLE_VMID -- grep '^DATA_SOURCE_1_URL=' /opt/oracle-publisher/.env | cut -d= -f2-" || echo "")
if [ "$VERIFIED_KEY" = "$COINGECKO_API_KEY" ]; then
log_success "CoinGecko API key verified: $VERIFIED_KEY"
log_success "CoinGecko API key verified (length ${#VERIFIED_KEY} chars; value not logged)"
else
log_error "API key verification failed"
exit 1

View File

@@ -1,34 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration
# Update Sankofa NPMplus proxy hosts (portal + Phoenix API + the-order) via API by numeric host ID.
# Prefer for production: scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh (domain-based, full fleet).
# NPM proxy host IDs below match backup backup-20260325_183932 (36, 7, 59); override with SANKOFA_NPM_ID_* if your DB differs.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Update Sankofa NPMplus proxy hosts via API
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load environment variables
if [ -f "$PROJECT_ROOT/.env" ]; then
export $(cat "$PROJECT_ROOT/.env" | grep -v '^#' | xargs)
set -a
# shellcheck source=/dev/null
source "$PROJECT_ROOT/.env"
set +a
fi
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS}:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
NPM_CURL_MAX_TIME="${NPM_CURL_MAX_TIME:-180}"
# Sankofa proxy host mappings
IP_SANKOFA_PORTAL="${IP_SANKOFA_PORTAL:-${IP_SERVICE_51:-192.168.11.51}}"
IP_SANKOFA_PHOENIX_API="${IP_SANKOFA_PHOENIX_API:-${IP_SERVICE_50:-192.168.11.50}}"
SANKOFA_PORTAL_PORT="${SANKOFA_PORTAL_PORT:-3000}"
SANKOFA_PHOENIX_API_PORT="${SANKOFA_PHOENIX_API_PORT:-4000}"
IP_ORDER_HAPROXY="${IP_ORDER_HAPROXY:-192.168.11.39}"
THE_ORDER_UPSTREAM_IP="${THE_ORDER_UPSTREAM_IP:-${IP_ORDER_HAPROXY}}"
THE_ORDER_UPSTREAM_PORT="${THE_ORDER_UPSTREAM_PORT:-80}"
# NPM proxy host IDs: sankofa=3, www.sankofa=4, phoenix=5, www.phoenix=6; the-order=7, www.the-order=59 (typical; verify in NPM UI)
SANKOFA_NPM_ID_ROOT="${SANKOFA_NPM_ID_ROOT:-3}"
SANKOFA_NPM_ID_WWW="${SANKOFA_NPM_ID_WWW:-4}"
SANKOFA_NPM_ID_PHOENIX="${SANKOFA_NPM_ID_PHOENIX:-5}"
SANKOFA_NPM_ID_WWW_PHOENIX="${SANKOFA_NPM_ID_WWW_PHOENIX:-6}"
SANKOFA_NPM_ID_THE_ORDER="${SANKOFA_NPM_ID_THE_ORDER:-7}"
SANKOFA_NPM_ID_WWW_THE_ORDER="${SANKOFA_NPM_ID_WWW_THE_ORDER:-59}"
# Optional 4th field: canonical HTTPS apex — NPM advanced_config 301 (www → apex). Matches update-npmplus-proxy-hosts-api.sh.
declare -A PROXY_HOSTS=(
["21"]="sankofa.nexus|${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}}|3000"
["22"]="www.sankofa.nexus|${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}}|3000"
["23"]="phoenix.sankofa.nexus|${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}|4000"
["24"]="www.phoenix.sankofa.nexus|${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}|4000"
["$SANKOFA_NPM_ID_ROOT"]="sankofa.nexus|${IP_SANKOFA_PORTAL}|${SANKOFA_PORTAL_PORT}|"
["$SANKOFA_NPM_ID_WWW"]="www.sankofa.nexus|${IP_SANKOFA_PORTAL}|${SANKOFA_PORTAL_PORT}|https://sankofa.nexus"
["$SANKOFA_NPM_ID_PHOENIX"]="phoenix.sankofa.nexus|${IP_SANKOFA_PHOENIX_API}|${SANKOFA_PHOENIX_API_PORT}|"
["$SANKOFA_NPM_ID_WWW_PHOENIX"]="www.phoenix.sankofa.nexus|${IP_SANKOFA_PHOENIX_API}|${SANKOFA_PHOENIX_API_PORT}|https://phoenix.sankofa.nexus"
["$SANKOFA_NPM_ID_THE_ORDER"]="the-order.sankofa.nexus|${THE_ORDER_UPSTREAM_IP}|${THE_ORDER_UPSTREAM_PORT}|"
["$SANKOFA_NPM_ID_WWW_THE_ORDER"]="www.the-order.sankofa.nexus|${THE_ORDER_UPSTREAM_IP}|${THE_ORDER_UPSTREAM_PORT}|https://the-order.sankofa.nexus"
)
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
@@ -38,7 +56,7 @@ echo ""
# Authenticate
echo "🔐 Authenticating to NPMplus..."
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
TOKEN_RESPONSE=$(curl -s -k --connect-timeout 15 --max-time "$NPM_CURL_MAX_TIME" -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}")
@@ -58,11 +76,12 @@ update_proxy_host() {
local domain=$2
local target_ip=$3
local target_port=$4
local canonical_https="${5:-}"
echo "📝 Updating Proxy Host $host_id: $domain$target_ip:$target_port"
# Get current proxy host
CURRENT_HOST=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts/$host_id" \
CURRENT_HOST=$(curl -s -k --connect-timeout 15 --max-time "$NPM_CURL_MAX_TIME" -X GET "$NPM_URL/api/nginx/proxy-hosts/$host_id" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "{}")
if [ "$(echo "$CURRENT_HOST" | jq -r '.id // empty')" = "" ]; then
@@ -70,11 +89,28 @@ update_proxy_host() {
return 1
fi
# Update proxy host
UPDATE_PAYLOAD=$(echo "$CURRENT_HOST" | jq --arg ip "$target_ip" --arg port "$target_port" \
'.forward_host = $ip | .forward_port = ($port | tonumber)')
# NPMplus rejects full-document PUT (e.g. locations: null) — send only allowed forward fields.
local scheme="http"
local adv_line=""
if [ -n "$canonical_https" ]; then
adv_line="return 301 ${canonical_https}\$request_uri;"
fi
UPDATE_PAYLOAD=$(jq -n \
--arg scheme "$scheme" \
--arg hostname "$target_ip" \
--argjson port "$(echo "$target_port" | sed 's/[^0-9]//g')" \
--argjson websocket false \
--argjson block_exploits false \
--arg adv "$adv_line" \
'{
forward_scheme: $scheme,
forward_host: $hostname,
forward_port: $port,
allow_websocket_upgrade: $websocket,
block_exploits: $block_exploits
} + (if $adv != "" then {advanced_config: $adv} else {} end)')
RESPONSE=$(curl -s -k -X PUT "$NPM_URL/api/nginx/proxy-hosts/$host_id" \
RESPONSE=$(curl -s -k --connect-timeout 15 --max-time "${NPM_CURL_MAX_TIME:-120}" -X PUT "$NPM_URL/api/nginx/proxy-hosts/$host_id" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "$UPDATE_PAYLOAD")
@@ -87,6 +123,9 @@ update_proxy_host() {
else
echo "❌ Failed to update proxy host $host_id"
echo "$RESPONSE" | jq '.' 2>/dev/null || echo "$RESPONSE"
if echo "$RESPONSE" | jq -e '.error.code == 403' >/dev/null 2>&1; then
echo " (403 often means NPM user lacks permission to mutate proxy hosts; check UI role or use an admin identity.)"
fi
return 1
fi
}
@@ -96,9 +135,9 @@ SUCCESS=0
FAILED=0
for host_id in "${!PROXY_HOSTS[@]}"; do
IFS='|' read -r domain target_ip target_port <<< "${PROXY_HOSTS[$host_id]}"
IFS='|' read -r domain target_ip target_port canonical_https _ <<< "${PROXY_HOSTS[$host_id]}"
if update_proxy_host "$host_id" "$domain" "$target_ip" "$target_port"; then
if update_proxy_host "$host_id" "$domain" "$target_ip" "$target_port" "$canonical_https"; then
((SUCCESS++))
else
((FAILED++))

View File

@@ -112,6 +112,48 @@ else
ERRORS=$((ERRORS + 1))
fi
fi
# Public-sector program manifest (served by phoenix-deploy-api GET /api/v1/public-sector/programs)
if [[ -f "$PROJECT_ROOT/config/public-sector-program-manifest.json" ]]; then
log_ok "Found: config/public-sector-program-manifest.json"
# Structural validation is best-effort: silently skipped when jq is absent
# (the file's presence was already reported above).
if command -v jq &>/dev/null; then
# jq -e exits non-zero when the boolean below is false, driving the else branch.
# The last clause asserts program ids are unique (unique-count == total-count).
if jq -e '
(.schemaVersion | type == "string")
and (.programs | type == "array")
and (.programs | length > 0)
and ((.programs | map(.id) | unique | length) == (.programs | length))
' "$PROJECT_ROOT/config/public-sector-program-manifest.json" &>/dev/null; then
log_ok "public-sector-program-manifest.json: schemaVersion, programs[], unique .id"
else
log_err "public-sector-program-manifest.json: invalid structure or duplicate program ids"
ERRORS=$((ERRORS + 1))
fi
fi
else
log_err "Missing config/public-sector-program-manifest.json"
ERRORS=$((ERRORS + 1))
fi
# Proxmox operational template (VMID/IP/FQDN mirror; see docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md)
if [[ -f "$PROJECT_ROOT/config/proxmox-operational-template.json" ]]; then
log_ok "Found: config/proxmox-operational-template.json"
# Same best-effort pattern: only validate structure when jq is installed.
if command -v jq &>/dev/null; then
# Top-level shape only: schemaVersion string, gateway string, non-empty
# proxmox_nodes[] and services[]; per-entry fields are not validated here.
if jq -e '
(.schemaVersion | type == "string")
and (.network.management_lan.gateway | type == "string")
and (.proxmox_nodes | type == "array")
and (.proxmox_nodes | length >= 1)
and (.services | type == "array")
and (.services | length >= 1)
' "$PROJECT_ROOT/config/proxmox-operational-template.json" &>/dev/null; then
log_ok "proxmox-operational-template.json: schema, network, nodes, services"
else
log_err "proxmox-operational-template.json: invalid top-level structure"
ERRORS=$((ERRORS + 1))
fi
fi
else
log_err "Missing config/proxmox-operational-template.json"
ERRORS=$((ERRORS + 1))
fi
fi
if [[ -n "$OPTIONAL_ENV" ]]; then

View File

@@ -1,7 +1,10 @@
#!/usr/bin/env bash
# Verify deployed contracts on Blockscout (Chain 138)
# Usage: ./scripts/verify-contracts-blockscout.sh [--only contract1,contract2] [--skip contract3]
# Version: 2026-01-31
# Before each verify, uses `cast code` on RPC to skip EOAs / empty code (avoids Blockscout
# "not a smart contract" noise for addresses like WETH10 when nothing is deployed there).
# Set VERIFY_SKIP_BYTECODE_CHECK=1 to attempt forge verify even when code lookup fails or is empty.
# Version: 2026-03-26
set -euo pipefail
@@ -41,9 +44,23 @@ should_verify() {
cd "$SMOM"
# Returns 0 if address has non-empty runtime bytecode on RPC, 1 otherwise (or if check skipped).
# Return 0 when $1 has non-empty runtime bytecode on $RPC, 1 otherwise.
# Two opt-out paths both return 0 ("assume deployed, attempt verify"):
# VERIFY_SKIP_BYTECODE_CHECK=1, or the `cast` binary not being installed.
has_contract_bytecode() {
  local target="$1"
  local bytecode
  if [[ "${VERIFY_SKIP_BYTECODE_CHECK:-0}" == "1" ]]; then
    return 0
  fi
  if ! command -v cast >/dev/null 2>&1; then
    return 0
  fi
  # Normalize: strip whitespace/newlines and lowercase before comparing.
  bytecode=$(cast code "$target" --rpc-url "$RPC" 2>/dev/null | tr -d '\n\r \t' | tr '[:upper:]' '[:lower:]') || true
  [[ -n "$bytecode" && "$bytecode" != "0x" && "$bytecode" != "0x0" ]]
}
verify_one() {
local addr="$1" contract="$2" path="$3"
echo "Verifying $contract at $addr..."
if ! has_contract_bytecode "$addr"; then
echo " skip: no contract bytecode at $addr on RPC (EOA or undeployed; verify would fail in Blockscout)"
return 0
fi
if forge verify-contract "$addr" "$path" \
--chain-id 138 \
--verifier blockscout \

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env bash
# Read-only: compare expected VMIDs from config/proxmox-operational-template.json
# to live Proxmox inventory (pct/qm list) over SSH. No cluster changes.
#
# Usage (from repo root):
# bash scripts/verify/audit-proxmox-operational-template.sh
# SSH_USER=root SSH_OPTS="-o BatchMode=yes" bash scripts/verify/audit-proxmox-operational-template.sh
#
# Env:
# PROXMOX_HOSTS Space-separated IPs (default: sources config/ip-addresses.conf — ML110, R630-01, R630-02)
# SSH_USER default root
# SSH_OPTS extra ssh options (e.g. -i /path/key)
#
# Exit: 0 always (report-only). Prints [MISSING_ON_CLUSTER] / [EXTRA_ON_CLUSTER] when SSH works.
# Deliberately no -e: this audit is report-only and per-host SSH failures are
# handled inline with `|| true` / [SKIP] rather than aborting the whole run.
set -uo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
TEMPLATE_JSON="$PROJECT_ROOT/config/proxmox-operational-template.json"
SSH_USER="${SSH_USER:-root}"
SSH_OPTS="${SSH_OPTS:--o ConnectTimeout=6 -o StrictHostKeyChecking=accept-new}"
cd "$PROJECT_ROOT"
# jq is required only for the comparison; exit 0 (not an error) when missing.
if ! command -v jq &>/dev/null; then
echo "[WARN] jq not installed; install jq to compare VMIDs."
exit 0
fi
if [[ ! -f "$TEMPLATE_JSON" ]]; then
echo "[ERROR] Missing $TEMPLATE_JSON"
exit 1
fi
# shellcheck source=/dev/null
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOSTS="${PROXMOX_HOSTS:-${PROXMOX_HOST_ML110:-192.168.11.10} ${PROXMOX_HOST_R630_01:-192.168.11.11} ${PROXMOX_HOST_R630_02:-192.168.11.12}}"
# Template services[] entries with a non-null vmid, sorted numerically, unique.
EXPECTED_VMIDS=$(jq -r '.services[] | select(.vmid != null) | .vmid' "$TEMPLATE_JSON" | sort -n | uniq)
echo "=== Proxmox template audit (read-only) ==="
echo "Template: $TEMPLATE_JSON"
# NOTE(review): `wc -l` reports 1 even when EXPECTED_VMIDS is empty — cosmetic only.
echo "Expected VMIDs (non-null): $(echo "$EXPECTED_VMIDS" | wc -l) rows"
echo ""
ALL_LIVE=""
# $PROXMOX_HOSTS and $SSH_OPTS are intentionally unquoted: both are
# space-separated lists that must word-split into multiple arguments.
for h in $PROXMOX_HOSTS; do
# One SSH round-trip per host: CT VMIDs (pct) then VM VMIDs (qm), header rows dropped.
out=$(ssh $SSH_OPTS "${SSH_USER}@${h}" "pct list 2>/dev/null | awk 'NR>1 {print \$1}'; qm list 2>/dev/null | awk 'NR>1 {print \$1}'" 2>/dev/null || true)
if [[ -z "$out" ]]; then
echo "[SKIP] No inventory from $h (SSH failed or empty)"
continue
fi
echo "--- Live inventory: $h ---"
while IFS= read -r vid; do
[[ -z "${vid:-}" ]] && continue
echo " VMID $vid"
ALL_LIVE+="$vid"$'\n'
done <<< "$out"
done
# De-duplicate the union across all hosts; drop blank lines left by the appends.
LIVE_SORTED=$(echo "$ALL_LIVE" | sed '/^$/d' | sort -n | uniq)
if [[ -z "$LIVE_SORTED" ]]; then
echo ""
echo "[INFO] No live VMIDs collected (no SSH to cluster). Run from LAN with keys to Proxmox nodes."
exit 0
fi
echo ""
echo "=== Diff (template expected vs union of live VMIDs) ==="
MISSING=0
# Expected-but-not-live: grep -qx matches the whole line exactly (no substrings).
while IFS= read -r ev; do
[[ -z "${ev:-}" ]] && continue
if ! echo "$LIVE_SORTED" | grep -qx "$ev"; then
echo "[MISSING_ON_CLUSTER] VMID $ev (in template, not seen on scanned nodes)"
MISSING=$((MISSING + 1))
fi
done <<< "$EXPECTED_VMIDS"
EXTRA=0
# Live-but-not-expected: reverse direction of the same exact-line membership test.
while IFS= read -r lv; do
[[ -z "${lv:-}" ]] && continue
if ! echo "$EXPECTED_VMIDS" | grep -qx "$lv"; then
echo "[EXTRA_ON_CLUSTER] VMID $lv (on cluster, not in template services[])"
EXTRA=$((EXTRA + 1))
fi
done <<< "$LIVE_SORTED"
echo ""
echo "Summary: missing_on_template_scan=$MISSING extra_vs_template=$EXTRA"
echo "Note: VMIDs on nodes not scanned (other hosts) appear as MISSING. Expand PROXMOX_HOSTS if needed."

View File

@@ -0,0 +1,153 @@
#!/usr/bin/env bash
# Chain 138 — RPC health: parallel head check + per-node peer count + public RPC capability probe.
# Exit 0 if all HTTP RPCs respond, head spread <= max_blocks_spread, each peer count >= min_peers,
# and the public RPC capability probe matches the currently documented support matrix.
#
# Usage: ./scripts/verify/check-chain138-rpc-health.sh
# Env: RPC_MAX_HEAD_SPREAD (default 12), RPC_MIN_PEERS (default 10), RPC_TIMEOUT_SEC (default 20),
# CHAIN138_PUBLIC_RPC_URL (default https://rpc-http-pub.d-bis.org)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
MAX_SPREAD="${RPC_MAX_HEAD_SPREAD:-12}"
MIN_PEERS="${RPC_MIN_PEERS:-10}"
TO="${RPC_TIMEOUT_SEC:-20}"
PUBLIC_RPC_URL="${CHAIN138_PUBLIC_RPC_URL:-https://rpc-http-pub.d-bis.org}"
# VMID|IP (HTTP :8545)
RPC_ROWS=(
"2101|${IP_BESU_RPC_CORE_1:-192.168.11.211}"
"2102|${IP_BESU_RPC_CORE_2:-192.168.11.212}"
"2201|${IP_BESU_RPC_PUBLIC_1:-192.168.11.221}"
"2301|${IP_BESU_RPC_PRIVATE_1:-192.168.11.232}"
"2303|192.168.11.233"
"2304|192.168.11.234"
"2305|192.168.11.235"
"2306|192.168.11.236"
"2307|192.168.11.237"
"2308|192.168.11.238"
"2400|192.168.11.240"
"2401|192.168.11.241"
"2402|192.168.11.242"
"2403|192.168.11.243"
)
PAYLOAD_BN='{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
PAYLOAD_PC='{"jsonrpc":"2.0","method":"net_peerCount","params":[],"id":1}'
tmpdir=$(mktemp -d)
trap 'rm -rf "$tmpdir"' EXIT
# Fan out: one background subshell per node writes its block-number and
# peer-count JSON to per-VMID temp files; a curl failure is replaced with a
# sentinel {"error":"curl"} body so every file exists for the report loop.
for row in "${RPC_ROWS[@]}"; do
vmid="${row%%|*}"
ip="${row#*|}"
(
curl -sS -m "$TO" -X POST "http://${ip}:8545" -H "Content-Type: application/json" -d "$PAYLOAD_BN" >"$tmpdir/bn-$vmid.json" 2>/dev/null || echo '{"error":"curl"}' >"$tmpdir/bn-$vmid.json"
curl -sS -m "$TO" -X POST "http://${ip}:8545" -H "Content-Type: application/json" -d "$PAYLOAD_PC" >"$tmpdir/pc-$vmid.json" 2>/dev/null || echo '{"error":"curl"}' >"$tmpdir/pc-$vmid.json"
) &
done
# Barrier: wait for all background probes before reading the temp files.
wait
fail=0
# min_b sentinel is larger than any realistic block height so the first
# responding node always lowers it.
min_b=999999999
max_b=0
echo "Chain 138 RPC health (parallel sample)"
printf '%-5s %-15s %-10s %-8s\n' "VMID" "IP" "block(dec)" "peers"
echo "------------------------------------------------------------"
for row in "${RPC_ROWS[@]}"; do
vmid="${row%%|*}"
ip="${row#*|}"
bh=$(jq -r '.result // empty' "$tmpdir/bn-$vmid.json" 2>/dev/null || true)
ph=$(jq -r '.result // empty' "$tmpdir/pc-$vmid.json" 2>/dev/null || true)
if [[ -z "$bh" ]]; then
printf '%-5s %-15s %-10s %-8s\n' "$vmid" "$ip" "FAIL" "—"
# ((fail++)) evaluates to the pre-increment value; when fail is 0 that is a
# non-zero exit status which would trip `set -e` — hence the `|| true` guard.
((fail++)) || true
continue
fi
# Bash arithmetic accepts 0x-prefixed hex, converting the JSON-RPC quantities
# to decimal; an empty peer-count result yields 0 ($(( )) of empty is 0).
bd=$((bh))
pd=$((ph))
[[ "$bd" -lt "$min_b" ]] && min_b=$bd
[[ "$bd" -gt "$max_b" ]] && max_b=$bd
if [[ "$pd" -lt "$MIN_PEERS" ]]; then
printf '%-5s %-15s %-10s %-8s LOW_PEERS\n' "$vmid" "$ip" "$bd" "$pd"
((fail++)) || true
else
printf '%-5s %-15s %-10s %-8s\n' "$vmid" "$ip" "$bd" "$pd"
fi
done
spread=$((max_b - min_b))
echo "------------------------------------------------------------"
echo "Head spread (max-min): $spread (max allowed $MAX_SPREAD)"
if [[ "$spread" -gt "$MAX_SPREAD" ]]; then
echo "FAIL: head spread too large"
((fail++)) || true
fi
# Interim node-health verdict; `fail` keeps accumulating through the
# capability probe below before the script finally exits.
if [[ "$fail" -eq 0 ]]; then
echo "OK: all RPCs responded, peers >= $MIN_PEERS, spread <= $MAX_SPREAD"
else
echo "FAIL: $fail check(s) failed"
fi
echo
echo "Chain 138 public RPC capability probe ($PUBLIC_RPC_URL)"
# POST a single JSON-RPC 2.0 call to $PUBLIC_RPC_URL; response body to stdout.
# $1 = method name, $2 = JSON params array literal (defaults to []).
rpc_request() {
  local rpc_method="$1"
  local rpc_params="${2:-[]}"
  local request_body
  request_body="{\"jsonrpc\":\"2.0\",\"method\":\"${rpc_method}\",\"params\":${rpc_params},\"id\":1}"
  curl -sS -m "$TO" -X POST "$PUBLIC_RPC_URL" \
    -H "Content-Type: application/json" \
    -d "$request_body"
}
# Probe a method expected to be SUPPORTED on the public RPC.
# Prints "OK" and returns 0 when the response carries a non-empty .result;
# otherwise prints "FAIL", bumps the global `fail` counter, and returns 1.
check_supported_method() {
  local probe_method="$1"
  local probe_params="${2:-[]}"
  local raw parsed
  raw="$(rpc_request "$probe_method" "$probe_params" || printf '%s' '{"error":"curl"}')"
  parsed="$(printf '%s' "$raw" | jq -r '.result // empty' 2>/dev/null || true)"
  if [[ -z "$parsed" ]]; then
    printf ' %-32s %s\n' "$probe_method" "FAIL"
    ((fail++)) || true
    return 1
  fi
  printf ' %-32s %s\n' "$probe_method" "OK"
  return 0
}
# Probe a method expected to be ABSENT from the public RPC.
# "EXPECTED_MISSING" (return 0) when the node answers with JSON-RPC error
# -32601 / "Method not found"; any other answer is "UNEXPECTED" (fail++, return 1).
check_expected_missing_method() {
  local probe_method="$1"
  local probe_params="${2:-[]}"
  local raw err_code err_msg
  raw="$(rpc_request "$probe_method" "$probe_params" || printf '%s' '{"error":"curl"}')"
  err_code="$(printf '%s' "$raw" | jq -r '.error.code // empty' 2>/dev/null || true)"
  err_msg="$(printf '%s' "$raw" | jq -r '.error.message // empty' 2>/dev/null || true)"
  if [[ "$err_code" != "-32601" && "$err_msg" != "Method not found" ]]; then
    printf ' %-32s %s\n' "$probe_method" "UNEXPECTED"
    ((fail++)) || true
    return 1
  fi
  printf ' %-32s %s\n' "$probe_method" "EXPECTED_MISSING"
  return 0
}
# Probe the documented public-RPC support matrix. Each helper prints its own
# status line and increments the global `fail` counter on failure, but it also
# RETURNS non-zero — and this script runs under `set -euo pipefail`, so a bare
# call would abort on the first failed probe before the remaining probes run
# or the summary prints. The `|| true` guards keep the sweep exhaustive while
# preserving the `fail` accounting (same pattern as `((fail++)) || true` above).
check_supported_method "eth_chainId" || true
check_supported_method "eth_gasPrice" || true
check_supported_method "eth_feeHistory" "[\"0x1\", \"latest\", []]" || true
check_supported_method "trace_block" "[\"0x1\"]" || true
check_supported_method "trace_replayBlockTransactions" "[\"0x1\", [\"trace\"]]" || true
check_expected_missing_method "eth_maxPriorityFeePerGas" || true
# Final verdict: exit 0 only when node health AND capability probes all passed.
if [[ "$fail" -eq 0 ]]; then
echo "OK: node health and public RPC capability checks passed"
exit 0
fi
echo "FAIL: $fail check(s) failed"
exit 1

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Check that Chain 138 deployed contracts have bytecode on-chain.
# Address list: 59 (core, CCIP, PMM, vault/reserve, CompliantFiatTokens). Source: CONTRACT_ADDRESSES_REFERENCE, ADDRESS_MATRIX.
# Address list: 61 (core, CCIP, PMM, vault/reserve, oracle keeper path, CompliantFiatTokens). Source: CONTRACT_ADDRESSES_REFERENCE, ADDRESS_MATRIX.
# Usage: ./scripts/verify/check-contracts-on-chain-138.sh [RPC_URL] [--dry-run]
# Default RPC: from env RPC_URL_138 (Chain 138 Core standard) or config/ip-addresses.conf, else https://rpc-core.d-bis.org
# Optional: SKIP_EXIT=1 to exit 0 even when some addresses MISS (e.g. when RPC unreachable from this host).
@@ -65,6 +65,8 @@ else
"0x0C4FD27018130A00762a802f91a72D6a64a60F14" # PolicyManager
"0x0059e237973179146237aB49f1322E8197c22b21" # TokenImplementation
"0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04" # Price Feed Keeper
"0x8918eE0819fD687f4eb3e8b9B7D0ef7557493cfa" # OraclePriceFeed (keeper → ReserveSystem)
"0x3e8725b8De386feF3eFE5678c92eA6aDB41992B2" # WETH MockPriceFeed (keeper WETH aggregator)
"0x16D9A2cB94A0b92721D93db4A6Cd8023D3338800" # MerchantSettlementRegistry
"0xe77cb26eA300e2f5304b461b0EC94c8AD6A7E46D" # WithdrawalEscrow
"0xAEE4b7fBe82E1F8295951584CBc772b8BBD68575" # UniversalAssetRegistry (proxy)
@@ -82,11 +84,11 @@ else
"0x6427F9739e6B6c3dDb4E94fEfeBcdF35549549d8" # MirrorRegistry
"0x66FEBA2fC9a0B47F26DD4284DAd24F970436B8Dc" # AlltraAdapter
"0x7131F887DBEEb2e44c1Ed267D2A68b5b83285afc" # TransactionMirror Chain 138 (deployed 2026-02-27; set TRANSACTION_MIRROR_ADDRESS in .env)
"0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8" # DODO cUSDT-cUSDC pool
"0x79cdbaFBaA0FdF9F55D26F360F54cddE5c743F7D" # DODOPMMIntegration
"0x8EF6657D2a86c569F6ffc337EE6b4260Bd2e59d0" # DODOPMMProvider
"0xa3Ee6091696B28e5497b6F491fA1e99047250c59" # DODO pool cUSDT/USDT
"0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5" # DODO pool cUSDC/USDC
"0xff8d3b8fDF7B112759F076B69f4271D4209C0849" # DODO cUSDT-cUSDC pool
"0x5BDc62f1ae7D630c37A8B363a1d49845356Ee72d" # DODOPMMIntegration (corrected canonical stack)
"0x5CAe6Ce155b7f08D3a956F5Dc82fC9945f29B381" # DODOPMMProvider (corrected canonical stack)
"0x6fc60DEDc92a2047062294488539992710b99D71" # DODO pool cUSDT/USDT
"0x9f74Be42725f2Aa072a9E0CdCce0E7203C510263" # DODO pool cUSDC/USDC
"0x607e97cD626f209facfE48c1464815DDE15B5093" # ReserveSystem
"0x34B73e6EDFd9f85a7c25EeD31dcB13aB6E969b96" # ReserveTokenIntegration
"0xEA4C892D6c1253797c5D95a05BF3863363080b4B" # RegulatedEntityRegistry (vault)
@@ -160,7 +162,7 @@ if [[ $MISS -gt 0 && -z "$rpc_reachable" ]]; then
echo " → RPC was unreachable from this host; see WARN above. Run from LAN/VPN or pass a reachable RPC URL." >&2
fi
# Expected missing (pending deploy or confirm): TransactionMirror; exit 0 when only this address is missing
EXPECTED_MISSING=("0xb5876547c52CaBf49d7f40233B6f6a140F403d25" "0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8")
EXPECTED_MISSING=("0xb5876547c52CaBf49d7f40233B6f6a140F403d25")
if [[ -n "${SKIP_EXIT:-}" && "${SKIP_EXIT}" != "0" ]]; then
exit 0
fi

View File

@@ -1,7 +1,11 @@
#!/usr/bin/env bash
# Check all Chain 138 DODO PMM pool token balances (base + quote).
# Check the corrected Chain 138 funded DODO PMM pool token balances (base + quote).
# Uses eth_call (curl) for compatibility with RPCs that reject some cast call formats.
#
# Canonical source for addresses:
# docs/11-references/ADDRESS_MATRIX_AND_STATUS.md (updated 2026-03-26)
# docs/11-references/DEPLOYED_TOKENS_BRIDGES_LPS_AND_ROUTING_STATUS.md
#
# Usage: ./scripts/verify/check-pmm-pool-balances-chain138.sh [RPC_URL]
# Default RPC: http://192.168.11.211:8545
@@ -14,14 +18,10 @@ RPC="${1:-${RPC_URL_138:-http://192.168.11.211:8545}}"
# Token addresses (Chain 138 canonical)
cUSDT="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
cUSDC="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
OFFICIAL_USDT="0x15DF1D5BFDD8Aa4b380445D4e3E9B38d34283619"
# Official USDC from integration or placeholder (no contract on 138)
OFFICIAL_USDC="0x0000000000000000000000000000000000000000"
# Pool addresses
POOL_CUSDTCUSDC="0x9fcB06Aa1FD5215DC0E91Fd098aeff4B62fEa5C8"
POOL_CUSDTUSDT="0xa3Ee6091696B28e5497b6F491fA1e99047250c59"
POOL_CUSDCUSDC="0x90bd9Bf18Daa26Af3e814ea224032d015db58Ea5"
OFFICIAL_USDT="0x004b63A7B5b0E06f6bB6adb4a5F9f590BF3182D1"
OFFICIAL_USDC="0x71D6687F38b93CCad569Fa6352c876eea967201b"
cXAUC="0x290E52a8819A4fbD0714E517225429aA2B70EC6b"
cEURT="0xdf4b71c61E5912712C1Bdd451416B9aC26949d72"
pads() { local a; a=$(echo "$1" | sed 's/0x//'); printf '%064s' "$a" | tr ' ' '0'; }
balance() {
@@ -35,43 +35,89 @@ balance() {
}
hex2dec() { local h="$1"; [[ -z "$h" || "$h" == "0x" ]] && echo "0" && return; printf '%d' "$h"; }
human6() { local r; r=$(hex2dec "$1"); echo "scale=6; $r / 1000000" | bc 2>/dev/null || echo "$r"; }
# Sum two hex quantities and print the result as "0x…" hex.
add_hex() {
  local lhs rhs
  lhs=$(hex2dec "$1")
  rhs=$(hex2dec "$2")
  printf '0x%x' "$(( lhs + rhs ))"
}
# Running totals (hex strings) folded in by print_pool, split by pool
# visibility bucket (public vs private) and asset class (stable vs cXAUC).
PUBLIC_STABLE_TOTAL="0x0"
PUBLIC_XAU_TOTAL="0x0"
PRIVATE_STABLE_TOTAL="0x0"
PRIVATE_XAU_TOTAL="0x0"
# Fold one raw (hex) token amount into the matching global bucket total.
# $1 bucket ("public" or anything else → private); $2 token name (cXAUC
# goes to the XAU bucket, every other token to the stable bucket); $3 raw hex.
# Replaces the four-way duplicated if/else that previously lived inline.
_accumulate_pool_total() {
  local bucket="$1" token_name="$2" raw="$3" target
  if [[ "$bucket" == "public" ]]; then
    if [[ "$token_name" == "cXAUC" ]]; then target="PUBLIC_XAU_TOTAL"; else target="PUBLIC_STABLE_TOTAL"; fi
  else
    if [[ "$token_name" == "cXAUC" ]]; then target="PRIVATE_XAU_TOTAL"; else target="PRIVATE_STABLE_TOTAL"; fi
  fi
  # printf -v writes the named global; ${!target} reads its current value.
  printf -v "$target" '%s' "$(add_hex "${!target}" "$raw")"
}

# Print one DODO pool's base/quote balances and accumulate them into the
# bucket totals above.
# $1 label; $2 pool address; $3/$4 base token name/address;
# $5/$6 quote token name/address; $7 bucket ("public"/"private").
print_pool() {
  local label="$1" pool="$2" base_name="$3" base_token="$4" quote_name="$5" quote_token="$6" bucket="$7"
  local base_raw quote_raw
  base_raw=$(balance "$base_token" "$pool")
  quote_raw=$(balance "$quote_token" "$pool")
  echo "$label"
  echo " Address: $pool"
  echo " $base_name (base): raw=$base_raw -> $(human6 "$base_raw")"
  echo " $quote_name (quote): raw=$quote_raw -> $(human6 "$quote_raw")"
  echo ""
  _accumulate_pool_total "$bucket" "$base_name" "$base_raw"
  _accumulate_pool_total "$bucket" "$quote_name" "$quote_raw"
}
echo "=============================================="
echo " Chain 138 — PMM pool balances"
echo " RPC: $RPC"
echo " Pool map: corrected 2026-03-26 public + private funded set"
echo "=============================================="
echo ""
# Pool 1: cUSDT / cUSDC (base / quote)
echo "Pool 1: cUSDT / cUSDC"
echo " Address: $POOL_CUSDTCUSDC"
r1=$(balance "$cUSDT" "$POOL_CUSDTCUSDC")
r2=$(balance "$cUSDC" "$POOL_CUSDTCUSDC")
echo " cUSDT (base): raw=$r1$(human6 "$r1")"
echo " cUSDC (quote): raw=$r2$(human6 "$r2")"
echo "Public pools"
echo "------------"
print_pool "Pool 1: cUSDT / cUSDC" "0xff8d3b8fDF7B112759F076B69f4271D4209C0849" "cUSDT" "$cUSDT" "cUSDC" "$cUSDC" "public"
print_pool "Pool 2: cUSDT / USDT (official mirror)" "0x6fc60DEDc92a2047062294488539992710b99D71" "cUSDT" "$cUSDT" "USDT" "$OFFICIAL_USDT" "public"
print_pool "Pool 3: cUSDC / USDC (official mirror)" "0x0309178ae30302D83c76d6Dd402a684eF3160eec" "cUSDC" "$cUSDC" "USDC" "$OFFICIAL_USDC" "public"
print_pool "Pool 4: cUSDT / cXAUC" "0x1AA55E2001E5651349AfF5A63FD7A7Ae44f0F1b0" "cUSDT" "$cUSDT" "cXAUC" "$cXAUC" "public"
print_pool "Pool 5: cUSDC / cXAUC" "0xEA9Ac6357CaCB42a83b9082B870610363B177cBa" "cUSDC" "$cUSDC" "cXAUC" "$cXAUC" "public"
print_pool "Pool 6: cEURT / cXAUC" "0xbA99bc1eAAC164569d5AcA96C806934DDaF970Cf" "cEURT" "$cEURT" "cXAUC" "$cXAUC" "public"
echo "Private pools"
echo "-------------"
print_pool "Pool 7: cUSDT / cXAUC (private)" "0x94316511621430423a2cff0C036902BAB4aA70c2" "cUSDT" "$cUSDT" "cXAUC" "$cXAUC" "private"
print_pool "Pool 8: cUSDC / cXAUC (private)" "0x7867D58567948e5b9908F1057055Ee4440de0851" "cUSDC" "$cUSDC" "cXAUC" "$cXAUC" "private"
print_pool "Pool 9: cEURT / cXAUC (private)" "0x505403093826D494983A93b43Aa0B8601078A44e" "cEURT" "$cEURT" "cXAUC" "$cXAUC" "private"
TOTAL_STABLE=$(add_hex "$PUBLIC_STABLE_TOTAL" "$PRIVATE_STABLE_TOTAL")
TOTAL_XAU=$(add_hex "$PUBLIC_XAU_TOTAL" "$PRIVATE_XAU_TOTAL")
echo "Summary"
echo "-------"
echo " Public stable liquidity: raw=$PUBLIC_STABLE_TOTAL -> $(human6 "$PUBLIC_STABLE_TOTAL")"
echo " Public XAU liquidity: raw=$PUBLIC_XAU_TOTAL -> $(human6 "$PUBLIC_XAU_TOTAL")"
echo " Private stable liquidity: raw=$PRIVATE_STABLE_TOTAL -> $(human6 "$PRIVATE_STABLE_TOTAL")"
echo " Private XAU liquidity: raw=$PRIVATE_XAU_TOTAL -> $(human6 "$PRIVATE_XAU_TOTAL")"
echo " Total stable liquidity: raw=$TOTAL_STABLE -> $(human6 "$TOTAL_STABLE")"
echo " Total XAU liquidity: raw=$TOTAL_XAU -> $(human6 "$TOTAL_XAU")"
echo ""
# Pool 2: cUSDT / USDT (official USDT has no code on 138)
echo "Pool 2: cUSDT / USDT (official)"
echo " Address: $POOL_CUSDTUSDT"
r1=$(balance "$cUSDT" "$POOL_CUSDTUSDT")
r2=$(balance "$OFFICIAL_USDT" "$POOL_CUSDTUSDT")
echo " cUSDT (base): raw=${r1:-0x0}$(human6 "$r1")"
echo " USDT (quote): raw=${r2:-0x0}$(human6 "$r2")"
echo ""
# Pool 3: cUSDC / USDC (official USDC not deployed on 138)
echo "Pool 3: cUSDC / USDC (official)"
echo " Address: $POOL_CUSDCUSDC"
r1=$(balance "$cUSDC" "$POOL_CUSDCUSDC")
echo " cUSDC (base): raw=$r1$(human6 "$r1")"
if [[ "$OFFICIAL_USDC" != "0x0000000000000000000000000000000000000000" ]]; then
r2=$(balance "$OFFICIAL_USDC" "$POOL_CUSDCUSDC")
echo " USDC (quote): raw=$r2$(human6 "$r2")"
else
echo " USDC (quote): N/A (no official USDC contract on 138)"
fi
echo ""
echo "Note: Pool 1 (cUSDT/cUSDC) is the only pool with liquidity on 138. Pools 2 and 3 use official USDT/USDC which have no contract on Chain 138."
echo "Note: This script verifies the current funded public and private PMM set from the 2026-03-26 docs."
echo "Done."

View File

@@ -55,6 +55,7 @@ declare -A DOMAIN_ZONES=(
["phoenix.sankofa.nexus"]="sankofa.nexus"
["www.phoenix.sankofa.nexus"]="sankofa.nexus"
["the-order.sankofa.nexus"]="sankofa.nexus"
["www.the-order.sankofa.nexus"]="sankofa.nexus"
["studio.sankofa.nexus"]="sankofa.nexus"
["rpc.public-0138.defi-oracle.io"]="defi-oracle.io"
)

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Exit 0 if every submodule has a clean working tree (no modified/untracked files).
# Use in CI or after merges: bash scripts/verify/submodules-clean.sh
set -euo pipefail

ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
cd "$ROOT"

tmp="$(mktemp)"
trap 'rm -f "$tmp"' EXIT

dirty=0
# Enumerate submodule paths from .gitmodules. `--get-regexp` prints
# "submodule.<name>.path <value>"; the sed strips everything up to the first
# space so path values containing spaces survive intact (the previous
# `awk '{print $2}'` truncated such paths at the first space).
# 2>/dev/null: a repo without .gitmodules yields an empty list → clean exit.
while IFS= read -r subpath; do
  [[ -z "$subpath" ]] && continue
  # Skip declared submodules that are not initialized git checkouts.
  if ! git -C "$ROOT/$subpath" rev-parse --git-dir >/dev/null 2>&1; then
    continue
  fi
  out="$(git -C "$ROOT/$subpath" status --porcelain 2>/dev/null || true)"
  if [[ -n "$out" ]]; then
    dirty=1
    printf '%s\n' "=== $subpath ===" >>"$tmp"
    printf '%s\n' "$out" >>"$tmp"
  fi
done < <(git config --file .gitmodules --get-regexp '^submodule\..*\.path$' 2>/dev/null | sed 's/^[^ ]* //' | sort -u)

if (( dirty )); then
  echo "submodules-clean: dirty submodule working trees:" >&2
  cat "$tmp" >&2
  exit 1
fi
echo "submodules-clean: OK (all submodules clean)"

View File

@@ -34,6 +34,9 @@ PUBLIC_IP_FOURTH="${PUBLIC_IP_FOURTH:-76.53.10.40}"
ACCEPT_ANY_DNS="${ACCEPT_ANY_DNS:-0}"
# Use system resolver (e.g. /etc/hosts) instead of dig @8.8.8.8 — set when running from LAN with generate-e2e-hosts.sh entries
E2E_USE_SYSTEM_RESOLVER="${E2E_USE_SYSTEM_RESOLVER:-0}"
# openssl s_client has no built-in connect timeout; wrap to avoid hangs (private/wss hosts).
E2E_OPENSSL_TIMEOUT="${E2E_OPENSSL_TIMEOUT:-15}"
E2E_OPENSSL_X509_TIMEOUT="${E2E_OPENSSL_X509_TIMEOUT:-5}"
if [ "$E2E_USE_SYSTEM_RESOLVER" = "1" ]; then
ACCEPT_ANY_DNS=1
log_info "E2E_USE_SYSTEM_RESOLVER=1: using getent (respects /etc/hosts); ACCEPT_ANY_DNS=1"
@@ -77,7 +80,8 @@ declare -A DOMAIN_TYPES_ALL=(
["www.sankofa.nexus"]="web"
["phoenix.sankofa.nexus"]="web"
["www.phoenix.sankofa.nexus"]="web"
["the-order.sankofa.nexus"]="web"
["the-order.sankofa.nexus"]="web" # OSJ portal (secure auth); app: ~/projects/the_order
["www.the-order.sankofa.nexus"]="web" # 301 → https://the-order.sankofa.nexus
["studio.sankofa.nexus"]="web"
["rpc.public-0138.defi-oracle.io"]="rpc-http"
["rpc.defi-oracle.io"]="rpc-http"
@@ -162,11 +166,15 @@ else
fi
# Domains that are optional when any test fails (off-LAN, 502, unreachable); fail → skip so run passes.
_PUB_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org dbis-admin.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus www.the-order.sankofa.nexus studio.sankofa.nexus mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org"
_PRIV_OPTIONAL_WHEN_FAIL="rpc-http-prv.d-bis.org rpc-ws-prv.d-bis.org rpc-fireblocks.d-bis.org ws.rpc-fireblocks.d-bis.org"
if [[ -z "${E2E_OPTIONAL_WHEN_FAIL:-}" ]]; then
if [[ "$PROFILE" == "private" ]]; then
E2E_OPTIONAL_WHEN_FAIL="rpc-http-prv.d-bis.org rpc-ws-prv.d-bis.org rpc-fireblocks.d-bis.org ws.rpc-fireblocks.d-bis.org"
E2E_OPTIONAL_WHEN_FAIL="$_PRIV_OPTIONAL_WHEN_FAIL"
elif [[ "$PROFILE" == "all" ]]; then
E2E_OPTIONAL_WHEN_FAIL="$_PRIV_OPTIONAL_WHEN_FAIL $_PUB_OPTIONAL_WHEN_FAIL"
else
E2E_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org dbis-admin.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus studio.sankofa.nexus mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org"
E2E_OPTIONAL_WHEN_FAIL="$_PUB_OPTIONAL_WHEN_FAIL"
fi
else
E2E_OPTIONAL_WHEN_FAIL="${E2E_OPTIONAL_WHEN_FAIL}"
@@ -178,6 +186,36 @@ declare -A EXPECTED_IP=(
["dev.d-bis.org"]="$PUBLIC_IP_FOURTH"
["codespaces.d-bis.org"]="$PUBLIC_IP_FOURTH"
)
# HTTPS check path (default "/"). API-first hosts may 404 on /; see docs/02-architecture/EXPECTED_WEB_CONTENT.md
declare -A E2E_HTTPS_PATH=(
["phoenix.sankofa.nexus"]="/health"
["www.phoenix.sankofa.nexus"]="/health"
["studio.sankofa.nexus"]="/studio/"
)
# Expected apex URL for NPM www → canonical 301/308 (Location must use this host; path from E2E_HTTPS_PATH must appear when set)
declare -A E2E_WWW_CANONICAL_BASE=(
["www.sankofa.nexus"]="https://sankofa.nexus"
["www.phoenix.sankofa.nexus"]="https://phoenix.sankofa.nexus"
["www.the-order.sankofa.nexus"]="https://the-order.sankofa.nexus"
)
# Returns 0 if Location URL matches expected canonical apex (and HTTPS path suffix when non-empty).
# Returns 0 if Location URL matches expected canonical apex (and HTTPS path suffix when non-empty).
# $1 Location header value; $2 canonical base URL; $3 optional expected path.
# Comparison is case-insensitive; the Location must be the base itself or
# "base/…", and when a non-root path is expected it must appear somewhere
# in the Location.
e2e_www_redirect_location_ok() {
  local location="$1" canonical="$2" want_path="${3:-}"
  local loc_lower="${location,,}"
  local canon_lower="${canonical,,}"
  [[ "$loc_lower" == "$canon_lower" || "$loc_lower" == "$canon_lower/"* ]] || return 1
  if [[ -n "$want_path" && "$want_path" != "/" ]]; then
    local path_lower="${want_path,,}"
    [[ "$loc_lower" == *"$path_lower"* ]] || return 1
  fi
  return 0
}
# --list-endpoints: print selected profile endpoints and exit (no tests)
if [[ "$LIST_ENDPOINTS" == "1" ]]; then
@@ -257,7 +295,7 @@ test_domain() {
if [ "$domain_type" != "unknown" ]; then
log_info "Test 2: SSL Certificate"
cert_info=$(echo | openssl s_client -connect "$domain:443" -servername "$domain" 2>/dev/null | openssl x509 -noout -subject -issuer -dates -ext subjectAltName 2>/dev/null || echo "")
cert_info=$( (echo | timeout "$E2E_OPENSSL_TIMEOUT" openssl s_client -connect "$domain:443" -servername "$domain" 2>/dev/null) | timeout "$E2E_OPENSSL_X509_TIMEOUT" openssl x509 -noout -subject -issuer -dates -ext subjectAltName 2>/dev/null || echo "")
if [ -n "$cert_info" ]; then
cert_cn=$(echo "$cert_info" | grep "subject=" | sed -E 's/.*CN\s*=\s*([^,]*).*/\1/' | sed 's/^ *//;s/ *$//' || echo "")
@@ -301,10 +339,12 @@ test_domain() {
# Test 3: HTTPS Request
if [ "$domain_type" = "web" ] || [ "$domain_type" = "api" ]; then
log_info "Test 3: HTTPS Request"
https_path="${E2E_HTTPS_PATH[$domain]:-}"
https_url="https://${domain}${https_path}"
log_info "Test 3: HTTPS Request (${https_url})"
START_TIME=$(date +%s.%N)
http_response=$(curl -s -I -k --connect-timeout 10 -w "\n%{time_total}" "https://$domain" 2>&1 || echo "")
http_response=$(curl -s -I -k --connect-timeout 10 -w "\n%{time_total}" "$https_url" 2>&1 || echo "")
END_TIME=$(date +%s.%N)
RESPONSE_TIME=$(echo "$END_TIME - $START_TIME" | bc 2>/dev/null || echo "0")
@@ -315,8 +355,39 @@ test_domain() {
echo "$headers" > "$OUTPUT_DIR/${domain//./_}_https_headers.txt"
if [ -n "$http_code" ]; then
if [ "$http_code" -ge 200 ] && [ "$http_code" -lt 400 ]; then
log_success "HTTPS: $domain returned HTTP $http_code (Time: ${time_total}s)"
# NPM canonical www → apex (advanced_config return 301/308)
local _e2e_canonical_www_redirect=""
local location_hdr=""
case "$domain" in
www.sankofa.nexus|www.phoenix.sankofa.nexus|www.the-order.sankofa.nexus)
if [ "$http_code" = "301" ] || [ "$http_code" = "308" ]; then
_e2e_canonical_www_redirect=1
fi
;;
esac
if [ -n "$_e2e_canonical_www_redirect" ]; then
location_hdr=$(echo "$headers" | grep -iE '^[Ll]ocation:' | head -1 | tr -d '\r' || echo "")
loc_val=$(printf '%s' "$location_hdr" | sed -E 's/^[Ll][Oo][Cc][Aa][Tt][Ii][Oo][Nn]:[[:space:]]*//' | sed 's/[[:space:]]*$//')
expected_base="${E2E_WWW_CANONICAL_BASE[$domain]:-}"
if [ -z "$loc_val" ]; then
log_warn "HTTPS: $domain returned HTTP $http_code but no Location header${https_path:+ (${https_url})}"
result=$(echo "$result" | jq --arg code "$http_code" --arg time "$time_total" \
'.tests.https = {"status": "warn", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber), "note": "missing Location on redirect"}')
elif [ -z "$expected_base" ]; then
log_warn "HTTPS: $domain redirect pass (no E2E_WWW_CANONICAL_BASE entry)"
result=$(echo "$result" | jq --arg code "$http_code" --arg time "$time_total" --arg loc "$location_hdr" \
'.tests.https = {"status": "pass", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber), "canonical_redirect": true, "location_header": $loc}')
elif ! e2e_www_redirect_location_ok "$loc_val" "$expected_base" "$https_path"; then
log_error "HTTPS: $domain Location mismatch (got \"$loc_val\", expected prefix \"$expected_base\" with path \"${https_path:-/}\")"
result=$(echo "$result" | jq --arg code "$http_code" --arg time "$time_total" --arg loc "$loc_val" --arg exp "$expected_base" --arg pth "${https_path:-}" \
'.tests.https = {"status": "fail", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber), "reason": "location_mismatch", "location": $loc, "expected_prefix": $exp, "expected_path_suffix": $pth}')
else
log_success "HTTPS: $domain returned HTTP $http_code (canonical redirect → $loc_val)${https_path:+ at ${https_url}}"
result=$(echo "$result" | jq --arg code "$http_code" --arg time "$time_total" --arg loc "$location_hdr" \
'.tests.https = {"status": "pass", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber), "canonical_redirect": true, "location_header": $loc}')
fi
elif [ "$http_code" -ge 200 ] && [ "$http_code" -lt 400 ]; then
log_success "HTTPS: $domain returned HTTP $http_code (Time: ${time_total}s)${https_path:+ at ${https_path}}"
# Check security headers
hsts=$(echo "$headers" | grep -i "strict-transport-security" || echo "")
@@ -330,12 +401,12 @@ test_domain() {
--argjson hsts "$HAS_HSTS" --argjson csp "$HAS_CSP" --argjson xfo "$HAS_XFO" \
'.tests.https = {"status": "pass", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber), "has_hsts": $hsts, "has_csp": $csp, "has_xfo": $xfo}')
else
log_warn "HTTPS: $domain returned HTTP $http_code (Time: ${time_total}s)"
log_warn "HTTPS: $domain returned HTTP $http_code (Time: ${time_total}s)${https_path:+ (${https_url})}"
result=$(echo "$result" | jq --arg code "$http_code" --arg time "$time_total" \
'.tests.https = {"status": "warn", "http_code": ($code | tonumber), "response_time_seconds": ($time | tonumber)}')
fi
else
log_error "HTTPS: Failed to connect to $domain"
log_error "HTTPS: Failed to connect to ${https_url}"
result=$(echo "$result" | jq --arg time "$time_total" '.tests.https = {"status": "fail", "response_time_seconds": ($time | tonumber)}')
fi
# Optional: Blockscout API check for explorer.d-bis.org (does not affect E2E pass/fail)
@@ -401,13 +472,21 @@ test_domain() {
# Check if wscat is available for full test
if command -v wscat >/dev/null 2>&1; then
log_info " Attempting full WebSocket test with wscat..."
WS_FULL_TEST=$(timeout 3 wscat -c "wss://$domain" -x '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' 2>&1 || echo "")
# -n: no TLS verify (aligns with curl -k); -w: seconds to wait for JSON-RPC response
WS_FULL_TEST=""
WS_FULL_EXIT=0
if ! WS_FULL_TEST=$(timeout 15 wscat -n -c "wss://$domain" -x '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' -w 5 2>&1); then
WS_FULL_EXIT=$?
fi
if echo "$WS_FULL_TEST" | grep -q "result"; then
log_success "WebSocket: Full test passed"
result=$(echo "$result" | jq --arg code "$WS_RESULT" '.tests.websocket = {"status": "pass", "http_code": $code, "full_test": true}')
result=$(echo "$result" | jq --arg code "$WS_RESULT" '.tests.websocket = {"status": "pass", "http_code": $code, "full_test": true, "full_test_output": "result"}')
elif [ "$WS_FULL_EXIT" -eq 0 ]; then
log_success "WebSocket: Full test connected cleanly"
result=$(echo "$result" | jq --arg code "$WS_RESULT" '.tests.websocket = {"status": "pass", "http_code": $code, "full_test": true, "note": "wscat exited successfully without printable RPC output"}')
else
log_warn "WebSocket: Connection established but RPC test failed"
result=$(echo "$result" | jq --arg code "$WS_RESULT" '.tests.websocket = {"status": "warning", "http_code": $code, "full_test": false}')
result=$(echo "$result" | jq --arg code "$WS_RESULT" --arg exit_code "$WS_FULL_EXIT" '.tests.websocket = {"status": "warning", "http_code": $code, "full_test": false, "exit_code": $exit_code}')
fi
else
log_warn "WebSocket: Basic test (Code: $WS_RESULT) - Install wscat for full test: npm install -g wscat"
@@ -558,6 +637,7 @@ cat >> "$REPORT_FILE" <<EOF
- **Optional domains:** Domains in \`E2E_OPTIONAL_WHEN_FAIL\` (default: many d-bis.org/sankofa/mim4u/rpc) have any fail treated as skip so the run passes when off-LAN or services unreachable. Set \`E2E_OPTIONAL_WHEN_FAIL=\` (empty) for strict mode.
- WebSocket tests require \`wscat\` tool: \`npm install -g wscat\`
- OpenSSL fetch uses \`timeout\` (\`E2E_OPENSSL_TIMEOUT\` / \`E2E_OPENSSL_X509_TIMEOUT\`, defaults 15s / 5s) so \`openssl s_client\` cannot hang indefinitely
- Internal connectivity tests require access to NPMplus container
- Explorer (explorer.d-bis.org): optional Blockscout API check; use \`SKIP_BLOCKSCOUT_API=1\` to skip when backend is unreachable (e.g. off-LAN). Fix runbook: docs/03-deployment/BLOCKSCOUT_FIX_RUNBOOK.md