chore: update submodule references and documentation
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- Marked submodules ai-mcp-pmm-controller, explorer-monorepo, and smom-dbis-138 as dirty to reflect recent changes.
- Updated documentation to clarify operator script usage, including dotenv loading and task execution instructions.
- Enhanced the README and various index files to provide clearer navigation and task completion guidance.

Made-with: Cursor
This commit is contained in:
defiQUG
2026-03-04 02:03:08 -08:00
parent 70eadb7bf0
commit e4c9dda0fd
246 changed files with 17774 additions and 93 deletions

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Wrapper: run smom-dbis-138/scripts/create-pmm-full-mesh-chain138.sh from repo root.
# Usage: ./scripts/create-pmm-full-mesh-chain138.sh
#   MESH_ONLY_C_STAR=1 = only c* vs c* pairs (no official USDT/USDC)
#   DRY_RUN=1          = print actions only
set -euo pipefail
# Resolve the repository root relative to this wrapper's own location.
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SMOM="${REPO_ROOT}/smom-dbis-138"
# Diagnostics go to stderr; non-zero exit lets callers detect the missing submodule.
[ -d "$SMOM" ] || { echo "Not found: $SMOM" >&2; exit 1; }
# exec replaces this process so the inner script's exit status is propagated.
exec bash "$SMOM/scripts/create-pmm-full-mesh-chain138.sh" "$@"

View File

@@ -118,7 +118,7 @@ fi
if [[ -z "$SKIP_MIRROR" ]]; then
echo "Deploying TransactionMirror (NEXT_NONCE=$NEXT_NONCE, gas $GAS_PRICE)..."
if ! forge script script/DeployTransactionMirror.s.sol:DeployTransactionMirror \
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$GAS_PRICE"; then
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$GAS_PRICE" --gas-estimate-multiplier 150; then
echo ""
echo "If the failure was CreateCollision (contract already at expected address), set in $SMOM/.env:" >&2
echo " TRANSACTION_MIRROR_ADDRESS=0xC7f2Cf4845C6db0e1a1e91ED41Bcd0FcC1b0E141" >&2
@@ -143,7 +143,7 @@ while true; do
echo ""
echo "Creating DODO cUSDT/cUSDC pool (NEXT_NONCE=$NEXT_NONCE, gas $POOL_GAS)..."
POOL_OUTPUT=$(forge script script/dex/CreateCUSDTCUSDCPool.s.sol:CreateCUSDTCUSDCPool \
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$POOL_GAS" 2>&1) || true
--rpc-url "$RPC" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price "$POOL_GAS" --gas-estimate-multiplier 150 2>&1) || true
echo "$POOL_OUTPUT"
if echo "$POOL_OUTPUT" | grep -q "Replacement transaction underpriced"; then
POOL_RETRY=$((POOL_RETRY + 1))

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Render global-arbitrage-engine.dot to SVG and PNG.
# Usage: from repo root: ./scripts/diagrams/render-global-arbitrage-engine.sh
# Env:   DIAGRAM_DIR — override diagram directory (default: docs/11-references/diagrams)
# Requires: graphviz (dot)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
DIAGRAM_DIR="${DIAGRAM_DIR:-$REPO_ROOT/docs/11-references/diagrams}"
DOT_FILE="$DIAGRAM_DIR/global-arbitrage-engine.dot"
# Fail early with actionable messages: the header promises graphviz, so verify it.
command -v dot >/dev/null 2>&1 || { echo "Error: graphviz 'dot' not found in PATH." >&2; exit 1; }
if [[ ! -f "$DOT_FILE" ]]; then
  echo "Error: $DOT_FILE not found." >&2
  exit 1
fi
cd "$DIAGRAM_DIR"
dot -Tsvg global-arbitrage-engine.dot -o global-arbitrage-engine.svg
dot -Tpng global-arbitrage-engine.dot -o global-arbitrage-engine.png
echo "Rendered: global-arbitrage-engine.svg, global-arbitrage-engine.png in $DIAGRAM_DIR"

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Generate MCP allowlist for Chain 138 from DODOPMMIntegration.
# Reads getAllPools() and getPoolConfig(pool) via RPC and outputs allowlist JSON.
#
# Usage:
#   ./scripts/generate-mcp-allowlist-from-chain138.sh                    # print to stdout
#   ./scripts/generate-mcp-allowlist-from-chain138.sh -o allowlist.json  # write file
#   OUT_PATH=ai-mcp-pmm-controller/config/allowlist-138.json ./scripts/generate-mcp-allowlist-from-chain138.sh
#
# Requires: RPC_URL_138 (or RPC_URL), DODO_PMM_INTEGRATION_ADDRESS in env (or .env in smom-dbis-138).
# Optional: MAX_POOLS (default 200), PROFILE (default dodo_pmm_v2_like).
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
# Load env from smom-dbis-138 if present (set -a exports every sourced variable).
if [[ -f "$REPO_ROOT/smom-dbis-138/.env" ]]; then
  set -a
  # shellcheck source=/dev/null
  source "$REPO_ROOT/smom-dbis-138/.env"
  set +a
fi
RPC="${RPC_URL_138:-${RPC_URL:-http://192.168.11.211:8545}}"
INT="${DODO_PMM_INTEGRATION_ADDRESS:-${DODO_PMM_INTEGRATION:-}}"
OUT_PATH=""
PROFILE="${PROFILE:-dodo_pmm_v2_like}"
MAX_POOLS="${MAX_POOLS:-200}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    -o) OUT_PATH="$2"; shift 2 ;;
    *) shift ;;
  esac
done
# Diagnostics to stderr so stdout remains clean JSON for piping.
[[ -n "$INT" ]] || { echo "DODO_PMM_INTEGRATION_ADDRESS not set" >&2; exit 1; }
command -v cast &>/dev/null || { echo "cast (foundry) required" >&2; exit 1; }
command -v jq &>/dev/null || { echo "jq required" >&2; exit 1; }
# Enumerate pools: probe allPools(uint256) for 0..MAX_POOLS-1 until the call
# fails or returns the zero address (the contract exposes no length getter).
pools=()
for ((i=0; i<MAX_POOLS; i++)); do
  addr=$(cast call "$INT" "allPools(uint256)(address)" "$i" --rpc-url "$RPC" 2>/dev/null | cast --to-addr 2>/dev/null || true)
  [[ -n "$addr" && "$addr" != "0x0000000000000000000000000000000000000000" ]] || break
  pools+=("$addr")
done
echo "Found ${#pools[@]} pools on Chain 138" >&2
# Build JSON array of pool entries
entries="[]"
# ${arr[@]+...} guards the empty-array case, which errors under set -u in bash < 4.4.
for pool in ${pools[@]+"${pools[@]}"}; do
  # poolConfigs(pool) -> (pool, baseToken, quoteToken, lpFeeRate, i, k, isOpenTWAP, createdAt)
  config=$(cast call "$INT" "poolConfigs(address)(address,address,address,uint256,uint256,uint256,bool,uint256)" "$pool" --rpc-url "$RPC" 2>/dev/null || true)
  if [[ -z "$config" ]]; then
    echo " Skip $pool (poolConfigs failed)" >&2
    continue
  fi
  # cast may output "addr0 addr1 addr2 ..." or "( addr0 addr1 addr2 ..."; first=pool, second=base, third=quote
  addrs=($(echo "$config" | grep -oE '0x[0-9a-fA-F]{40}' || true))
  base="${addrs[1]:-}"
  quote="${addrs[2]:-}"
  [[ -n "$base" && -n "$quote" ]] || continue
  # Short display name: first 8 hex chars of the pool address after "0x".
  name="pool-${pool:2:8}"
  entry=$(jq -n \
    --arg name "$name" \
    --arg pool "$pool" \
    --arg base "$base" \
    --arg quote "$quote" \
    --arg profile "$PROFILE" \
    '{name: $name, pool_address: $pool, base_token: $base, quote_token: $quote, profile: $profile, limits: {max_slippage_bps: 50, max_single_tx_notional_usd: 2500, max_daily_notional_usd: 10000, cooldown_seconds: 1800, max_oracle_deviation_bps: 75, gas_cap_gwei: 35}}')
  entries=$(echo "$entries" | jq --argjson e "$entry" '. + [$e]')
done
result=$(jq -n \
  --arg chain "138" \
  --argjson pools "$entries" \
  '{chain: $chain, description: "Chain 138 (DeFi Oracle) DODO PMM pools. Auto-generated from DODOPMMIntegration.getAllPools/getPoolConfig. Set ALLOWLIST_PATH and CHAIN=138 when running MCP.", pools: $pools}')
if [[ -n "$OUT_PATH" ]]; then
  mkdir -p "$(dirname "$OUT_PATH")"
  echo "$result" | jq . > "$OUT_PATH"
  echo "Wrote $OUT_PATH" >&2
else
  echo "$result" | jq .
fi

View File

@@ -0,0 +1,71 @@
#!/usr/bin/env bash
# Generate MCP allowlist fragment for a public chain from deployment-status.json.
# Reads cross-chain-pmm-lps/config/deployment-status.json and outputs pools for the given chainId.
#
# Usage:
#   ./scripts/generate-mcp-allowlist-from-deployment-status.sh <chain_id>          # e.g. 137
#   ./scripts/generate-mcp-allowlist-from-deployment-status.sh 137 -o fragment-137.json
#
# Output: JSON with "chain", "pools" (array of {name, pool_address, base_token, quote_token, profile}).
# pmmPools in deployment-status must have poolAddress (or pool_address), base, quote (or base_token, quote_token).
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
STATUS="${REPO_ROOT}/cross-chain-pmm-lps/config/deployment-status.json"
OUT_PATH=""
CHAIN_ID="${1:-}"
# Diagnostics to stderr so stdout stays a clean JSON stream for piping.
[[ -n "$CHAIN_ID" ]] || { echo "Usage: $0 <chain_id> [-o output.json]" >&2; exit 1; }
shift || true
while [[ $# -gt 0 ]]; do
  case "$1" in
    -o) OUT_PATH="$2"; shift 2 ;;
    *) shift ;;
  esac
done
[[ -f "$STATUS" ]] || { echo "Not found: $STATUS" >&2; exit 1; }
command -v jq &>/dev/null || { echo "jq required" >&2; exit 1; }
# Read pmmPools for this chain; schema can be { "base", "quote", "poolAddress" } or { "base_token", "quote_token", "pool_address" }.
# Entries missing any of pool/base/quote are dropped; a jq failure yields an empty list (best effort).
pools_json=$(jq -c --arg c "$CHAIN_ID" '
  .chains[$c].pmmPools // [] | map(
    {
      name: ("pool-" + (.["poolAddress"] // .pool_address // "?")[0:10]),
      pool_address: (.["poolAddress"] // .pool_address),
      base_token: (.["base"] // .base_token),
      quote_token: (.["quote"] // .quote_token),
      profile: (.["profile"] // "dodo_pmm_v2_like")
    } | select(.pool_address != null and .base_token != null and .quote_token != null)
  )
' "$STATUS" 2>/dev/null || echo "[]")
# Add default risk limits to each pool (same defaults as the Chain 138 generator).
with_limits=$(echo "$pools_json" | jq '
  map(. + {
    limits: {
      max_slippage_bps: 50,
      max_single_tx_notional_usd: 2500,
      max_daily_notional_usd: 10000,
      cooldown_seconds: 1800,
      max_oracle_deviation_bps: 75,
      gas_cap_gwei: 35
    }
  })
')
chain_name=$(jq -r --arg c "$CHAIN_ID" '.chains[$c].name // "Unknown"' "$STATUS")
result=$(jq -n \
  --arg chain "$CHAIN_ID" \
  --arg name "$chain_name" \
  --argjson pools "$with_limits" \
  '{chain: $chain, description: ("MCP allowlist for chain " + $chain + " (" + $name + ") from deployment-status.json. Use with multi-chain MCP or per-chain allowlist."), pools: $pools}')
if [[ -n "$OUT_PATH" ]]; then
  mkdir -p "$(dirname "$OUT_PATH")"
  echo "$result" | jq . > "$OUT_PATH"
  echo "Wrote $OUT_PATH ($(echo "$pools_json" | jq 'length') pools)" >&2
else
  echo "$result" | jq .
fi

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# List single-sided PMM pools to create per public chain (for aggregator and DEX routing).
# Reads cross-chain-pmm-lps/config/pool-matrix.json and prints poolsFirst + poolsOptional per chain.
# Usage: ./scripts/list-single-sided-pools-by-chain.sh [chain_id]
# If chain_id is omitted, lists all chains. If provided, lists only that chain.
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
MATRIX="${REPO_ROOT}/cross-chain-pmm-lps/config/pool-matrix.json"
# Diagnostics to stderr so stdout carries only the listing.
if [[ ! -f "$MATRIX" ]]; then
  echo "Not found: $MATRIX" >&2
  exit 1
fi
if ! command -v jq &>/dev/null; then
  echo "jq is required. Install with: apt-get install jq / brew install jq" >&2
  exit 1
fi
CHAIN="${1:-}"
# Print one chain's header plus its poolsFirst / poolsOptional lists.
# Arguments: $1 - chain id (key into .chains of pool-matrix.json)
list_chain() {
  local cid="$1"
  local name name_hub first optional
  name=$(jq -r --arg c "$cid" '.chains[$c].name // "Unknown"' "$MATRIX")
  name_hub=$(jq -r --arg c "$cid" '.chains[$c].hubStable // "?"' "$MATRIX")
  # Separator between id and name so output reads "Chain 137 - Polygon" not "Chain 137Polygon".
  echo "Chain $cid - $name (hub: $name_hub)"
  echo "  poolsFirst (create these first):"
  jq -r --arg c "$cid" '.chains[$c].poolsFirst[]?' "$MATRIX" 2>/dev/null | while read -r p; do
    [[ -n "$p" ]] && echo "    - $p"
  done
  echo "  poolsOptional:"
  jq -r --arg c "$cid" '.chains[$c].poolsOptional[]?' "$MATRIX" 2>/dev/null | while read -r p; do
    [[ -n "$p" ]] && echo "    - $p"
  done
  echo ""
}
if [[ -n "$CHAIN" ]]; then
  if ! jq -e --arg c "$CHAIN" '.chains[$c]' "$MATRIX" &>/dev/null; then
    echo "Chain $CHAIN not found in pool-matrix.json" >&2
    exit 1
  fi
  list_chain "$CHAIN"
else
  for cid in $(jq -r '.chains | keys[]' "$MATRIX"); do
    list_chain "$cid"
  done
fi
echo "---"
echo "Source: $MATRIX"
echo "Use SINGLE_SIDED_LPS_PUBLIC_NETWORKS_RUNBOOK.md for deployment steps."

View File

@@ -0,0 +1,80 @@
#!/usr/bin/env bash
# Ensure Core RPC nodes 2101 and 2102 have TXPOOL and ADMIN (and DEBUG) in rpc-http-api and rpc-ws-api.
# Does NOT add txpool_besuClear/txpool_clear/admin_removeTransaction — Besu does not implement them.
# See: docs/04-configuration/CORE_RPC_2101_2102_TXPOOL_ADMIN_STATUS.md
#
# Usage: ./scripts/maintenance/ensure-core-rpc-config-2101-2102.sh [--dry-run] [--2101-only] [--2102-only]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Host IP overrides may come from the shared config; missing file is tolerated (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Canonical API list for Core RPC (max that Besu supports for txpool + admin)
RPC_HTTP_API='["ETH","NET","WEB3","TXPOOL","QBFT","ADMIN","DEBUG","TRACE"]'
RPC_WS_API='["ETH","NET","WEB3","TXPOOL","QBFT","ADMIN"]'
VMID_2101=2101
VMID_2102=2102
HOST_2101="${PROXMOX_HOST_R630_01:-192.168.11.11}"
HOST_2102="${PROXMOX_HOST_ML110:-192.168.11.10}"
# NOTE: the two nodes use different Besu config filenames.
CONFIG_2101="/etc/besu/config-rpc-core.toml"
CONFIG_2102="/etc/besu/config-rpc.toml"
DRY_RUN=false
ONLY_2101=false
ONLY_2102=false
# Minimal flag parsing: each recognized flag flips its boolean; unknown args are ignored.
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=true
[[ "$a" == "--2101-only" ]] && ONLY_2101=true
[[ "$a" == "--2102-only" ]] && ONLY_2102=true
done
# run_ssh HOST CMD — run CMD as root on HOST with short timeout, no host-key prompt.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$1" "$2"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
# ensure_apis VMID HOST CONFIG_PATH — update the container's Besu config to the
# canonical API lists, back it up first, then restart besu-rpc. Returns non-zero
# (without aborting the script; callers append || true) on SSH/update/restart failure.
ensure_apis() {
local vmid=$1
local host=$2
local config_path=$3
log_info "VMID $vmid ($host): ensuring $config_path has TXPOOL, ADMIN, DEBUG..."
if $DRY_RUN; then
echo " Would set rpc-http-api and rpc-ws-api to include TXPOOL, ADMIN, DEBUG, QBFT, TRACE (2101/2102)"
return 0
fi
# Pass API lists via env so quoting is safe; remote sed updates the config.
# Quoting layers: local double quotes expand $vmid/$RPC_*; the single-quoted
# bash -c body keeps \$CFG etc. for the innermost shell. Do not reformat.
# Each grep/sed pair replaces the key if present, else appends it (idempotent).
run_ssh "$host" "pct exec $vmid -- env RPC_HTTP_API='$RPC_HTTP_API' RPC_WS_API='$RPC_WS_API' CFG='$config_path' bash -c '
set -e
[ -f \"\$CFG\" ] || { echo \"Config \$CFG not found\"; exit 1; }
cp \"\$CFG\" \"\${CFG}.bak.\$(date +%Y%m%d%H%M%S)\"
grep -q \"rpc-http-api\" \"\$CFG\" && sed -i \"s|^rpc-http-api=.*|rpc-http-api=\$RPC_HTTP_API|\" \"\$CFG\" || echo \"rpc-http-api=\$RPC_HTTP_API\" >> \"\$CFG\"
grep -q \"rpc-ws-api\" \"\$CFG\" && sed -i \"s|^rpc-ws-api=.*|rpc-ws-api=\$RPC_WS_API|\" \"\$CFG\" || echo \"rpc-ws-api=\$RPC_WS_API\" >> \"\$CFG\"
chown besu:besu \"\$CFG\" 2>/dev/null || true
echo OK
'" 2>/dev/null || { log_warn "VMID $vmid: SSH or config update failed"; return 1; }
log_ok "VMID $vmid: config updated"
log_info "Restarting besu-rpc on $vmid..."
run_ssh "$host" "pct exec $vmid -- systemctl restart besu-rpc 2>/dev/null || pct exec $vmid -- systemctl restart besu-rpc.service 2>/dev/null" || { log_warn "Restart failed for $vmid"; return 1; }
log_ok "VMID $vmid: besu-rpc restarted"
return 0
}
echo ""
echo "=== Ensure Core RPC 2101 / 2102 — TXPOOL + ADMIN (max Besu supports) ==="
echo " dry-run=$DRY_RUN 2101-only=$ONLY_2101 2102-only=$ONLY_2102"
echo " Note: txpool_besuClear, txpool_clear, admin_removeTransaction are NOT in Besu; use clear-all-transaction-pools.sh to clear stuck txs."
echo ""
# "--2102-only" skips 2101 and vice versa; || true keeps set -e from aborting
# on a single-node failure so the other node is still attempted.
if [[ "$ONLY_2102" != true ]]; then
ensure_apis "$VMID_2101" "$HOST_2101" "$CONFIG_2101" || true
fi
if [[ "$ONLY_2101" != true ]]; then
ensure_apis "$VMID_2102" "$HOST_2102" "$CONFIG_2102" || true
fi
echo ""
echo "Done. Verify: ./scripts/maintenance/health-check-rpc-2101.sh and curl to 192.168.11.212:8545 for 2102."
echo "Ref: docs/04-configuration/CORE_RPC_2101_2102_TXPOOL_ADMIN_STATUS.md"

View File

@@ -0,0 +1,84 @@
#!/usr/bin/env bash
# Staggered restart of Chain 138 validators to restore block production without losing quorum.
# When all 5 validators are restarted at once (e.g. clear-all-transaction-pools), they can all
# enter "full sync" and no node is at head to produce blocks. Restarting one at a time lets
# the rest stay at head so the restarted node syncs quickly and consensus can continue.
#
# Usage: ./scripts/maintenance/fix-block-production-staggered-restart.sh [--dry-run]
# Requires: SSH to Proxmox hosts (192.168.11.10 ML110, 192.168.11.11 R630-01, 192.168.11.12 R630-02)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Host IP overrides; missing config file is tolerated (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_ok() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
# Order: restart one at a time; wait between so restarted node can sync from others
# VMID : host  (entries are "vmid:host" strings split with IFS=: below)
VALIDATORS=(
"1004:${PROXMOX_HOST_ML110:-192.168.11.10}"
"1003:${PROXMOX_HOST_ML110:-192.168.11.10}"
"1002:${PROXMOX_HOST_R630_01:-192.168.11.11}"
"1001:${PROXMOX_HOST_R630_01:-192.168.11.11}"
"1000:${PROXMOX_HOST_R630_01:-192.168.11.11}"
)
WAIT_BETWEEN=90
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
# get_block — print the chain head (hex) via eth_blockNumber; "0x0" on any
# failure ("// \"0x0\"" in jq covers timeout/empty responses). Requires jq.
get_block() {
curl -s -m 5 -X POST -H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' "$RPC" 2>/dev/null | jq -r '.result // "0x0"'
}
echo "=== Staggered validator restart (fix block production) ==="
echo " RPC: $RPC"
echo " Wait between restarts: ${WAIT_BETWEEN}s"
$DRY_RUN && echo " (DRY RUN - no restarts)"
echo ""
BLOCK_BEFORE=$(get_block)
log_info "Block before: $BLOCK_BEFORE"
for entry in "${VALIDATORS[@]}"; do
# Split "vmid:host" into the two fields.
IFS=: read -r vmid host <<< "$entry"
log_info "Restarting validator $vmid on $host..."
if $DRY_RUN; then
echo " Would: ssh root@$host 'pct exec $vmid -- systemctl restart besu-validator'"
else
# Allow up to 120s for restart (Besu stop/start can take 1-2 min)
if timeout 120 ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$host" "pct exec $vmid -- systemctl restart besu-validator" 2>/dev/null; then
log_ok " $vmid restarted"
else
# Non-fatal: continue to the next validator so quorum can still recover.
log_warn " $vmid restart timed out or failed (node may still be restarting)"
fi
fi
# No wait after the last validator (1000) — nothing follows it.
if ! $DRY_RUN && [[ "$vmid" != "1000" ]]; then
log_info " Waiting ${WAIT_BETWEEN}s for node to rejoin and sync..."
sleep "$WAIT_BETWEEN"
fi
done
if ! $DRY_RUN; then
log_info "Waiting 30s then checking block production..."
sleep 30
BLOCK_AFTER=$(get_block)
log_info "Block after: $BLOCK_AFTER"
echo ""
echo "Run monitor to confirm blocks are advancing:"
echo " ./scripts/monitoring/monitor-blockchain-health.sh"
echo " watch -n 5 'cast block-number --rpc-url $RPC'"
fi
log_ok "Done."

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# Suggest load-balancing migrations: show current load and example commands to move
# containers from r630-01 to r630-02 (or ml110). Run from project root.
#
# Usage: bash scripts/maintenance/proxmox-load-balance-suggest.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# SSH options as an array so each option stays one word when expanded (no unquoted splitting).
SSH_OPTS=(-o ConnectTimeout=8 -o StrictHostKeyChecking=no)
# Candidates safe to suggest (r630-01 -> r630-02). Excludes NPMplus main, core RPC, validators, sentries, DBIS core.
CANDIDATES="3500 3501 7804 8640 8642 10232 10235 10236"
echo ""
echo "=== Proxmox load balance — suggestion ==="
echo ""
# Current load and LXC counts per host (best effort; unreachable hosts show blanks).
for entry in "r630-01:$R630_01" "r630-02:$R630_02" "ml110:$ML110"; do
  IFS=: read -r name ip <<< "$entry"
  out=$(ssh "${SSH_OPTS[@]}" root@"$ip" "
    echo \"LOAD|\$(cat /proc/loadavg 2>/dev/null | cut -d' ' -f1-3)\"
    echo \"LXC|\$(pct list 2>/dev/null | tail -n +2 | wc -l)\"
  " 2>/dev/null) || true
  load=$(echo "$out" | awk -F'|' '$1=="LOAD"{print $2}')
  lxc=$(echo "$out" | awk -F'|' '$1=="LXC"{print $2}')
  printf " %-10s %s LXC: %s\n" "$name" "load: $load" "$lxc"
done
echo ""
echo "--- Suggested migrations (r630-01 → r630-02) ---"
echo "Run from project root. Use --dry-run first. Target storage on r630-02: thin1, thin2, thin5, thin6."
echo ""
for vmid in $CANDIDATES; do
  # Check if CT exists on r630-01 (local $vmid is interpolated into the remote awk; \$1 stays remote).
  on_src=$(ssh "${SSH_OPTS[@]}" root@"$R630_01" "pct list 2>/dev/null | awk '\$1==$vmid{print \$1}'" 2>/dev/null) || true
  if [[ -n "$on_src" ]]; then
    # Bug fix: the original `name=$(ssh ...) || echo "CT-$vmid"` printed the
    # fallback to stdout instead of assigning it — assign the fallback explicitly.
    name=$(ssh "${SSH_OPTS[@]}" root@"$R630_01" "pct config $vmid 2>/dev/null | grep -E '^hostname:|^name:' | head -1 | sed 's/^[^:]*:[[:space:]]*//'" 2>/dev/null) || true
    [[ -n "$name" ]] || name="CT-$vmid"
    echo " VMID $vmid ($name):"
    echo "   ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh $vmid thin1 --dry-run"
    echo "   ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh $vmid thin1 --destroy-source"
    echo ""
  fi
done
echo "--- Cluster check (optional) ---"
echo "If nodes are in the same cluster, you can try live migrate from r630-01:"
echo " ssh root@$R630_01 \"pvecm status\""
echo " ssh root@$R630_01 \"pct migrate <VMID> r630-02 --storage thin1 --restart\""
echo ""
echo "See: docs/04-configuration/PROXMOX_LOAD_BALANCING_RUNBOOK.md"
echo ""

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Merge single-chain allowlists into one multi-chain allowlist for the MCP server.
# Output format: { "description": "...", "chains": [ { "chainId": "138", "pools": [...] }, ... ] }
#
# Usage:
# ./scripts/merge-mcp-allowlist-multichain.sh -o ai-mcp-pmm-controller/config/allowlist-multichain.json
# ALLOWLIST_138=path/to/allowlist-138.json CHAIN_IDS="138 137 1" ./scripts/merge-mcp-allowlist-multichain.sh -o out.json
#
# If ALLOWLIST_138 is not set, runs generate-mcp-allowlist-from-chain138.sh to get Chain 138 pools.
# For other chain IDs, runs generate-mcp-allowlist-from-deployment-status.sh and merges.
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$REPO_ROOT"
OUT_PATH=""
ALLOWLIST_138="${ALLOWLIST_138:-}"
CHAIN_IDS="${CHAIN_IDS:-138}"
DEPLOYMENT_STATUS="${DEPLOYMENT_STATUS:-$REPO_ROOT/cross-chain-pmm-lps/config/deployment-status.json}"
# Only -o <path> is recognized; any other argument is silently ignored.
while [[ $# -gt 0 ]]; do
case "$1" in
-o) OUT_PATH="$2"; shift 2 ;;
*) shift ;;
esac
done
command -v jq &>/dev/null || { echo "jq required"; exit 1; }
# Per-chain fragments are written here; trap cleans up on every exit path.
TMP_DIR=$(mktemp -d)
trap 'rm -rf "$TMP_DIR"' EXIT
# Build chains array: CHAIN_OBJS accumulates paths of per-chain JSON fragment files.
CHAIN_OBJS=()
# Chain 138: use the pre-built allowlist file if given, else generate live.
if [[ -n "$ALLOWLIST_138" && -f "$ALLOWLIST_138" ]]; then
jq -c '{ chainId: "138", pools: .pools }' "$ALLOWLIST_138" > "$TMP_DIR/138.json"
CHAIN_OBJS+=("$TMP_DIR/138.json")
else
GEN_138="$REPO_ROOT/scripts/generate-mcp-allowlist-from-chain138.sh"
if [[ -x "$GEN_138" ]]; then
# Best effort: generator failure leaves an empty file, which -s filters out below.
"$GEN_138" 2>/dev/null | jq -c '{ chainId: "138", pools: .pools }' > "$TMP_DIR/138.json" || true
[[ -s "$TMP_DIR/138.json" ]] && CHAIN_OBJS+=("$TMP_DIR/138.json")
fi
fi
# Other chains from deployment-status (138 handled above; fragments only kept if non-empty).
for cid in $CHAIN_IDS; do
[[ "$cid" == "138" ]] && continue
FRAG="$TMP_DIR/$cid.json"
if "$REPO_ROOT/scripts/generate-mcp-allowlist-from-deployment-status.sh" "$cid" 2>/dev/null | jq -c --arg c "$cid" '{ chainId: $c, pools: .pools }' > "$FRAG" 2>/dev/null && [[ -s "$FRAG" ]]; then
CHAIN_OBJS+=("$FRAG")
fi
done
# Merge: read all chain objects into a jq array.
# Length check also avoids expanding "${CHAIN_OBJS[@]}" empty under set -u (errors in bash < 4.4).
if [[ ${#CHAIN_OBJS[@]} -eq 0 ]]; then
CHAINS_JSON="[]"
else
CHAINS_JSON=$(jq -s '.' "${CHAIN_OBJS[@]}")
fi
RESULT=$(jq -n --argjson chains "$CHAINS_JSON" '{
description: "Multi-chain MCP allowlist. Set ALLOWLIST_PATH to this file; set RPC_138, RPC_137, etc. or RPC_BY_CHAIN_PATH.",
chains: $chains
}')
# Write pretty-printed JSON to -o path, or to stdout if no path was given.
if [[ -n "$OUT_PATH" ]]; then
mkdir -p "$(dirname "$OUT_PATH")"
echo "$RESULT" | jq . > "$OUT_PATH"
echo "Wrote $OUT_PATH (chains: $(echo "$CHAINS_JSON" | jq 'length'))" >&2
else
echo "$RESULT" | jq .
fi

9
scripts/mint-all-c-star-138.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Wrapper: run smom-dbis-138/scripts/mint-all-c-star-138.sh from repo root.
# Usage:   ./scripts/mint-all-c-star-138.sh [amount_human]
# Example: ./scripts/mint-all-c-star-138.sh 1000000
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SMOM="${REPO_ROOT}/smom-dbis-138"
# Diagnostics go to stderr; non-zero exit lets callers detect the missing submodule.
[ -d "$SMOM" ] || { echo "Not found: $SMOM" >&2; exit 1; }
# exec replaces this process so the inner script's exit status is propagated.
exec bash "$SMOM/scripts/mint-all-c-star-138.sh" "$@"

8
scripts/mint-cw-on-chain.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Wrapper: run smom-dbis-138 mint-cw script from repo root.
# Usage: ./scripts/mint-cw-on-chain.sh <CHAIN_NAME> [amount_human]
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SMOM="${REPO_ROOT}/smom-dbis-138"
# Diagnostics go to stderr; non-zero exit lets callers detect the missing submodule.
[ -d "$SMOM" ] || { echo "Not found: $SMOM" >&2; exit 1; }
# exec replaces this process so the inner script's exit status is propagated.
exec bash "$SMOM/scripts/mint-cw-on-chain.sh" "$@"

9
scripts/mint-for-liquidity.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Wrapper: run smom-dbis-138/scripts/mint-for-liquidity.sh from repo root.
# Usage:   ./scripts/mint-for-liquidity.sh [--add-liquidity]
# Example: ./scripts/mint-for-liquidity.sh --add-liquidity
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SMOM="${REPO_ROOT}/smom-dbis-138"
# Diagnostics go to stderr; non-zero exit lets callers detect the missing submodule.
[ -d "$SMOM" ] || { echo "Not found: $SMOM" >&2; exit 1; }
# exec replaces this process so the inner script's exit status is propagated.
exec bash "$SMOM/scripts/mint-for-liquidity.sh" "$@"

View File

@@ -19,15 +19,22 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
_orig_npm_url="${NPM_URL:-}"
_orig_npm_email="${NPM_EMAIL:-}"
_orig_npm_password="${NPM_PASSWORD:-}"
# Load dotenv: repo root .env then smom-dbis-138/.env (operator creds)
if [ -f "$PROJECT_ROOT/.env" ]; then
set +u
# shellcheck source=/dev/null
source "$PROJECT_ROOT/.env"
set -u
[ -n "$_orig_npm_url" ] && NPM_URL="$_orig_npm_url"
[ -n "$_orig_npm_email" ] && NPM_EMAIL="$_orig_npm_email"
[ -n "$_orig_npm_password" ] && NPM_PASSWORD="$_orig_npm_password"
fi
if [ -f "$PROJECT_ROOT/smom-dbis-138/.env" ]; then
set +u
# shellcheck source=/dev/null
source "$PROJECT_ROOT/smom-dbis-138/.env"
set -u
fi
[ -n "$_orig_npm_url" ] && NPM_URL="$_orig_npm_url"
[ -n "$_orig_npm_email" ] && NPM_EMAIL="$_orig_npm_email"
[ -n "$_orig_npm_password" ] && NPM_PASSWORD="$_orig_npm_password"
[ -f "$PROJECT_ROOT/config/ip-addresses.conf" ] && source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Default .167: NPMplus (VMID 10233) reachable on ${IP_NPMPLUS:-192.168.11.167}:81; set NPM_URL in .env to override

View File

@@ -19,6 +19,7 @@ Scripts for the **OMNL** tenancy ([omnl.hybxfinance.io](https://omnl.hybxfinance
| **omnl-office2-access-security-test.sh** | Security test: office-2 user must not see other offices' data or achieve path traversal/command injection. Set office-2 user and password (e.g. `OMNL_OFFICE2_TEST_USER`, `OMNL_OFFICE2_TEST_PASSWORD`). See [OMNL_OFFICE_2_ACCESS_SECURITY_TEST.md](../../docs/04-configuration/mifos-omnl-central-bank/OMNL_OFFICE_2_ACCESS_SECURITY_TEST.md). |
| **omnl-office-create-samama.sh** | Create Office for Samama Group LLC (Azerbaijan) and post 5B USD M1 from Head Office (Phase C pattern: HO Dr 2100 Cr 2410; office Dr 1410 Cr 2100). Idempotent by externalId. `SKIP_TRANSFER=1` to create office only. See [SAMAMA_OFFICE_AND_5B_M1_TRANSFER.md](../../docs/04-configuration/mifos-omnl-central-bank/SAMAMA_OFFICE_AND_5B_M1_TRANSFER.md). |
| **omnl-office-create-pelican.sh** | Create Office for Pelican Motors And Finance LLC (Chalmette, LA). Idempotent by externalId `PEL-MOTORS-CHALMETTE-LA`. Use with omnl.hybx.global by setting `OMNL_FINERACT_BASE_URL`. See [PELICAN_MOTORS_OFFICE_RUNBOOK.md](../../docs/04-configuration/mifos-omnl-central-bank/PELICAN_MOTORS_OFFICE_RUNBOOK.md). |
| **omnl-office-create-adf-singapore.sh** | Create Office for ADF ASIAN PACIFIC HOLDING SINGAPORE PTE LTD (child of OMNL Head Office). Idempotent by externalId `202328126M`. See [ADF_ASIAN_PACIFIC_SINGAPORE_OFFICE_RUNBOOK.md](../../docs/04-configuration/mifos-omnl-central-bank/ADF_ASIAN_PACIFIC_SINGAPORE_OFFICE_RUNBOOK.md). |
| **resolve_ids.sh** | Resolve GL IDs (1410, 2100, 2410) and payment type; write `ids.env`. Run before closures/reconciliation/templates. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
| **omnl-gl-closures-post.sh** | Post GL closures for Office 20 and HO (idempotent). `CLOSING_DATE=yyyy-MM-dd`, `DRY_RUN=1`. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
| **omnl-reconciliation-office20.sh** | Snapshot Office 20 (offices + GL + trial balance), timestamp, sha256. `OUT_DIR=./reconciliation`. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
@@ -107,6 +108,10 @@ bash scripts/omnl/omnl-office-create-samama.sh
# Pelican Motors And Finance LLC — create office (omnl.hybx.global or omnl.hybxfinance.io)
DRY_RUN=1 bash scripts/omnl/omnl-office-create-pelican.sh
bash scripts/omnl/omnl-office-create-pelican.sh
# ADF Asian Pacific Holding Singapore Pte Ltd — create office (child of OMNL Head Office, externalId 202328126M)
DRY_RUN=1 bash scripts/omnl/omnl-office-create-adf-singapore.sh
bash scripts/omnl/omnl-office-create-adf-singapore.sh
```
**Requirements:** `curl`, `jq` (for ledger posting and pretty-print in discovery).

View File

@@ -0,0 +1,75 @@
#!/usr/bin/env bash
# OMNL Fineract — Create one Office for ADF ASIAN PACIFIC HOLDING SINGAPORE PTE LTD (child of OMNL Head Office).
# Uses Fineract POST /offices (name, parentId, openingDate, externalId).
# See docs/04-configuration/mifos-omnl-central-bank/ADF_ASIAN_PACIFIC_SINGAPORE_OFFICE_RUNBOOK.md
#
# Usage: run from repo root.
#   OPENING_DATE=2023-07-11 (default)
#   DRY_RUN=1 to print payload only, do not POST.
#
# For omnl.hybx.global set in .env:
#   OMNL_FINERACT_BASE_URL=https://omnl.hybx.global/fineract-provider/api/v1
#
# Requires: curl, jq.
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
DRY_RUN="${DRY_RUN:-0}"
OPENING_DATE="${OPENING_DATE:-2023-07-11}"
ADF_SINGAPORE_EXTERNAL_ID="${ADF_SINGAPORE_EXTERNAL_ID:-202328126M}"
ADF_SINGAPORE_OFFICE_NAME="${ADF_SINGAPORE_OFFICE_NAME:-ADF ASIAN PACIFIC HOLDING SINGAPORE PTE LTD}"
# Verify the tools the header promises, before doing any work.
command -v curl >/dev/null 2>&1 || { echo "curl required" >&2; exit 1; }
command -v jq >/dev/null 2>&1 || { echo "jq required" >&2; exit 1; }
# Load credentials: prefer omnl-fineract/.env, fall back to repo-root .env.
# set +u around source because dotenv files may reference unset variables.
if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then
  set +u
  # shellcheck source=/dev/null
  source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true
  set -u
elif [ -f "${REPO_ROOT}/.env" ]; then
  set +u
  # shellcheck source=/dev/null
  source "${REPO_ROOT}/.env" 2>/dev/null || true
  set -u
fi
BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
USER="${OMNL_FINERACT_USER:-app.omnl}"
PASS="${OMNL_FINERACT_PASSWORD:-}"
if [ -z "$BASE_URL" ] || [ -z "$PASS" ]; then
  echo "Set OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD (e.g. omnl-fineract/.env or .env)." >&2
  echo "For omnl.hybx.global use: OMNL_FINERACT_BASE_URL=https://omnl.hybx.global/fineract-provider/api/v1" >&2
  exit 1
fi
CURL_OPTS=(-s -S -H "Fineract-Platform-TenantId: ${TENANT}" -H "Content-Type: application/json" -u "${USER}:${PASS}")
# Resolve existing office by externalId (idempotent). Fail loudly if the listing
# call fails — otherwise set -e would abort here with no explanation.
offices_json=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/offices" 2>/dev/null) || {
  echo "Failed to GET ${BASE_URL}/offices (check URL, network, credentials)." >&2
  exit 1
}
existing_id=$(echo "$offices_json" | jq -r --arg e "$ADF_SINGAPORE_EXTERNAL_ID" '.[] | select(.externalId == $e) | .id' 2>/dev/null | head -1)
if [ -n "$existing_id" ] && [ "$existing_id" != "null" ]; then
  echo "ADF Asian Pacific Singapore office already exists: officeId=$existing_id (externalId=$ADF_SINGAPORE_EXTERNAL_ID)" >&2
  echo "OFFICE_ID_ADF_SINGAPORE=$existing_id"
  exit 0
fi
# parentId 1 = OMNL Head Office; dateFormat/locale are required by the Fineract API.
payload=$(jq -n \
  --arg name "$ADF_SINGAPORE_OFFICE_NAME" \
  --arg openingDate "$OPENING_DATE" \
  --arg externalId "$ADF_SINGAPORE_EXTERNAL_ID" \
  '{ name: $name, parentId: 1, openingDate: $openingDate, externalId: $externalId, dateFormat: "yyyy-MM-dd", locale: "en" }')
if [ "$DRY_RUN" = "1" ]; then
  echo "DRY_RUN: would POST /offices with name=$ADF_SINGAPORE_OFFICE_NAME externalId=$ADF_SINGAPORE_EXTERNAL_ID openingDate=$OPENING_DATE" >&2
  echo "Payload: $payload" >&2
  exit 0
fi
res=$(curl "${CURL_OPTS[@]}" -X POST -d "$payload" "${BASE_URL}/offices" 2>/dev/null) || true
# Fineract returns resourceId (sometimes officeId) on success.
if echo "$res" | jq -e '.resourceId // .officeId' >/dev/null 2>&1; then
  ADF_OFFICE_ID=$(echo "$res" | jq -r '.resourceId // .officeId')
  echo "Created ADF Asian Pacific Singapore office: officeId=$ADF_OFFICE_ID" >&2
  echo "OFFICE_ID_ADF_SINGAPORE=$ADF_OFFICE_ID"
else
  echo "Failed to create office: $res" >&2
  exit 1
fi

View File

@@ -12,7 +12,9 @@ source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
PROJECT_ROOT_SCRIPT="$(cd "$SCRIPT_DIR/.." && pwd)"
SOURCE_PROJECT="${SOURCE_PROJECT:-${PROJECT_ROOT_SCRIPT}/smom-dbis-138}"
[[ -d "$SOURCE_PROJECT" ]] || SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Run operator tasks from a host on LAN with access to .env (PRIVATE_KEY, NPM_PASSWORD, etc.).
# Run operator tasks from a host on LAN. Always loads dotenv (PRIVATE_KEY, NPM_PASSWORD, etc.) from repo .env and smom-dbis-138/.env.
# Optional: contract deploy, Blockscout verify, backup, Proxmox VM/container creation.
#
# Usage:
@@ -17,6 +17,12 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Always load dotenv so Operator/LAN has NPM_PASSWORD, PRIVATE_KEY, RPC, etc.
if [[ -f "$SCRIPT_DIR/lib/load-project-env.sh" ]]; then
# shellcheck source=scripts/lib/load-project-env.sh
source "$SCRIPT_DIR/lib/load-project-env.sh"
fi
DRY_RUN=false
SKIP_BACKUP=false
SKIP_VERIFY=false
@@ -62,32 +68,27 @@ echo ""
# 2) Blockscout verification
if [[ "$SKIP_VERIFY" != true ]]; then
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: source smom-dbis-138/.env; ./scripts/verify/run-contract-verification-with-proxy.sh"
echo "[DRY-RUN] Would run: ./scripts/verify/run-contract-verification-with-proxy.sh (dotenv already loaded)"
else
log_info "Blockscout source verification..."
([[ -f smom-dbis-138/.env ]] && source smom-dbis-138/.env 2>/dev/null; bash "$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh") || log_warn "Blockscout verify skipped (env or script failed)"
(bash "$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh") || log_warn "Blockscout verify skipped (env or script failed)"
fi
echo ""
fi
# 3) Optional: contract deployment
# 3) Optional: contract deployment (PRIVATE_KEY from dotenv already loaded above)
if [[ "$DO_DEPLOY" == true ]]; then
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: smom-dbis-138 deploy-all-phases.sh (and deploy-transaction-mirror-chain138.sh if needed)"
else
if [[ -f smom-dbis-138/.env ]]; then
source smom-dbis-138/.env 2>/dev/null || true
if [[ -n "${PRIVATE_KEY:-}" ]]; then
if [[ -n "${PRIVATE_KEY:-}" ]]; then
log_info "Contract deployment (phased)..."
(cd smom-dbis-138 && ./scripts/deployment/deploy-all-phases.sh) && log_ok "Phased deploy done" || log_warn "Phased deploy failed (may already be deployed)"
log_info "TransactionMirror (if needed)..."
bash "$SCRIPT_DIR/deployment/deploy-transaction-mirror-chain138.sh" 2>/dev/null && log_ok "TransactionMirror deployed" || log_warn "TransactionMirror skipped or failed (add TRANSACTION_MIRROR_ADDRESS to .env if deployed)"
else
log_warn "PRIVATE_KEY not set; skipping deploy"
log_warn "PRIVATE_KEY not set; set in smom-dbis-138/.env or .env and re-run"
fi
else
log_warn "smom-dbis-138/.env not found; skipping deploy"
fi
fi
echo ""
fi

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
# Run fix-all steps that can be automated from the LAN operator machine.
# Manual steps (Windows hosts, UDM Pro hairpin, Alltra/HYBX) are printed at the end.
# Usage: bash scripts/run-fix-all-from-lan.sh [--verify]
#   --verify   also runs the full verification suite (can take several minutes)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
RUN_VERIFY=false
[[ "${1:-}" == "--verify" ]] && RUN_VERIFY=true
cd "$PROJECT_ROOT"
echo "=== Fix All From LAN ==="
echo ""
# 1. Explorer hosts (this machine)
echo "--- 1. Explorer (explorer.d-bis.org) ---"
# -F: fixed-string match, so the dots in the hostname are literal (a plain regex
# would also match e.g. "explorer-d-bis.org").
if grep -qF "explorer.d-bis.org" /etc/hosts 2>/dev/null; then
  echo "OK: /etc/hosts already has an entry for explorer.d-bis.org"
  grep -F "explorer.d-bis.org" /etc/hosts
else
  echo "Add to /etc/hosts (run with sudo):"
  echo "  echo '192.168.11.140 explorer.d-bis.org' | sudo tee -a /etc/hosts"
fi
# HEAD request; -w '%{http_code}' prints only the final status code ("000" when no
# connection). -x anchors the match so only an exact "200" line passes — an
# unanchored 'grep -q 200' would accept the code as a substring of other output.
if curl -sI -o /dev/null -w "%{http_code}" --connect-timeout 5 "https://explorer.d-bis.org/" 2>/dev/null | grep -qx 200; then
  echo "OK: https://explorer.d-bis.org/ returns 200 from this host"
else
  echo "WARN: https://explorer.d-bis.org/ did not return 200; add hosts or check network"
fi
echo ""
# 2. Env permissions (tighten modes on .env files so secrets are not world-readable)
echo "--- 2. Env permissions ---"
if [[ -f "scripts/security/secure-env-permissions.sh" ]]; then
  bash scripts/security/secure-env-permissions.sh
else
  echo "SKIP: scripts/security/secure-env-permissions.sh not found"
fi
echo ""
# 3. Optional: full verification (slow; opt-in via --verify)
if [[ "$RUN_VERIFY" == true ]]; then
  echo "--- 3. Full verification ---"
  if [[ -f "scripts/verify/run-full-verification.sh" ]]; then
    bash scripts/verify/run-full-verification.sh
  else
    echo "SKIP: scripts/verify/run-full-verification.sh not found"
  fi
else
  echo "--- 3. Full verification (skipped) ---"
  echo "Run with --verify to run: bash scripts/verify/run-full-verification.sh"
fi
echo ""
# 4. Manual steps that cannot be automated from this host
echo "=== Manual steps (see docs/05-network/FIX_ALL_ISSUES_RUNBOOK.md) ==="
echo ""
echo "• Windows browser: Add to C:\\Windows\\System32\\drivers\\etc\\hosts (as Admin):"
echo "  192.168.11.140 explorer.d-bis.org"
echo "  Then: ipconfig /flushdns"
echo ""
echo "• UDM Pro: Enable NAT loopback (hairpin) so all LAN clients can use explorer.d-bis.org without hosts."
echo ""
echo "• UDM Pro port forward: 76.53.10.36:80/443 → 192.168.11.167 (for external access)."
echo ""
echo "• Alltra/HYBX: Port forward 76.53.10.38 → 192.168.11.169; fix 502s per docs/04-configuration/FIXES_PREPARED.md"
echo ""

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Run operator tasks that REQUIRE being on LAN and/or having NPM_PASSWORD, PRIVATE_KEY.
# Run from a host on the same LAN as NPMplus (192.168.11.x) with .env loaded.
# Usage: source .env 2>/dev/null; ./scripts/run-operator-tasks-from-lan.sh [--dry-run] [--skip-backup] [--skip-verify]
# Always loads dotenv from repo .env and smom-dbis-138/.env (no need to source before running).
# Usage: ./scripts/run-operator-tasks-from-lan.sh [--dry-run] [--skip-backup] [--skip-verify]
set -euo pipefail
@@ -9,6 +9,12 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Always load dotenv so Operator/LAN has NPM_PASSWORD, PRIVATE_KEY, etc.
if [[ -f "$SCRIPT_DIR/lib/load-project-env.sh" ]]; then
# shellcheck source=scripts/lib/load-project-env.sh
source "$SCRIPT_DIR/lib/load-project-env.sh"
fi
DRY_RUN=false
SKIP_BACKUP=false
SKIP_VERIFY=false

View File

@@ -2,6 +2,7 @@
# Comprehensive verification of all deployed systems
# Tests: Explorer, APIs, Services, MetaMask integration
# Runs all tests even if some fail; exits 1 only if any failed
# Note: 301/404/000 in other checks often expected (HTTPS redirect, wrong port, NPMplus). See docs/04-configuration/DETAILED_GAPS_AND_ISSUES_LIST.md §11a.
set -uo pipefail
@@ -25,10 +26,15 @@ test_endpoint() {
local url="$2"
local expected="$3"
local timeout="${4:-10}"
local follow_redirects="${5:-false}"
echo -n "Testing $name... "
local body
body=$(curl -s --max-time "$timeout" "$url" 2>/dev/null) || true
if [[ "$follow_redirects" == "true" ]]; then
body=$(curl -sL --max-time "$timeout" "$url" 2>/dev/null) || true
else
body=$(curl -s --max-time "$timeout" "$url" 2>/dev/null) || true
fi
if echo "$body" | grep -qE "$expected"; then
echo -e "${GREEN}PASS${NC}"
((PASSED++)) || true
@@ -44,7 +50,7 @@ echo "========================================="
echo ""
echo "1. Explorer (Blockscout) - Public"
test_endpoint "Explorer homepage" "https://explorer.d-bis.org/" "SolaceScanScout|Blockscout|blockscout|<!DOCTYPE" 25
test_endpoint "Explorer homepage" "https://explorer.d-bis.org/" "SolaceScanScout|Blockscout|blockscout|<!DOCTYPE html>|<html " 25 "true"
test_endpoint "Explorer stats API" "https://explorer.d-bis.org/api/v2/stats" "total_blocks"
test_endpoint "Explorer blocks API" "https://explorer.d-bis.org/api/v2/blocks" "height|items"
echo ""

View File

@@ -22,12 +22,17 @@ log_error() { echo -e "${RED}[✗]${NC} $1"; }
cd "$PROJECT_ROOT"
# Source .env
# Source dotenv (operator creds): repo root .env then smom-dbis-138/.env
if [ -f .env ]; then
set +euo pipefail
source .env 2>/dev/null || true
set -euo pipefail
fi
if [ -f smom-dbis-138/.env ]; then
set +euo pipefail
source smom-dbis-138/.env 2>/dev/null || true
set -euo pipefail
fi
# Load ip-addresses.conf for fallbacks (before cd)
[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

View File

@@ -192,6 +192,8 @@ verify_vm() {
fi
# Health check endpoints
# Note: 301 = HTTPS redirect (normal); 404 = wrong port/path or NPMplus; 000 = no connection (host/firewall/context).
# See docs/04-configuration/DETAILED_GAPS_AND_ISSUES_LIST.md §11a.
HEALTH_ENDPOINTS=()
if [ "$status" = "running" ] && [ -n "$actual_ip" ]; then
# Test HTTP endpoints (nginx and web both use port 80)