diff --git a/.gitignore b/.gitignore index 4babe24..0045c58 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,9 @@ docs/04-configuration/coingecko/logos/*.png # Ephemeral phase markers .phase1-event-status +# DBIS Phase 1 discovery — timestamped reports (run scripts/verify/run-phase1-discovery.sh) +reports/phase1-discovery/phase1-discovery-*.md + # OMNL operator rail (env-specific IDs, reconciliation, audit packets, posted refs) ids.env reconciliation/ diff --git a/AGENTS.md b/AGENTS.md index 729580f..681d631 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -17,6 +17,7 @@ Orchestration for Proxmox VE, Chain 138 (`smom-dbis-138/`), explorers, NPMplus, | Ops template + JSON | `docs/03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md`, `config/proxmox-operational-template.json` | | Live vs template (read-only SSH) | `bash scripts/verify/audit-proxmox-operational-template.sh` | | Config validation | `bash scripts/validation/validate-config-files.sh` | +| FQDN / NPM E2E verifier | `bash scripts/verify/verify-end-to-end-routing.sh --profile=public` — inventory: `docs/04-configuration/E2E_ENDPOINTS_LIST.md`. Gitea Actions URLs (no API): `bash scripts/verify/print-gitea-actions-urls.sh` | | Submodule trees clean (CI / post-merge) | `bash scripts/verify/submodules-clean.sh` | | Submodule + explorer remotes | `docs/00-meta/SUBMODULE_HYGIENE.md` | | smom-dbis-138 `.env` in bash scripts | Prefer `source smom-dbis-138/scripts/lib/deployment/dotenv.sh` + `load_deployment_env --repo-root "$PROJECT_ROOT"` (trims RPC URL line endings). From an interactive shell: `source smom-dbis-138/scripts/load-env.sh`. Proxmox root scripts: `source scripts/lib/load-project-env.sh` (also trims common RPC vars). 
| diff --git a/config/proxmox-operational-template.json b/config/proxmox-operational-template.json index 004c210..95899b1 100644 --- a/config/proxmox-operational-template.json +++ b/config/proxmox-operational-template.json @@ -800,6 +800,8 @@ "ipv4": "192.168.11.65", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Fabric peer/orderer/couchdb processes, no expected listeners, and no meaningful Fabric payload under /opt, /etc, or /var.", "ports": [ { "port": 7051 @@ -816,6 +818,8 @@ "ipv4": "192.168.11.64", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Indy processes, no expected listeners, and no meaningful Indy payload under /opt, /etc, or /var.", "ports": [ { "port": 9701, @@ -830,6 +834,8 @@ "ipv4": "192.168.11.35", "preferred_node": "r630-02", "category": "firefly", + "runtime_state": "active_minimal_gateway", + "notes": "Restored 2026-03-28 as a minimal local FireFly gateway on ghcr.io/hyperledger/firefly:v1.2.0; API, Postgres, and IPFS checks passed.", "ports": [ { "port": 80 @@ -849,6 +855,8 @@ "ipv4": "192.168.11.57", "preferred_node": "r630-02", "category": "firefly", + "runtime_state": "retired_standby_until_rebuilt", + "notes": "CT exists in inventory only. As of 2026-03-28 it is stopped, its rootfs is effectively empty, and no valid FireFly deployment payload is present. 
Do not treat as an active secondary node.", "ports": [ { "port": 80 @@ -1130,6 +1138,60 @@ "mifos.d-bis.org" ] }, + { + "vmid": 5802, + "hostname": "rtgs-scsm-1", + "ipv4": "192.168.11.89", + "preferred_node": "r630-02", + "category": "rtgs-sidecar", + "runtime_state": "active_internal_health_ok", + "notes": "Deployed 2026-03-28/29 as the DBIS RTGS SCSM sidecar. systemd service active, local Redis active, and /actuator/health returned UP. Live Fineract reachability to 5800 is confirmed at the HTTP layer; authenticated production flow still requires final tenant/auth freeze.", + "ports": [ + { + "port": 8080 + }, + { + "port": 6379 + } + ], + "fqdns": [] + }, + { + "vmid": 5803, + "hostname": "rtgs-funds-1", + "ipv4": "192.168.11.90", + "preferred_node": "r630-02", + "category": "rtgs-sidecar", + "runtime_state": "active_internal_health_ok", + "notes": "Deployed 2026-03-28/29 as the DBIS RTGS server-funds sidecar. systemd service active, local Redis active, and /actuator/health returned UP. Live Fineract reachability to 5800 is confirmed at the HTTP layer; authenticated production flow still requires final tenant/auth freeze.", + "ports": [ + { + "port": 8080 + }, + { + "port": 6379 + } + ], + "fqdns": [] + }, + { + "vmid": 5804, + "hostname": "rtgs-xau-1", + "ipv4": "192.168.11.92", + "preferred_node": "r630-02", + "category": "rtgs-sidecar", + "runtime_state": "active_internal_health_ok", + "notes": "Deployed 2026-03-28/29 as the DBIS RTGS off-ledger-to-on-ledger XAU sidecar. systemd service active and /actuator/health returned UP. 
Live Fineract reachability to 5800 is confirmed at the HTTP layer; authenticated production flow still requires final tenant/auth freeze.", + "ports": [ + { + "port": 8080 + }, + { + "port": 6379 + } + ], + "fqdns": [] + }, { "vmid": 5801, "hostname": "dapp-smom", @@ -1613,6 +1675,8 @@ "ipv4": "192.168.11.178", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Fabric payload, processes, or listeners.", "ports": [ { "port": 7051 @@ -1626,6 +1690,8 @@ "ipv4": "192.168.11.252", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Fabric payload, processes, or listeners.", "ports": [ { "port": 7051 @@ -1639,6 +1705,8 @@ "ipv4": "192.168.11.179", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. Earlier app-native checks found no active Indy payload, processes, or listeners.", "ports": [], "fqdns": [] }, @@ -1648,6 +1716,8 @@ "ipv4": "192.168.11.253", "preferred_node": "r630-02", "category": "dlt", + "runtime_state": "reserved_placeholder_stopped", + "notes": "As of 2026-03-28 this CT has been reclassified as a reserved placeholder and stopped. 
Earlier app-native checks found no active Indy payload, processes, or listeners.", "ports": [], "fqdns": [] }, diff --git a/dbis_chain_138_technical_master_plan.md b/dbis_chain_138_technical_master_plan.md new file mode 100644 index 0000000..c4ca0a7 --- /dev/null +++ b/dbis_chain_138_technical_master_plan.md @@ -0,0 +1,811 @@ +# DBIS Chain 138 Technical Master Plan + +## Purpose +This document is the governance and execution baseline for DBIS Chain 138 infrastructure. It is intentionally grounded in repo-backed and operator-verified reality, so it can be used for audits, deployment planning, and readiness decisions without confusing `currently deployed`, `under validation`, and `future-state` work. + +The objective is to move from architecture theory to a production-grade sovereign deployment program that is evidence-based, phased, and operationally auditable. + +--- + +# SECTION 1 — MASTER OBJECTIVES + +## Primary objectives + +1. Inventory currently installed stack components and host placement. +2. Validate actual service readiness, not just declared architecture. +3. Standardize Proxmox VE deployment topology and preferred workload placement. +4. Assign infrastructure ownership across ecosystem entities once governance is finalized. +5. Define production-grade deployment and verification workflows. +6. Track the gap between today’s footprint and sovereign target-state architecture. +7. Produce auditable artifacts that operators can regenerate and maintain. 
+ +--- + +# SECTION 2 — CURRENT STACK STATUS + +## Deployed now + +- Hyperledger Besu (QBFT, Chain 138) +- Hyperledger Fabric containers and VMIDs are allocated +- Hyperledger Indy containers and VMIDs are allocated +- Hyperledger FireFly primary container footprint exists +- Blockscout / explorer stack +- Hyperledger Caliper hook and performance guidance (documentation only) + +## Partially deployed / under validation + +- Hyperledger FireFly: + - primary `6200` is restored as a minimal local FireFly API footprint + - secondary `6201` is present in inventory but currently behaves like a retired / standby shell with no valid deployment payload +- Hyperledger Fabric: + - `6000`, `6001`, `6002` are present in inventory but are now intentionally stopped as reserved placeholders + - current app-level verification did not show active Fabric peer / orderer workloads or meaningful Fabric payloads inside those CTs +- Hyperledger Indy: + - `6400`, `6401`, `6402` are present in inventory but are now intentionally stopped as reserved placeholders + - current app-level verification did not show active Indy node listeners or meaningful Indy payloads inside those CTs + +## Planned / aspirational + +- Hyperledger Aries as a proven deployed service tier +- Hyperledger AnonCreds as an operationally verified deployed layer +- Hyperledger Ursa as a required runtime dependency +- Hyperledger Quilt +- Hyperledger Avalon +- Hyperledger Cacti as a proven live interoperability layer +- Full multi-region sovereignized Proxmox with Ceph-backed storage and segmented production VLANs + +--- + +# SECTION 3 — CURRENT ENVIRONMENT DISCOVERY + +## Canonical discovery artifacts + +The source-of-truth discovery path for current state is: + +- [docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md](docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md) +- [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md) +- 
[docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md) +- [scripts/verify/run-phase1-discovery.sh](scripts/verify/run-phase1-discovery.sh) +- [config/proxmox-operational-template.json](config/proxmox-operational-template.json) +- [docs/04-configuration/ALL_VMIDS_ENDPOINTS.md](docs/04-configuration/ALL_VMIDS_ENDPOINTS.md) +- [docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md](docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md) + +## Discovery scope + +Reality mapping must validate: + +1. Proxmox hosts and cluster health +2. VMID / CT inventory versus template JSON +3. Besu validators, sentries, and RPC tiers +4. Explorer and public RPC availability +5. Hyperledger CT presence and app-level readiness where possible +6. Storage topology and current backing stores +7. Network topology and current LAN / VLAN reality +8. ML110 role reality versus migration plan + +## Required outputs + +Every discovery run should produce: + +- Infrastructure inventory report +- Service state map +- Dependency context +- Critical failure summary + +The markdown report is evidence capture; the script exit code is the pass/fail signal. + +--- + +# SECTION 4 — PROXMOX VE DEPLOYMENT DESIGN + +## Current state + +- Current cluster footprint is smaller than the target sovereign model. +- Current storage is primarily local ZFS / LVM-based, not Ceph-backed distributed storage. +- Current workload placement is represented as `preferred host` in the planning template, not guaranteed live placement. + +## Target model + +- Multi-node Proxmox VE cluster with stable quorum +- HA-aware workload placement +- Dedicated roles for core compute, RPC exposure, identity/workflow DLT, ingress, and future storage tiers + +## Current interpretation rule + +This plan must not describe the target sovereignized Proxmox model as already achieved. 
All references to HA, Ceph, dedicated storage nodes, or dedicated network nodes are roadmap items unless Phase 1 evidence proves they are already active. + +--- + +# SECTION 5 — NETWORK ARCHITECTURE + +## Current network reality + +- Primary active management / services LAN is `192.168.11.0/24` +- Public ingress is fronted through NPMplus / edge services +- RPC exposure is already tiered across core, public, private, named, and thirdweb-facing nodes + +## Target network layers + +1. Management network +2. Storage replication network +3. Blockchain validator / P2P network +4. Identity / workflow DLT network +5. Public access / DMZ network +6. Validator-only restricted paths + +## Status + +- Public access and RPC role separation exist in practice. +- Full sovereign segmentation with dedicated VLANs and zero-trust internal routing remains roadmap work. + +--- + +# SECTION 6 — ENTITY ASSIGNMENT MODEL + +## Governance model + +The entity-assignment model remains valid as a target governance structure: + +- DBIS Core Authority +- Central Banks +- International Financial Institutions +- Regional Operators + +## Current status + +- Entity ownership for many deployed workloads is still `TBD` in the operational matrix. +- Until governance assigns final owners, operator documentation must keep those fields explicitly marked as `TBD` rather than inventing ownership. 
+ +The executable placement artifact is: + +- [docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md](docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md) + +--- + +# SECTION 7 — VM AND CONTAINER DESIGN + +## Current status by workload family + +### Deployed now + +- Settlement / Besu VM family +- Explorer / observability family +- Ingress / proxy family +- Application and DBIS-support workloads + +### Partially deployed / under validation + +- Workflow VM / CT family for FireFly +- Institutional VM / CT family for Fabric +- Identity VM / CT family for Indy + +### Planned / aspirational + +- Identity VM template that includes proven Aries + AnonCreds runtime +- Interoperability VM template for true Hyperledger Cacti usage + +## Implementation rule + +Template language in this plan must map to actual repo artifacts and actual VMIDs, not hypothetical inventory. + +--- + +# SECTION 8 — STORAGE ARCHITECTURE + +## Current state + +- Current guest storage is backed by local Proxmox storage pools. +- Ceph-backed distributed storage is not yet an achieved platform baseline. + +## Target state + +- Ceph or equivalent distributed storage tier +- Snapshot-aware backup strategy by workload class +- Archive and audit retention policy + +## Roadmap artifact + +- [docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) + +--- + +# SECTION 9 — SECURITY ARCHITECTURE + +## Current baseline + +- Chain 138 validator, sentry, and RPC tiering exists as an operational pattern. +- Public RPC capability validation is now script-backed. +- Explorer and wallet metadata are now explicitly documented and verifiable. + +## Target-state controls + +- HSM-backed key management +- stronger secrets segregation +- certificate hierarchy and operator MFA +- formalized tier-to-tier firewall policy + +## Status + +These remain partially implemented and must not be represented as fully complete without separate evidence. 
+ +--- + +# SECTION 10 — GOVERNANCE ARCHITECTURE + +## Target + +- validator governance across multiple entities +- admission control +- key rotation +- emergency controls + +## Current state + +- Chain 138 validator topology exists +- final multi-entity validator governance assignment is still pending + +This section remains a target architecture section, not a statement of fully executed governance. + +--- + +# SECTION 11 — FIREFLY WORKFLOW ARCHITECTURE + +## Current state + +- FireFly primary footprint exists and now exposes a local API again. +- Current restored `6200` configuration is a minimal local gateway profile for stability and API availability. +- Full multiparty FireFly workflow behavior across blockchain, shared storage, and data exchange is not yet evidenced as healthy in the current container deployment. + +## Program objective + +Use FireFly as the workflow layer only after: + +1. primary and secondary footprint are clearly defined +2. connector/plugin model is explicit +3. upstream blockchain and shared-storage dependencies are validated + +## Current execution artifacts + +- [docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md](docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md) +- [docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md](docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md) + +## 11.1 Depository / CSD architecture + +### Current state + +- A dedicated depository / central securities depository runtime is not currently evidenced as deployed in this environment. +- The depository role is still implied inside broader settlement, securities, and custody discussions rather than frozen as a first-class production component. 
+- The canonical production checklist row is: + - [Depository / CSD layer](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- maintain the authoritative asset register for in-scope instruments +- define issuance, transfer, pledge, and lien semantics +- provide the settlement-touch point between asset ownership and RTGS finality + +### Required integrations + +- OMNL / Fineract participant and account model +- custody and safekeeping lifecycle +- Chain 138 settlement and evidence path where on-ledger finality is in scope +- external statements, reconciliation, and regulatory evidence outputs + +### Current gaps + +- No frozen decision yet on whether the depository role is on-ledger, off-ledger, or hybrid. +- No participant-to-asset-register relationship is yet frozen for custody, pledge, and transfer scenarios. + +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md) + +### System flow + +```mermaid +flowchart LR + OMNL["OMNL / Fineract"] -->|"participant + account context"| CSD["Depository / CSD"] + CSD -->|"asset ownership + settlement touch"| RTGS["RTGS Orchestrator"] + RTGS -->|"cash settlement leg"| BANK["Bank / Correspondent Rail"] + RTGS -->|"optional finality evidence"| CHAIN["Chain 138 Settlement"] + CSD -->|"holdings + entitlements"| CUSTODY["Custody / Safekeeping"] + CUSTODY -->|"statements + evidence"| EVIDENCE["Audit / Reconciliation Package"] +``` + +### Contract — Depository asset-register and settlement-touch + +- Owning subsystem: Depository / CSD layer +- Required integrations: participant model, custody model, settlement orchestration, 
reconciliation/evidence +- Canonical business object or event: asset position, transfer instruction, pledge/release instruction +- Reconciliation / evidence requirement: holdings register must reconcile to settlement state and custody reporting +- Production completion condition: one canonical asset flow proves issuance/transfer/settlement-touch behavior end to end + +## 11.2 Global custodian architecture + +### Current state + +- No explicit global custodian runtime or operating model is currently evidenced as active in the repo-backed deployment state. +- Custodian responsibilities are currently implied through correspondent-bank and safekeeping language, not frozen as one production role. +- The canonical production checklist row is: + - [Global custodian layer](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- manage safekeeping accounts and sub-custody relationships +- coordinate global bank, correspondent, and asset-servicing obligations +- provide statement, confirmation, and reconciliation surfaces for institutional holdings + +### Required integrations + +- depository / CSD role +- correspondent and global-bank messaging lanes +- custody / safekeeping / asset-servicing lifecycle +- OMNL and RTGS reconciliation packages + +### Current gaps + +- No frozen custody account structure or reporting model exists yet. +- Corporate-action, entitlement, and asset-servicing obligations are not yet mapped into the RTGS program. 
+ +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md) + +### Contract — Global custodian account, reporting, and reconciliation + +- Owning subsystem: Global custodian layer +- Required integrations: correspondent/global-bank path, depository role, custody operations, evidence package +- Canonical business object or event: custody account statement, holdings advice, settlement confirmation +- Reconciliation / evidence requirement: custodian statements must reconcile to OMNL and settlement state +- Production completion condition: one canonical custody flow includes account structure, reporting, and reconciliation outputs + +## 11.3 FX pricing and dealing architecture + +### Current state + +- FX pricing, valuation, and revaluation requirements are documented, but no single production pricing/dealing engine contract is yet frozen. +- Existing materials prove the need for FX handling, not a finalized runtime ownership model. +- The canonical production checklist row is: + - [FX pricing / dealing engine](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- own quote generation or ingestion +- apply spread and pricing policy +- lock rates, value dates, and booking terms +- feed OMNL, treasury, and settlement services with the approved FX terms + +### Required integrations + +- treasury policy and limits +- participant / office / GL model +- `server-funds-sidecar` and `off-ledger-2-on-ledger-sidecar` +- reconciliation and evidence path + +### Current gaps + +- No frozen source hierarchy yet for rates, triangulation, and overrides. 
+- No canonical quote lifecycle is yet mapped from request to booking to reconciliation. + +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md](docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md) +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md) + +### Sequence diagram + +```mermaid +sequenceDiagram + participant Client as Initiating System + participant ORCH as RTGS Orchestrator + participant FX as FX Pricing / Dealing Engine + participant TREASURY as Treasury / Funds + participant OMNL as OMNL / Fineract + participant SETTLE as Settlement Service + + Client->>ORCH: FX-backed payment request + ORCH->>FX: Quote request with currencies, amount, value date + FX-->>ORCH: Locked quote, spread, rate source, expiry + ORCH->>TREASURY: Liquidity and approval check + TREASURY-->>ORCH: Funding approval / rejection + ORCH->>OMNL: Post booked FX and settlement journals + OMNL-->>ORCH: Accounting confirmation + ORCH->>SETTLE: Trigger settlement leg with FX references + SETTLE-->>ORCH: Settlement reference and finality state +``` + +### Contract — FX quote, pricing, and booking + +- Owning subsystem: FX pricing / dealing engine +- Required integrations: treasury, OMNL, sidecars, settlement, reconciliation +- Canonical business object or event: FX quote, booked FX instruction, revaluation event +- Reconciliation / evidence requirement: rate source, booked rate, and realized/unrealized P&L must reconcile +- Production completion condition: one canonical FX transaction completes with frozen inputs, accounting, and reconciliation + +## 11.4 Liquidity pooling and aggregation architecture + +### 
Current state + +- Liquidity and prefunding checks are documented, but no explicit pooling/aggregation engine is yet modeled as a first-class production component. +- Liquidity sourcing is currently spread across treasury, correspondent, and optional on-chain discussions. +- The canonical production checklist row is: + - [Liquidity pooling and aggregation engine](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- evaluate available liquidity sources +- apply prioritization and eligibility policy +- allocate funding across internal and external sources +- expose operator controls for override, hold, and audit + +### Required integrations + +- treasury account model +- reserve policy +- bank and correspondent source adapters +- optional on-chain liquidity and settlement lanes + +### Current gaps + +- No source-priority model is yet frozen. +- No operator control model is yet defined for overrides, holds, or emergency liquidity routing. + +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md) + +### Flowchart + +```mermaid +flowchart LR + REQUEST["Funding Request"] --> ENGINE["Liquidity Pooling / Aggregation Engine"] + ENGINE --> INTERNAL["Internal Treasury Pool"] + ENGINE --> BANKLINES["Bank Credit / Liquidity Lines"] + ENGINE --> CORR["Correspondent / Global Bank Sources"] + ENGINE --> ONCHAIN["Optional On-Chain Liquidity"] + INTERNAL --> DECISION["Funding Decision"] + BANKLINES --> DECISION + CORR --> DECISION + ONCHAIN --> DECISION + DECISION --> ORCH["RTGS Orchestrator"] + ORCH --> OMNL["OMNL / Fineract"] +``` + +### Contract — Liquidity-engine source selection and 
allocation + +- Owning subsystem: Liquidity pooling and aggregation engine +- Required integrations: treasury policy, source adapters, RTGS orchestrator, OMNL +- Canonical business object or event: funding request, allocation decision, liquidity hold/release +- Reconciliation / evidence requirement: chosen source and allocation rationale must be reconstructible +- Production completion condition: one canonical funding decision path is documented and validated + +## 11.5 Liquidity source adapter model + +### Current state + +- Source classes are referenced in treasury and correspondent-bank materials, but no canonical adapter model is yet frozen for each source family. +- The canonical production checklist row is: + - [Liquidity source adapters](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- normalize access to internal treasury pools, bank lines, correspondent banks, and optional on-chain liquidity +- hide transport/auth differences behind one adapter family +- return funding availability, hold, release, and confirmation events into the liquidity engine + +### Required integrations + +- liquidity pooling and aggregation engine +- correspondent-bank and global-bank rails +- treasury controls and operator policies +- optional Chain 138 or sidecar/provider adapters + +### Current gaps + +- No adapter catalog yet exists for source families. +- No required minimum adapter contract is yet documented. 
+ +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md) + +### Contract — Liquidity source adapter + +- Owning subsystem: Treasury / integrations layer +- Required integrations: liquidity engine, bank/correspondent paths, treasury controls +- Canonical business object or event: liquidity quote, hold confirmation, release confirmation, failure reason +- Reconciliation / evidence requirement: source selection and adapter result must be linked to the settlement package +- Production completion condition: each in-scope source class has a defined adapter contract and mandatory sources are validated + +## 11.6 Custody / safekeeping / asset servicing architecture + +### Current state + +- Custody and safekeeping obligations are referenced implicitly in correspondent-bank, securities, and evidence discussions, but not yet frozen as one canonical lifecycle. +- The canonical production checklist row is: + - [Custody / safekeeping / asset servicing flow](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) + +### Target role + +- manage safekeeping, transfer, entitlement, and servicing lifecycles +- bind depository positions, custodian reporting, and settlement state into one auditable trail +- produce holdings, statements, and servicing evidence for institutional participants + +### Required integrations + +- depository / CSD layer +- global custodian layer +- OMNL participant and account model +- RTGS settlement and evidence package + +### Current gaps + +- No canonical custody lifecycle is yet frozen. 
+- Corporate-action, entitlement, and servicing events are not yet mapped into reconciliation artifacts. + +### Execution artifacts + +- [docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md) +- [docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md](docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md) + +### Sequence and state view + +```mermaid +sequenceDiagram + participant DEP as Depository / CSD + participant CUST as Custodian + participant ORCH as RTGS Orchestrator + participant OMNL as OMNL / Fineract + participant EVIDENCE as Evidence Package + + DEP->>CUST: Position / entitlement update + CUST->>ORCH: Safekeeping or servicing instruction + ORCH->>OMNL: Accounting impact or fee posting + OMNL-->>ORCH: Posting confirmation + ORCH->>EVIDENCE: Reconciliation and servicing references + EVIDENCE-->>CUST: Statement / audit package references +``` + +```mermaid +stateDiagram-v2 + [*] --> Registered + Registered --> Safekept + Safekept --> Transferred + Safekept --> Serviced + Transferred --> Reconciled + Serviced --> Reconciled + Reconciled --> Reported + Reported --> [*] +``` + +### Contract — Custody, safekeeping, and asset-servicing lifecycle + +- Owning subsystem: Custody operations / product architecture layer +- Required integrations: depository, custodian, OMNL, evidence package +- Canonical business object or event: custody instruction, holdings statement, servicing event +- Reconciliation / evidence requirement: holdings, statements, and servicing events must reconcile to settlement and participant records +- Production completion condition: one end-to-end custody lifecycle is documented and validated with reconciliation/evidence output + +--- + +# SECTION 12 — CROSS-CHAIN INTEROPERABILITY DESIGN + +## Current 
state + +- CCIP relay and Chain 138 cross-chain infrastructure exist in the broader stack. +- Hyperledger Cacti is not currently proven as the live interoperability engine for DBIS in this environment. + +## Planning rule + +This plan must refer to Cacti as `future / optional` until a deployed and validated Cacti environment is evidenced in discovery artifacts. + +--- + +# SECTION 13 — DEVSECOPS PIPELINE + +## Required execution model + +1. Source control +2. Build / validation +3. Security and config review +4. Service verification +5. Deployment +6. Monitoring and readiness evidence + +## Repo-backed implementation + +- discovery scripts +- RPC health checks +- route / explorer verification +- operator runbooks +- submodule hygiene and deployment docs + +The pipeline is partially implemented via scripts and runbooks; it is not yet a single unified CI/CD system for every DBIS workload. + +--- + +# SECTION 14 — PERFORMANCE VALIDATION + +## Current state + +- Hyperledger Caliper is not vendored in this repo. +- A documented performance hook exists instead of a committed benchmark harness. + +## Canonical artifact + +- [docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md](docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md) + +## Interpretation rule + +Performance benchmarking is planned and documented, but not yet a routine automated readiness gate. + +--- + +# SECTION 15 — MONITORING AND OBSERVABILITY + +## Deployed now + +- Explorer / Blockscout +- Besu RPC health verification +- operational checks and route verification scripts + +## Partially deployed / under validation + +- Hyperledger-side service health beyond CT status +- unified status reporting for the broader DLT stack + +--- + +# SECTION 16 — DISASTER RECOVERY DESIGN + +## Target state + +- RPO / RTO by workload tier +- cross-site replication +- cold / standby recovery paths + +## Current state + +DR remains a program requirement, not a fully evidenced completed deployment capability. 
+ +--- + +# SECTION 17 — PRODUCTION DEPLOYMENT WORKFLOW + +## Phase 1 — Reality mapping + +Canonical implementation: + +- [scripts/verify/run-phase1-discovery.sh](scripts/verify/run-phase1-discovery.sh) +- [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md) + +## Phase 2 — Sovereignization roadmap + +Canonical implementation: + +- [docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) + +## Phase 3 — Liveness and production-simulation wrapper + +Canonical implementation: + +- [scripts/verify/run-dbis-phase3-e2e-simulation.sh](scripts/verify/run-dbis-phase3-e2e-simulation.sh) +- [docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md) + +--- + +# SECTION 18 — END-TO-END PRODUCTION FLOW + +## Reference flow + +1. Identity issued +2. Credential verified +3. Workflow triggered +4. Settlement executed +5. Cross-chain sync +6. Compliance recorded +7. Final settlement confirmed + +## Current interpretation + +This is the target business flow. Current automation verifies only selected infrastructure slices of that flow: + +- Besu liveness +- optional FireFly HTTP +- operator-guided manual follow-ups for Indy / Fabric / CCIP +- the currently recommended narrow RTGS first slice documented in: + - [docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md](docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md) + - [docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md](docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md) + +It must not be represented as fully automated end-to-end execution today. + +--- + +# SECTION 19 — EXECUTION DIRECTIVES + +Cursor / operators should execute the following in order: + +1. Run Phase 1 discovery and review the critical failure summary. +2. 
Reconcile node-role matrix conflicts, especially duplicate IP planning entries. +3. Validate live Hyperledger CTs at the app layer, not only CT status. +4. Track sovereignization gaps in the Phase 2 roadmap. +5. Run the Phase 3 liveness wrapper and manual follow-ups. +6. Produce or refresh readiness evidence. + +These directives must map to repo scripts and docs, not hypothetical tooling. + +--- + +# SECTION 20 — EXPECTED DELIVERABLES + +The executable deliverables in this repository are: + +1. Infrastructure inventory report +2. Node role assignment map +3. Phase 2 sovereignization roadmap +4. Phase 3 liveness simulation runbook +5. Caliper performance hook +6. Operator readiness checklist + +Separate security compliance and benchmark reports remain future deliverables unless explicitly generated. + +--- + +# SECTION 21 — CURRENT GAPS + +## Infrastructure gaps + +- FireFly secondary `6201` is currently stopped and should be treated as retired / standby until intentionally rebuilt. +- Fabric CTs are present in inventory, but current app-level verification did not prove active Fabric peer or orderer services and did not show meaningful Fabric payloads; they are now intentionally stopped as reserved placeholders. +- Indy CTs are present in inventory, but current app-level verification did not prove active Indy validator listeners and did not show meaningful Indy payloads; they are now intentionally stopped as reserved placeholders. +- The current per-node app-level evidence table is maintained in [docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md). + +## Platform gaps + +- Ceph-backed distributed storage is still roadmap work. +- Full VLAN / sovereign network segmentation is still roadmap work. +- Final entity ownership assignments remain incomplete. +- The selected first-slice HYBX sidecars are now deployed internally on Proxmox VE and healthy at the runtime level. 
+- The `mifos-fineract-sidecar` lane has now completed at least one authenticated live OMNL / Fineract transfer posting, but the broader participant model, Chain 138 settlement leg, reconciliation/evidence output, and the `server-funds-sidecar` / `off-ledger-2-on-ledger-sidecar` business flows are still not frozen end to end. + +## Planning gaps + +- Future-state architecture items must remain clearly labeled as planned, not deployed. + +--- + +# SECTION 22 — IMPLEMENTATION ARTIFACTS + +Executable counterparts in this repository: + +| Deliverable | Location | +|-------------|----------| +| Node Role Matrix | `docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md` | +| Phase 1 discovery | `scripts/verify/run-phase1-discovery.sh`, `docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md`, `reports/phase1-discovery/` | +| Phase 2 roadmap | `docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md` | +| Phase 3 liveness wrapper | `scripts/verify/run-dbis-phase3-e2e-simulation.sh`, `docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md` | +| Production gate | `docs/03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md` | +| RTGS canonical production checklist | `docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md` | +| RTGS FX transaction catalog | `docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md` | +| RTGS depository and custody operating model | `docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md` | +| RTGS FX and liquidity operating model | `docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md` | +| RTGS control-plane deployment checklist | `docs/03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md` | +| RTGS control-plane deployment scripts | `scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh`, `scripts/deployment/deploy-dbis-rtgs-control-plane.sh`, `scripts/verify/check-dbis-rtgs-control-plane.sh` | +| RTGS later-phase sidecars deployment checklist | 
`docs/03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md` | +| RTGS later-phase sidecars deployment scripts | `scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh`, `scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh`, `scripts/verify/check-dbis-rtgs-later-phase-sidecars.sh` | +| Indonesia / BNI E2E integration blueprint | `docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md` | +| RTGS first-slice architecture | `docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md` | +| RTGS first-slice deployment checklist | `docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md` | +| Caliper hook | `docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md`, `scripts/verify/print-caliper-chain138-stub.sh` | +| Operator readiness checklist | `docs/00-meta/OPERATOR_READY_CHECKLIST.md` section 10 | diff --git a/docs/00-meta/OPERATOR_READY_CHECKLIST.md b/docs/00-meta/OPERATOR_READY_CHECKLIST.md index 6c9c823..22b9a00 100644 --- a/docs/00-meta/OPERATOR_READY_CHECKLIST.md +++ b/docs/00-meta/OPERATOR_READY_CHECKLIST.md @@ -1,6 +1,6 @@ # Operator Ready Checklist — Copy-Paste Commands -**Last Updated:** 2026-03-27 +**Last Updated:** 2026-03-28 **Purpose:** Single page with exact commands to complete every pending todo. Run from **repo root** on a host with **LAN** access (and `smom-dbis-138/.env` with `PRIVATE_KEY`, `NPM_PASSWORD` where noted). **Do you have all necessary creds?** See [OPERATOR_CREDENTIALS_CHECKLIST.md](OPERATOR_CREDENTIALS_CHECKLIST.md) — per-task list of LAN, PRIVATE_KEY, NPM_PASSWORD, RPC_URL_138, SSH, LINK, gas, token balance. @@ -276,6 +276,21 @@ This is intentionally deferred with the rest of the Wemix path. If the chain is --- +## 10. 
DBIS Chain 138 — phased production path (matrix-driven) + +**Ref:** [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md), [DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) + +| Phase | Action | +|-------|--------| +| 1 — Reality mapping | `bash scripts/verify/run-phase1-discovery.sh` (optional: `HYPERLEDGER_PROBE=1`). Reports: `reports/phase1-discovery/`. Runbook: [PHASE1_DISCOVERY_RUNBOOK.md](../03-deployment/PHASE1_DISCOVERY_RUNBOOK.md). | +| 2 — Sovereignization roadmap | Read [DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](../02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md); execute milestones (cluster expansion, Ceph, VLANs) as prioritized. | +| 3 — E2E simulation | `bash scripts/verify/run-dbis-phase3-e2e-simulation.sh` (optional: `RUN_CHAIN138_RPC_HEALTH=1`). Full flow + Indy/Fabric/CCIP manual steps: [DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](../03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md). | +| Perf (Caliper) | `bash scripts/verify/print-caliper-chain138-stub.sh` — then [CALIPER_CHAIN138_PERF_HOOK.md](../03-deployment/CALIPER_CHAIN138_PERF_HOOK.md). | + +**Readiness:** Resolve critical **Entity owner** / **Region** **TBD** rows in the Node Role Matrix before claiming multi-entity production governance. + +--- + ## References - [COMPLETE_REQUIRED_OPTIONAL_RECOMMENDED_INDEX.md](COMPLETE_REQUIRED_OPTIONAL_RECOMMENDED_INDEX.md) — full plan (required, optional, recommended) diff --git a/docs/00-meta/ROOT_DIRTY_WORK_REVIEW_2026-03-29.md b/docs/00-meta/ROOT_DIRTY_WORK_REVIEW_2026-03-29.md new file mode 100644 index 0000000..13abbd9 --- /dev/null +++ b/docs/00-meta/ROOT_DIRTY_WORK_REVIEW_2026-03-29.md @@ -0,0 +1,117 @@ +# Root Dirty Work Review — 2026-03-29 + +**Purpose:** Separate unrelated root-repo dirty work from the DBIS RTGS tranche so operators can review risk and merge intent without conflating changes. 
+
+## Scope reviewed
+
+From `git status --short` in the root repo:
+
+- modified:
+  - `docs/04-configuration/GITEA_ACT_RUNNER_SETUP.md`
+  - `pnpm-lock.yaml`
+  - `scripts/dev-vm/setup-act-runner.sh`
+  - `scripts/lib/load-project-env.sh`
+  - `smom-dbis-138` submodule pointer dirty
+- untracked:
+  - `scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh`
+  - `scripts/dev-vm/install-act-runner-systemd.sh`
+
+## Findings
+
+### 1. Gitea act-runner lane
+
+**Files**
+- [GITEA_ACT_RUNNER_SETUP.md](../04-configuration/GITEA_ACT_RUNNER_SETUP.md)
+- [setup-act-runner.sh](../../scripts/dev-vm/setup-act-runner.sh)
+- [bootstrap-gitea-act-runner-site-wide.sh](../../scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh)
+- [install-act-runner-systemd.sh](../../scripts/dev-vm/install-act-runner-systemd.sh)
+
+**What changed**
+- docs now describe a site-wide bootstrap path
+- setup defaults now target `http://127.0.0.1:3000`
+- runner labels default to `ubuntu-latest`
+- new scripts appear intended to:
+  - fetch a registration token via admin API
+  - install and daemonize the runner under systemd
+
+**Assessment**
+- This is a coherent feature lane, not random drift.
+- It should be reviewed and committed as a dedicated `act_runner bootstrap` tranche.
+- It is unrelated to the RTGS sidecar work.
+
+**Risk**
+- low to medium if kept isolated
+- medium to high if accidentally bundled with RTGS or infra-truth updates
+
+### 2. `scripts/lib/load-project-env.sh`
+
+**What changed**
+- VMID `5700` was added to the `r630-02` host mapping set.
+
+**Assessment**
+- This appears to be a small support fix for the act-runner / dev-vm lane.
+- It is logically related to the act-runner work above.
+- It should ship with that lane, not with unrelated RTGS work.
+
+**Risk**
+- low if correct
+- moderate if `5700` is mobile and the mapping is treated as permanently canonical
+
+### 3. 
`pnpm-lock.yaml` + +**What changed** +- substantial additions for a root importer named `smom-dbis-138` +- many Hardhat / TypeChain / changesets and related packages + +**Assessment** +- this is a generated artifact from a package-manager operation +- it likely belongs to a separate `smom-dbis-138` JavaScript / Hardhat tooling lane +- it is not tied to the RTGS docs/deployment tranche + +**Risk** +- medium if committed without the matching `package.json` / workspace intent +- high if it accidentally lands in the root repo without the team intending to manage `smom-dbis-138` from the root PNPM workspace + +### 4. `smom-dbis-138` submodule dirty state + +**Observed** +- deleted contract files: + - `contracts/emoney/BridgeVault138.sol` + - `contracts/emoney/ComplianceRegistry.sol` + - `contracts/emoney/PolicyManager.sol` + - `contracts/emoney/TokenFactory138.sol` + - several `contracts/emoney/interfaces/*` +- modified: + - `package.json` + - `package-lock.json` +- untracked: + - `playwright-report/` + +**Assessment** +- this is high-risk and should be treated as a separate active refactor or deletion lane +- the deleted contracts are core-emoney-surface files, not cosmetic churn +- do not bundle or auto-commit this with unrelated infra/docs work + +**Risk** +- high + +## Recommended split + +1. **Act-runner tranche** + - docs + - setup script + - bootstrap script + - systemd install script + - `load-project-env.sh` mapping + +2. **Root JS / lockfile tranche** + - `pnpm-lock.yaml` + - only if intentionally paired with a matching workspace/package change + +3. **`smom-dbis-138` contract/package tranche** + - separate review required + - verify whether the deletions are intentional refactor, move, or accidental loss + +## Rule + +Until these lanes are reviewed and intentionally grouped, they should remain excluded from RTGS-sidecar, explorer, and DBIS master-plan commits. 
diff --git a/docs/00-meta/TODO_TASK_LIST_MASTER.md b/docs/00-meta/TODO_TASK_LIST_MASTER.md index c2e6832..dd20f71 100644 --- a/docs/00-meta/TODO_TASK_LIST_MASTER.md +++ b/docs/00-meta/TODO_TASK_LIST_MASTER.md @@ -175,7 +175,202 @@ --- -## 12. Maintenance (135–139) +## 12. DBIS RTGS / HYBX / Hyperledger E2E stack + +**Purpose:** Track everything required for a true end-to-end RTGS stack across DBIS Chain 138, HYBX sidecars, OMNL / Fineract, and the external banking / interoperability integrations we currently have access to. + +### 12.1 Participant / treasury / GL model + +- [ ] Finalize participant model for RTGS and settlement: + - central bank / RTGS operator + - HYBX participant + - Bank Kanaya and other offices / institutions +- [ ] Finalize treasury account model: + - settlement + - reserve + - nostro + - vostro + - liquidity / prefunding accounts +- [ ] Finalize GL mappings and JE flows for RTGS settlement in OMNL / Fineract. +- [ ] Freeze the canonical ID resolution flow using: + - `scripts/omnl/resolve_ids.sh` + - `scripts/omnl/omnl-office-create-*.sh` + - `scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh` + +### 12.1A Depository / CSD layer + +- [ ] Define the depository / CSD operating model for in-scope DBIS instruments. +- [ ] Freeze whether the depository role is on-ledger, off-ledger, or hybrid. +- [ ] Freeze issuance, transfer, pledge, lien, and settlement-touch behavior for at least one canonical asset flow. +- [ ] Define participant-to-asset-register and custody relationships for depository-managed assets. + +### 12.1B Global custodian layer + +- [ ] Define the global custodian operating model and account structure. +- [ ] Freeze safekeeping, statement, and asset-servicing obligations across correspondent and global-bank paths. +- [ ] Define how custodian statements reconcile to OMNL and RTGS settlement state. + +### 12.1C FX pricing / dealing engine + +- [ ] Freeze the FX pricing hierarchy, approved rate sources, and quote-locking rules. 
+- [ ] Freeze the quote lifecycle from request to booking to reconciliation. +- [ ] Define how the FX engine integrates with OMNL, treasury, and HYBX sidecars. + +### 12.1D Liquidity pooling and aggregation engine + +- [ ] Define source prioritization, eligibility rules, allocation logic, and operator overrides. +- [ ] Freeze how liquidity decisions are recorded and reconciled against funding and settlement events. +- [ ] Decide when on-chain liquidity is part of the funding policy versus optional extension. + +### 12.1E Liquidity source adapters + +- [ ] Enumerate all in-scope liquidity source families: + - internal treasury pools + - bank credit / liquidity lines + - correspondent-bank sources + - optional on-chain liquidity +- [ ] Define one adapter contract per mandatory source class. +- [ ] Validate at least the mandatory source adapters used by the canonical RTGS rail. + +### 12.1F Custody / safekeeping / asset servicing flow + +- [ ] Define the canonical lifecycle for safekeeping, transfer, servicing, and statement production. +- [ ] Freeze custody-to-depository, custody-to-settlement, and custody-to-evidence relationships. +- [ ] Validate one end-to-end custody lifecycle with reconciliation and evidence output. + +### 12.2 Mifos / Fineract / OMNL banking rail + +- [ ] Freeze and execute the first-slice deployment checklist: + - `docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md` + - `docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md` +- [ ] Confirm production-grade Mifos/Fineract tenancy, credentials, API reachability, and operator runbook completeness for the current OMNL environment. 
+- [ ] Complete the full operator rail using: + - `scripts/omnl/omnl-operator-rail.sh` + - `scripts/omnl/omnl-reconciliation-office20.sh` + - `scripts/omnl/omnl-audit-packet-office20.sh` +- [ ] Complete the Indonesia / HYBX evidence path: + - `scripts/omnl/build-transaction-package-zip.sh` + - `scripts/omnl/verify-transaction-package-commitment.py` + - `scripts/omnl/check-transaction-package-4995-readiness.sh --strict` +- [ ] Freeze the source-of-truth API contract from `docs/11-references/API_DOCUMENTATION.md` and the OMNL OpenAPI snapshot. + +### 12.3 Mojaloop integration + +- [ ] Identify the exact Mojaloop deployment / switch endpoints currently available to HYBX. +- [ ] Document the live Mojaloop API contract and auth model: + - quote + - transfer + - callback / status + - settlement window / liquidity behavior + +### 12.4 First-slice HYBX sidecar promotion + +- [ ] Promote the selected first-slice sidecars from local build verification to real production runtime on Proxmox VE: + - `mifos-fineract-sidecar` + - `server-funds-sidecar` + - `off-ledger-2-on-ledger-sidecar` +- [ ] Freeze Proxmox runtime targets, Java baseline, secrets/env injection, and restart/logging policy. +- [ ] Validate each selected sidecar with a stable health/readiness path and one canonical RTGS flow before calling the first slice production-ready. +- [ ] Define the canonical mapping between Mojaloop events and: + - Fineract postings + - sidecar events + - on-chain settlement events +- [ ] Add a repo-backed Mojaloop integration runbook once endpoint details are confirmed. 
+
+### 12.4A HYBX sidecar integration
+
+- [ ] Audit and document the currently accessible HYBX sidecars:
+  - `mifos-fineract-sidecar`
+  - `mt103-hardcopy-sidecar`
+  - `off-ledger-2-on-ledger-sidecar`
+  - `securitization-engine-sidecar`
+  - `card-networks-sidecar`
+  - `server-funds-sidecar`
+  - `securities-sidecar` (if in scope)
+  - `flash-loan-xau-sidecar` (if in scope)
+- [ ] Define system boundaries and ownership for each sidecar:
+  - system-of-record
+  - message ingress / egress
+  - retry semantics
+  - auth and credential handling
+- [ ] Create a canonical end-to-end sidecar matrix linked from the RTGS runbook.
+
+### 12.5 Chain 138 settlement rail
+
+- [ ] Freeze the canonical on-chain settlement path for RTGS:
+  - DBIS / compliant settlement tokens
+  - MerchantSettlementRegistry
+  - WithdrawalEscrow
+  - reserve / oracle dependencies where applicable
+- [ ] Define the exact mapping from off-ledger settlement events to on-chain settlement confirmations.
+- [ ] Decide when `alltra-lifi-settlement` is in the critical RTGS path versus optional cross-chain / liquidity extension.
+- [ ] Produce a repo-backed RTGS settlement sequence diagram spanning Fineract ↔ sidecars ↔ Chain 138.
+
+### 12.6 Workflow and orchestration
+
+- [ ] Keep FireFly `6200` as the active primary workflow layer and preserve its config/image path.
+- [ ] Decide whether to rebuild `6201` as a real secondary FireFly node for HA or leave it permanently retired.
+- [ ] Define the event catalog and correlation model across:
+  - Fineract
+  - Mojaloop
+  - HYBX sidecars
+  - FireFly
+  - Chain 138
+  - regulatory package generation
+- [ ] Add compensating-action / retry design for cross-system failures.
+
+### 12.7 Additional Hyperledger layers needed
+
+- [ ] Decide whether **Hyperledger Aries** is required as an actual deployed identity / agent layer for DBIS RTGS. 
+- [ ] If Aries is in scope, define: + - agent placement + - wallet / DID model + - protocol flows + - relationship to Indy and credential verification +- [ ] Decide whether **Hyperledger AnonCreds** is required as part of the verifiable credential stack. +- [ ] If AnonCreds is in scope, define the issuer / holder / verifier model and where credential registries live. +- [ ] Decide whether **Hyperledger Ursa** is required as an explicit cryptographic dependency versus an indirect library/runtime concern. +- [ ] If Ursa is in scope, document where it is used in the identity / VC pipeline and what operational control it requires. +- [ ] Decide whether **Hyperledger Cacti** is actually needed in the RTGS interoperability path or remains optional / future-state. +- [ ] Keep **Hyperledger Caliper** in the program for RTGS performance validation and benchmark the final path when the stack is complete. + +### 12.8 Fabric / Indy runtime decision + +- [ ] If Fabric is required for the RTGS target architecture, deploy real workloads onto `6000-6002` and validate peer / orderer health. +- [ ] If Fabric is not required now, keep `6000-6002` classified as reserved placeholders and remove them from any “active stack” claims. +- [ ] If Indy is required for the RTGS target architecture, deploy real workloads onto `6400-6402` and validate validator / listener health. +- [ ] If Indy is not required now, keep `6400-6402` classified as reserved placeholders and remove them from any “active stack” claims. + +### 12.9 Regulatory / audit / ISO package + +- [ ] Finalize the institutional attestation and evidentiary package path for HYBX submissions. +- [ ] Finalize ISO 20022 vault manifest generation and hash anchoring policy. +- [ ] Finalize AML / sanctions / legal-finality memo workflow for production submissions. +- [ ] Ensure the RTGS path has a reproducible audit packet per settlement batch. 
+ +### 12.10 Production gate + +- [x] Canonical RTGS production checklist created and now maintained in [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](../03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) with columns: + - component + - current state + - required integration + - remaining task + - owner + - production gate +- [x] Initial HYBX sidecar boundary matrix created: [DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md](../03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md) +- [x] Initial Mojaloop status artifact created: [DBIS_MOJALOOP_INTEGRATION_STATUS.md](../03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md) +- [x] Initial identity-stack decision artifact created: [DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md](../03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md) +- [ ] Add a single “full RTGS E2E” production gate that only turns green when: + - Fineract / OMNL is complete + - HYBX sidecars are integrated + - Mojaloop integration is real and validated + - Chain 138 settlement path is validated + - required Hyperledger identity/workflow layers are deployed + - regulatory package generation passes + +--- + +## 13. Maintenance (135–139) - [x] **Runbook and script:** [OPERATIONAL_RUNBOOKS.md](../03-deployment/OPERATIONAL_RUNBOOKS.md) § Maintenance; `scripts/maintenance/daily-weekly-checks.sh [daily|weekly|all]` for 135–137. Schedule via cron (e.g. daily 08:00). - [x] **Script tested:** daily-weekly-checks.sh daily (explorer SKIP off-LAN, RPC OK). diff --git a/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md b/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md new file mode 100644 index 0000000..4038c10 --- /dev/null +++ b/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md @@ -0,0 +1,169 @@ +# DBIS Node Role Matrix + +**Last updated:** 2026-03-29 (UTC) — regenerate machine-derived rows: `bash scripts/docs/generate-dbis-node-role-matrix-md.sh` +**Status:** Active — infrastructure constitution for DBIS Chain 138 and colocated workloads. 
+ +## Purpose + +This matrix assigns **node type**, **preferred host placement**, **validator/signing role** (for Besu), and **security tier** per workload. It implements the entity-placement model in [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) (Sections 6–7) in a form operators can maintain. + +**Canonical pairs (keep in sync):** + +- Human detail and status: [ALL_VMIDS_ENDPOINTS.md](../04-configuration/ALL_VMIDS_ENDPOINTS.md) +- Machine-readable services: [config/proxmox-operational-template.json](../../config/proxmox-operational-template.json) + +When you change VMID, IP, hostname, or placement, update **ALL_VMIDS** and **operational-template.json** first, then regenerate the table below with this script (or edit the static sections manually). + +## Columns + +| Column | Meaning | +|--------|---------| +| **Entity owner** | DBIS Core, Central Bank, IFI, Regional Operator, etc. — use **TBD** until governance assigns. | +| **Region** | Geographic or site label — **TBD** until multi-site is formalized. | +| **IP note** | Flags duplicate IPv4 entries in the planning template. A duplicate means **shared or historical mapping**, not concurrent ownership — verify live owner in ALL_VMIDS or on-cluster. | +| **Runtime state** | Current disposition from the planning template, e.g. active, placeholder CT only, or retired standby. | +| **Preferred host** | Preferred Proxmox node (`r630-01`, `r630-02`, `ml110`, `any`). This is a planning target, not an assertion of current placement. | +| **Validator / signing** | For Chain 138 Besu: QBFT signer, sentry (no signer), RPC-only, or N/A. | +| **Security tier** | High-level zone: validator-tier, DMZ/RPC, edge ingress, identity/DLT, application, etc. 
| + +## Proxmox hypervisor nodes + +| Hostname | MGMT IP | Cluster | Role (summary) | +|----------|---------|---------|------------------| +| ml110 | 192.168.11.10 | h — verify | legacy_cluster_member_or_wan_aggregator | +| r630-01 | 192.168.11.11 | h | primary_compute_chain138_rpc_ccip_relay_sankofa | +| r630-02 | 192.168.11.12 | h | firefly_npmplus_secondary_mim4u_mifos_support | + +## Workloads (from operational template) + +Machine-derived rows below come from `services[]` in `config/proxmox-operational-template.json`. Duplicate IPv4 notes are warnings that the planning template still contains alternative or legacy ownership for the same address; they must not be read as concurrent live allocations. + +| VMID | Hostname | IPv4 | IP note | Node type | Runtime state | Entity owner | Region | Preferred host | Validator / signing | Security tier | +|------|----------|------|---------|-----------|---------------|--------------|--------|----------------|---------------------|---------------| +| — | order-redis-primary | 192.168.11.38 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 100 | proxmox-mail-gateway | 192.168.11.32 | unique in template | Infra LXC | unspecified | TBD | TBD | r630-02 | N/A | management / secrets | +| 101 | proxmox-datacenter-manager | 192.168.11.33 | unique in template | Infra LXC | unspecified | TBD | TBD | r630-02 | N/A | management / secrets | +| 102 | cloudflared | 192.168.11.34 | unique in template | Cloudflare tunnel | unspecified | TBD | TBD | r630-01 | N/A | edge ingress | +| 103 | omada | 192.168.11.30 | unique in template | Infra LXC | unspecified | TBD | TBD | r630-02 | N/A | management / secrets | +| 104 | gitea | 192.168.11.31 | unique in template | Infra LXC | unspecified | TBD | TBD | r630-02 | N/A | management / secrets | +| 105 | nginxproxymanager | 192.168.11.26 | unique in template | Legacy NPM | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 130 | 
monitoring-1 | 192.168.11.27 | unique in template | Monitoring | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 1000 | besu-validator-1 | 192.168.11.100 | unique in template | Besu validator | unspecified | TBD | TBD | r630-01 | QBFT signer | validator-tier | +| 1001 | besu-validator-2 | 192.168.11.101 | unique in template | Besu validator | unspecified | TBD | TBD | r630-01 | QBFT signer | validator-tier | +| 1002 | besu-validator-3 | 192.168.11.102 | unique in template | Besu validator | unspecified | TBD | TBD | r630-01 | QBFT signer | validator-tier | +| 1003 | besu-validator-4 | 192.168.11.103 | unique in template | Besu validator | unspecified | TBD | TBD | r630-01 | QBFT signer | validator-tier | +| 1004 | besu-validator-5 | 192.168.11.104 | unique in template | Besu validator | unspecified | TBD | TBD | r630-01 | QBFT signer | validator-tier | +| 1500 | besu-sentry-1 | 192.168.11.150 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1501 | besu-sentry-2 | 192.168.11.151 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1502 | besu-sentry-3 | 192.168.11.152 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1503 | besu-sentry-4 | 192.168.11.153 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1504 | besu-sentry-ali | 192.168.11.154 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1505 | besu-sentry-alltra-1 | 192.168.11.213 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1506 | besu-sentry-alltra-2 | 192.168.11.214 | unique in template | Besu sentry | unspecified | TBD | TBD | r630-01 | Sentry (no signer) | validator-tier | +| 1507 | besu-sentry-hybx-1 | 
192.168.11.244 | unique in template | Besu sentry | unspecified | TBD | TBD | ml110 | Sentry (no signer) | validator-tier | +| 1508 | besu-sentry-hybx-2 | 192.168.11.245 | unique in template | Besu sentry | unspecified | TBD | TBD | ml110 | Sentry (no signer) | validator-tier | +| 2101 | besu-rpc-core-1 | 192.168.11.211 | unique in template | Besu RPC (rpc_core) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2102 | besu-rpc-core-2 | 192.168.11.212 | unique in template | Besu RPC (rpc_core) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2103 | besu-rpc-core-thirdweb | 192.168.11.217 | unique in template | Besu RPC (rpc_core) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2201 | besu-rpc-public-1 | 192.168.11.221 | unique in template | Besu RPC (rpc_public) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2301 | besu-rpc-private-1 | 192.168.11.232 | unique in template | Besu RPC (rpc_private) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2303 | besu-rpc-ali-0x8a | 192.168.11.233 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2304 | besu-rpc-ali-0x1 | 192.168.11.234 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2305 | besu-rpc-luis-0x8a | 192.168.11.235 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2306 | besu-rpc-luis-0x1 | 192.168.11.236 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2307 | besu-rpc-putu-0x8a | 192.168.11.237 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2308 | besu-rpc-putu-0x1 | 192.168.11.238 | unique in template | Besu RPC (rpc_named) | unspecified | TBD | TBD | r630-01 | 
RPC only | DMZ / RPC exposure | +| 2400 | thirdweb-rpc-1 | 192.168.11.240 | unique in template | Besu RPC (rpc_thirdweb) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2401 | besu-rpc-thirdweb-0x8a-1 | 192.168.11.241 | unique in template | Besu RPC (rpc_thirdweb) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2402 | besu-rpc-thirdweb-0x8a-2 | 192.168.11.242 | unique in template | Besu RPC (rpc_thirdweb) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2403 | besu-rpc-thirdweb-0x8a-3 | 192.168.11.243 | unique in template | Besu RPC (rpc_thirdweb) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2500 | besu-rpc-alltra-1 | 192.168.11.172 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2501 | besu-rpc-alltra-2 | 192.168.11.173 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2502 | besu-rpc-alltra-3 | 192.168.11.174 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2503 | besu-rpc-hybx-1 | 192.168.11.246 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2504 | besu-rpc-hybx-2 | 192.168.11.247 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 2505 | besu-rpc-hybx-3 | 192.168.11.248 | unique in template | Besu RPC (rpc_alltra_hybx) | unspecified | TBD | TBD | r630-01 | RPC only | DMZ / RPC exposure | +| 3000 | ml-node-1 | 192.168.11.60 | unique in template | ML node | unspecified | TBD | TBD | ml110 | N/A | standard internal | +| 3001 | ml-node-2 | 192.168.11.61 | unique in template | ML node | unspecified | TBD | TBD | ml110 | N/A | standard internal | +| 3002 | ml-node-3 | 192.168.11.62 | unique in template | 
ML node | unspecified | TBD | TBD | ml110 | N/A | standard internal | +| 3003 | ml-node-4 | 192.168.11.63 | unique in template | ML node | unspecified | TBD | TBD | ml110 | N/A | standard internal | +| 3500 | oracle-publisher-1 | 192.168.11.29 | unique in template | Oracle publisher | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 3501 | ccip-monitor-1 | 192.168.11.28 | unique in template | CCIP monitor | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 5000 | blockscout-1 | 192.168.11.140 | unique in template | Blockscout | unspecified | TBD | TBD | r630-01 | N/A | standard internal | +| 5010 | tsunamiswap | 192.168.11.91 | unique in template | DeFi | unspecified | TBD | TBD | r630-01 | N/A | standard internal | +| 5200 | cacti-1 | 192.168.11.80 | unique in template | Cacti | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 5201 | cacti-alltra-1 | 192.168.11.177 | unique in template | Cacti | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 5202 | cacti-hybx-1 | 192.168.11.251 | unique in template | Cacti | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 5700 | dev-vm-gitops | 192.168.11.59 | unique in template | Dev | unspecified | TBD | TBD | any | N/A | standard internal | +| 5702 | ai-inf-1 | 192.168.11.82 | unique in template | AI infra | unspecified | TBD | TBD | r630-01 | N/A | standard internal | +| 5705 | ai-inf-2 | 192.168.11.86 | unique in template | AI infra | unspecified | TBD | TBD | r630-01 | N/A | standard internal | +| 5800 | mifos-fineract | 192.168.11.85 | unique in template | Mifos | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 5801 | dapp-smom | 192.168.11.58 | unique in template | DApp | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 6000 | fabric-1 | 192.168.11.65 | unique in template | Fabric | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6001 | fabric-alltra-1 | 192.168.11.178 | 
unique in template | Fabric | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6002 | fabric-hybx-1 | 192.168.11.252 | unique in template | Fabric | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6200 | firefly-1 | 192.168.11.35 | shared / non-concurrent mapping — verify live owner | FireFly | active_minimal_gateway | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6201 | firefly-ali-1 | 192.168.11.57 | unique in template | FireFly | retired_standby_until_rebuilt | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6400 | indy-1 | 192.168.11.64 | unique in template | Indy | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6401 | indy-alltra-1 | 192.168.11.179 | unique in template | Indy | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 6402 | indy-hybx-1 | 192.168.11.253 | unique in template | Indy | reserved_placeholder_stopped | TBD | TBD | r630-02 | N/A | identity / workflow DLT | +| 7800 | sankofa-api-1 | 192.168.11.50 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7801 | sankofa-portal-1 | 192.168.11.51 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7802 | sankofa-keycloak-1 | 192.168.11.52 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7803 | sankofa-postgres-1 | 192.168.11.53 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7804 | gov-portals-dev | 192.168.11.54 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7805 | sankofa-studio | 192.168.11.72 | unique in template | Sankofa / Phoenix | unspecified | TBD | TBD | r630-01 | N/A | application | +| 7810 | mim-web-1 | 192.168.11.37 | shared / non-concurrent mapping — verify 
live owner | MIM4U | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 7811 | mim-api-1 | 192.168.11.36 | shared / non-concurrent mapping — verify live owner | MIM4U | unspecified | TBD | TBD | r630-02 | N/A | standard internal | +| 8640 | vault-phoenix-1 | 192.168.11.200 | unique in template | HashiCorp Vault | unspecified | TBD | TBD | r630-01 | N/A | management / secrets | +| 8641 | vault-phoenix-2 | 192.168.11.215 | unique in template | HashiCorp Vault | unspecified | TBD | TBD | r630-01 | N/A | management / secrets | +| 8642 | vault-phoenix-3 | 192.168.11.202 | unique in template | HashiCorp Vault | unspecified | TBD | TBD | r630-01 | N/A | management / secrets | +| 10030 | order-identity | 192.168.11.40 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10040 | order-intake | 192.168.11.41 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10050 | order-finance | 192.168.11.49 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10060 | order-dataroom | 192.168.11.42 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10070 | order-legal | 192.168.11.87 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10080 | order-eresidency | 192.168.11.43 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10090 | order-portal-public | 192.168.11.36 | shared / non-concurrent mapping — verify live owner | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10091 | order-portal-internal | 192.168.11.35 | shared / non-concurrent mapping — verify live owner | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10092 | order-mcp-legal | 192.168.11.37 | shared / non-concurrent mapping — verify live owner | The 
Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10100 | dbis-postgres-primary | 192.168.11.105 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10101 | dbis-postgres-replica-1 | 192.168.11.106 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10120 | dbis-redis | 192.168.11.125 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10130 | dbis-frontend | 192.168.11.130 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10150 | dbis-api-primary | 192.168.11.155 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10151 | dbis-api-secondary | 192.168.11.156 | unique in template | DBIS stack | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10200 | order-prometheus | 192.168.11.46 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10201 | order-grafana | 192.168.11.47 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10202 | order-opensearch | 192.168.11.48 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10210 | order-haproxy | 192.168.11.39 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10230 | order-vault | 192.168.11.55 | unique in template | The Order service | unspecified | TBD | TBD | r630-01 | N/A | application | +| 10232 | ct10232 | 192.168.11.56 | unique in template | General CT | unspecified | TBD | TBD | r630-01 | N/A | standard internal | +| 10233 | npmplus-primary | 192.168.11.167 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-01 | N/A | edge ingress | +| 10234 | npmplus-secondary | 192.168.11.168 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-02 
| N/A | edge ingress | +| 10235 | npmplus-alltra-hybx | 192.168.11.169 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-02 | N/A | edge ingress | +| 10236 | npmplus-fourth-dev | 192.168.11.170 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-02 | N/A | edge ingress | +| 10237 | npmplus-mifos | 192.168.11.171 | unique in template | NPMplus ingress | unspecified | TBD | TBD | r630-02 | N/A | edge ingress | + +## Supplementary rows (not in template JSON) + +These appear in [ALL_VMIDS_ENDPOINTS.md](../04-configuration/ALL_VMIDS_ENDPOINTS.md) but are not modeled as `services[]` entries in `proxmox-operational-template.json`. They are **manual supplements**, not generator-backed source of truth. + +| VMID | Hostname | IPv4 | IP note | Node type | Runtime state | Entity owner | Region | Preferred host | Validator / signing | Security tier | +|------|----------|------|---------|-----------|---------------|--------------|--------|----------------|---------------------|---------------| +| 106 | redis-rpc-translator | 192.168.11.110 | manual supplement | RPC translator (Redis) | manual supplement | TBD | TBD | r630-01 (per ALL_VMIDS) | N/A | DMZ / RPC exposure | +| 107 | web3signer-rpc-translator | 192.168.11.111 | manual supplement | RPC translator (Web3Signer) | manual supplement | TBD | TBD | r630-01 | N/A | DMZ / RPC exposure | +| 108 | vault-rpc-translator | 192.168.11.112 | manual supplement | RPC translator (Vault) | manual supplement | TBD | TBD | r630-01 | N/A | management / secrets | + +## Host-level services (no VMID) + +| Name | Location | Node type | Notes | +|------|----------|-----------|-------| +| CCIP relay | r630-01 host `/opt/smom-dbis-138/services/relay` | Cross-chain relay | Uses RPC (e.g. VMID 2201); see [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md), [docs/07-ccip/](../07-ccip/). 
| + +## Related + +- [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) +- [CHAIN138_CANONICAL_NETWORK_ROLES_VALIDATORS_SENTRY_AND_RPC.md](CHAIN138_CANONICAL_NETWORK_ROLES_VALIDATORS_SENTRY_AND_RPC.md) +- [VMID_ALLOCATION_FINAL.md](VMID_ALLOCATION_FINAL.md) + diff --git a/docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md b/docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md new file mode 100644 index 0000000..1826fc0 --- /dev/null +++ b/docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md @@ -0,0 +1,69 @@ +# DBIS Phase 2 — Proxmox sovereignization roadmap + +**Last updated:** 2026-03-28 +**Purpose:** Close the gap between **today’s** Proxmox footprint (2–3 active cluster nodes, ZFS/LVM-backed guests, VLAN 11 LAN) and the **target** in [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) Sections 4–5 and 8 (multi-node HA, Ceph-backed storage, stronger segmentation, standardized templates). + +**Current ground truth:** [PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md](../03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md), [config/proxmox-operational-template.json](../../config/proxmox-operational-template.json), [STORAGE_GROWTH_AND_HEALTH.md](../04-configuration/STORAGE_GROWTH_AND_HEALTH.md). 
+ +--- + +## Current state (summary) + +| Area | As deployed (typical) | Master plan target | +|------|----------------------|-------------------| +| Cluster | Corosync cluster **h** on ml110 + r630-01 + r630-02 (ml110 **may** be repurposed — verify Phase 1) | 3+ control-oriented nodes, odd quorum, HA services | +| Storage | Local ZFS / LVM thin pools per host | Ceph OSD tier + pools for VM disks and/or RBD | +| Network | Primary **192.168.11.0/24**, VLAN 11, UDM Pro edge, NPMplus ingress | Additional VLANs: storage replication, validator-only, identity, explicit DMZ mapping | +| Workloads | Chain 138 Besu validators/RPC, Hyperledger CTs, apps — see [DBIS_NODE_ROLE_MATRIX.md](DBIS_NODE_ROLE_MATRIX.md) | Same roles, **template-standardized** provisioning | + +--- + +## Milestone 1 — Cluster quorum and fleet expansion + +- Bring **r630-03+** online per [R630_13_NODE_DOD_HA_MASTER_PLAN.md](R630_13_NODE_DOD_HA_MASTER_PLAN.md) and [11-references/13_NODE_AND_ASSETS_BRING_ONLINE_CHECKLIST.md](../11-references/13_NODE_AND_ASSETS_BRING_ONLINE_CHECKLIST.md). +- Maintain **odd** node count for Corosync quorum; use qdevice if temporarily even-count during ml110 migration ([UDM_PRO_PROXMOX_CLUSTER.md](../04-configuration/UDM_PRO_PROXMOX_CLUSTER.md)). + +--- + +## Milestone 2 — ML110 migration / WAN aggregator + +- **Before** repurposing ml110 to OPNsense/pfSense ([ML110_OPNSENSE_PFSENSE_WAN_AGGREGATOR.md](../11-references/ML110_OPNSENSE_PFSENSE_WAN_AGGREGATOR.md)): migrate all remaining CT/VM to R630s ([NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md)). +- Re-document **physical inventory** row for `.10` after cutover ([PHYSICAL_HARDWARE_INVENTORY.md](PHYSICAL_HARDWARE_INVENTORY.md)). 
+ +--- + +## Milestone 3 — Ceph introduction (decision + prerequisites) + +- **Decision record:** whether Ceph replaces or complements ZFS/LVM for new workloads; minimum network (10G storage net, jumbo frames if used), disk layout, and JBOD attachment per [HARDWARE_INVENTORY_MASTER.md](../11-references/HARDWARE_INVENTORY_MASTER.md). +- Pilot: non-production pool → migrate one test CT → expand OSD count. + +--- + +## Milestone 4 — Network segmentation (incremental) + +Map master plan layers to implementable steps: + +1. Dedicated **storage replication** VLAN (Ceph backhaul or ZFS sync). +2. **Validator / P2P** constraints (firewall rules between sentry and RPC tiers — align [CHAIN138_CANONICAL_NETWORK_ROLES_VALIDATORS_SENTRY_AND_RPC.md](CHAIN138_CANONICAL_NETWORK_ROLES_VALIDATORS_SENTRY_AND_RPC.md)). +3. **Identity / Indy** tier isolation when multi-entity governance requires it. + +--- + +## Milestone 5 — VM / CT templates (Section 7 of master plan) + +- Align [PROXMOX_VM_CREATION_RUNBOOK.md](../03-deployment/PROXMOX_VM_CREATION_RUNBOOK.md) with template types: Identity (Indy/Aries), Settlement (Besu), Institutional (Fabric), Workflow (FireFly), Observability (Explorer/monitoring). +- Encode **preferred_node** and sizing in [DBIS_NODE_ROLE_MATRIX.md](DBIS_NODE_ROLE_MATRIX.md) and sync [proxmox-operational-template.json](../../config/proxmox-operational-template.json). + +--- + +## Milestone 6 — Backup and DR alignment (master plan Sections 8, 16) + +- Hourly/daily snapshot policy per guest tier; cross-site replication targets (RPO/RTO) documented outside this file when available. +- Reference: existing backup scripts for NPMplus and operator checklist. 
+ +--- + +## Related + +- [PHASE1_DISCOVERY_RUNBOOK.md](../03-deployment/PHASE1_DISCOVERY_RUNBOOK.md) +- [DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](../03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md) diff --git a/docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md b/docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md index c9608e3..f2ebfa0 100644 --- a/docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md +++ b/docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md @@ -1,7 +1,7 @@ # Physical Hardware Inventory -**Last Updated:** 2026-02-13 -**Document Version:** 1.1 +**Last Updated:** 2026-03-28 +**Document Version:** 1.2 **Status:** Active Documentation --- @@ -14,12 +14,12 @@ This document is the placeholder for the physical hardware inventory (hosts, IPs | Host | IP | Role | NICs | |------|-----|------|------| -| ml110 | 192.168.11.10 | Proxmox, Besu nodes | 2× Broadcom BCM5717 1GbE | -| r630-01 | 192.168.11.11 | Infrastructure, RPC | 4× Broadcom BCM5720 1GbE | -| r630-02 | 192.168.11.12 | Firefly, NPMplus secondary | 4× Broadcom BCM57800 1/10GbE | +| ml110 | 192.168.11.10 | **Transitional:** historically Proxmox + Besu/sentry/ML workloads; **target** is OPNsense/pfSense WAN aggregator between cable modems and dual UDM Pro — see [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md). Confirm live role with `pvecm status` / `pct list` on `.10` (Phase 1: `scripts/verify/run-phase1-discovery.sh`). 
| 2× Broadcom BCM5717 1GbE | +| r630-01 | 192.168.11.11 | Infrastructure, Chain 138 RPC, Sankofa/Order, CCIP relay host | 4× Broadcom BCM5720 1GbE | +| r630-02 | 192.168.11.12 | FireFly, Fabric/Indy/Cacti, NPMplus instances, MIM4U, Mifos | 4× Broadcom BCM57800 1/10GbE | +| UDM Pro (edge) | 76.53.10.34 | Edge router | — | -**See:** [PROXMOX_HOSTS_COMPLETE_HARDWARE_CONFIG.md](PROXMOX_HOSTS_COMPLETE_HARDWARE_CONFIG.md), [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md), [NETWORK_ARCHITECTURE.md](NETWORK_ARCHITECTURE.md), [VMID_ALLOCATION_FINAL.md](VMID_ALLOCATION_FINAL.md). +**See:** [PROXMOX_HOSTS_COMPLETE_HARDWARE_CONFIG.md](PROXMOX_HOSTS_COMPLETE_HARDWARE_CONFIG.md), [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md), [NETWORK_ARCHITECTURE.md](NETWORK_ARCHITECTURE.md), [VMID_ALLOCATION_FINAL.md](VMID_ALLOCATION_FINAL.md), [DBIS_NODE_ROLE_MATRIX.md](DBIS_NODE_ROLE_MATRIX.md). --- diff --git a/docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md b/docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md new file mode 100644 index 0000000..eb100e0 --- /dev/null +++ b/docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md @@ -0,0 +1,23 @@ +# Caliper performance hook — Chain 138 (Besu) + +**Last updated:** 2026-03-28 +**Purpose:** Satisfy [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) Section 14 without vendoring Caliper into this repository. + +## Approach + +1. Use upstream [Hyperledger Caliper](https://github.com/hyperledger/caliper) (npm package `@hyperledger/caliper-cli`). +2. Create a **separate** working directory (or CI job) with: + - `networkconfig.json` pointing `url` to Chain 138 HTTP RPC (prefer an isolated load-test node, not production public RPC). + - `benchmarks/` with a minimal `read` workload (`eth_blockNumber`, `eth_getBlockByNumber`) before write-heavy contracts. +3. Run: `npx caliper launch manager --caliper-workspace . 
--caliper-networkconfig networkconfig.json --caliper-benchconfig benchmarks/config.yaml` +4. Archive results (HTML/JSON) next to Phase 1 discovery reports if desired: `reports/phase1-discovery/` or `reports/caliper/`. + +## Safety + +- Use **low** transaction rates first; Besu validators and RPC tier are production assets. +- Do not point Caliper at **validator** JSON-RPC ports; use **RPC tier** only. +- Align gas and chain ID with `smom-dbis-138/.env` and [DEPLOYMENT_ORDER_OF_OPERATIONS.md](DEPLOYMENT_ORDER_OF_OPERATIONS.md). + +## Wrapper + +`bash scripts/verify/print-caliper-chain138-stub.sh` prints this path and suggested env vars (no network I/O). diff --git a/docs/03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md b/docs/03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md new file mode 100644 index 0000000..37ca6e0 --- /dev/null +++ b/docs/03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md @@ -0,0 +1,61 @@ +# DBIS HYBX Sidecar Boundary Matrix + +**Last updated:** 2026-03-28 +**Purpose:** Define the current boundary, role, and likely RTGS relevance of the HYBX sidecar repositories currently available in the local workspace. This is a repo-backed companion to the RTGS E2E requirements matrix. + +## Interpretation + +- `Available locally` means the repository exists in `/home/intlc/projects/HYBX_Sidecars`. +- `RTGS relevance` means whether the sidecar is likely part of the initial production RTGS slice, not whether it is interesting or strategically useful. +- `Boundary frozen` means the sidecar has a sufficiently clear place in the RTGS architecture to be used in implementation planning. + +## Matrix + +| Sidecar | Local repo state | Core purpose | Key internal modules / evidence | RTGS relevance | Boundary frozen? 
| Notes | +|---------|------------------|--------------|----------------------------------|----------------|------------------|-------| +| `mifos-fineract-sidecar` | Available locally | Compliance and settlement sidecar for Mifos/Fineract | `scsm-api`, `scsm-gateway`, `scsm-compliance`, `scsm-posting`, `scsm-fineract`, `scsm-settlement`, `scsm-reconciliation`, `scsm-audit`, `scsm-events`, `scsm-observability`, `scsm-app` | **High** | **Partial** | This is the strongest candidate for the canonical OMNL/HYBX RTGS sidecar because it already models compliance, posting, settlement, reconciliation, audit, and Fineract integration. | +| `mt103-hardcopy-sidecar` | Available locally | MT-103 hardcopy ingest and deposit-envelope correlation | Go service with document/deposit/payload/submit flows | **Medium** | Partial | Useful for evidence/audit and documentary payment flows, but not necessarily a mandatory first-slice RTGS core dependency. | +| `off-ledger-2-on-ledger-sidecar` | Available locally | XAU-collateralized off-ledger to on-ledger conversion | Collateral registry, orchestrator, ledger adapter, API plan | **High** | Partial | Strong candidate for the bridge between off-ledger payment events and on-ledger liquidity/settlement on Chain 138. | +| `securitization-engine-sidecar` | Available locally | Regulatory accounting and securitization engine | Asset classification, risk/capital, accounting, securitization, reporting | **Medium** | Partial | Important for structured products, capital treatment, and reporting, but likely adjacent to core RTGS rather than in the narrowest first production slice. 
| +| `card-networks-sidecar` | Available locally | Card auth, clearing, settlement, disputes | `cardnet-auth`, `cardnet-clearing`, `cardnet-fineract`, `cardnet-settlement`, `cardnet-reconciliation`, `cardnet-posting`, `cardnet-audit` | **Medium** | Partial | Highly relevant if card-network settlement is part of the DBIS/HYBX program; otherwise a later rail-specific extension. | +| `server-funds-sidecar` | Available locally | Multi-rail transfers and settlement events | `funds-api`, `funds-transfers`, `funds-settlement`, `funds-reconciliation`, `funds-posting`, `funds-fineract` | **High** | Partial | Strong candidate for server-to-server treasury/funds movement and may be central if the RTGS program uses server-funds orchestration. | +| `securities-sidecar` | Available locally | Securities instruction, settlement, and reconciliation | `securities-instruments`, `securities-instructions`, `securities-settlement`, `securities-reconciliation`, `securities-posting` | **Low/Medium** | Planned | More naturally a securities-settlement extension than a mandatory first RTGS slice. | +| `flash-loan-xau-sidecar` | Available locally | Atomic XAU / LiXAU flash-loan flows | `xau-atomic`, `xau-settlement`, `xau-reconciliation`, `xau-posting` | **Low/Medium** | Planned | Valuable for specialized liquidity/XAU flows, but not required for the narrowest RTGS baseline. | + +## Recommended first production slice + +For the narrowest credible RTGS implementation, the strongest initial sidecar candidates are: + +1. `mifos-fineract-sidecar` +2. `server-funds-sidecar` +3. 
`off-ledger-2-on-ledger-sidecar` + +Those three cover the most direct path across: + +- Fineract integration +- compliance / posting / settlement / reconciliation +- treasury/server-funds orchestration +- off-ledger to on-ledger conversion + +## Recommended later-phase sidecars + +- `mt103-hardcopy-sidecar` +- `card-networks-sidecar` +- `securitization-engine-sidecar` +- `securities-sidecar` +- `flash-loan-xau-sidecar` + +These should be added only when the RTGS program confirms those rails or reporting models are actually in scope. + +## Boundary decisions still needed + +1. Which sidecar owns the canonical settlement orchestration record? +2. Which sidecar owns final posting responsibility versus suggested-entry generation? +3. Which sidecar emits the canonical event consumed by FireFly or on-chain settlement? +4. Which sidecar is system-of-record versus adapter versus evidence generator? + +## Related artifacts + +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [GITEA_HYBX_ORGANIZATION_AND_REPOS.md](../11-references/GITEA_HYBX_ORGANIZATION_AND_REPOS.md) +- [HYBX_BATCH_001_OPERATOR_CHECKLIST.md](../04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md) diff --git a/docs/03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md b/docs/03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md new file mode 100644 index 0000000..e5a8852 --- /dev/null +++ b/docs/03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md @@ -0,0 +1,98 @@ +# DBIS Hyperledger Identity Stack Decision + +**Last updated:** 2026-03-28 +**Purpose:** Make the Aries / AnonCreds / Ursa decision path explicit for the DBIS RTGS program so these layers do not remain vague “maybe required” items. + +## Current conclusion + +For the current DBIS RTGS program, the identity stack is **not yet frozen** beyond the placeholder Indy inventory. 
The repo and live environment do **not** currently prove: + +- a deployed Aries agent layer +- a deployed AnonCreds issuance / verification flow +- an explicit Ursa runtime dependency that operators must manage directly + +## Recommended decision framework + +### Option A — Minimal first production slice + +Use: + +- Chain 138 / Besu +- FireFly primary +- OMNL / Fineract +- selected HYBX sidecars + +Do **not** require Aries / AnonCreds / Ursa in the first production slice. + +Use this option if: + +- the initial RTGS program does not require decentralized credential exchange +- institution identity and compliance are satisfied through existing banking / regulatory processes +- the team wants to avoid holding up settlement on unresolved identity-stack deployment work + +### Option B — Identity-enhanced RTGS slice + +Include: + +- Aries agents +- AnonCreds issuer / holder / verifier roles +- Ursa-backed cryptographic path if required by the selected stack + +Use this option if: + +- the RTGS design requires verifiable credentials as a first-class runtime dependency +- participant admission, authorization, or compliance checks depend on decentralized identity flows +- DBIS wants credential verification to be part of the operational settlement path, not only a future capability + +## Recommended default + +**Recommended default:** Option A for the first production slice. + +Reason: + +- Aries / AnonCreds / Ursa are not currently deployed or proven in this environment. +- Requiring them now would expand the critical path materially. +- The current gating problems are still in banking-rail orchestration and interoperability, not identity-agent runtime. 
+ +## What must be decided if Option B is chosen + +### Aries + +- agent placement +- wallet / DID model +- mediation / routing model +- protocol set used in production +- operational ownership and observability + +### AnonCreds + +- issuer role +- holder role +- verifier role +- schema and credential-definition lifecycle +- revocation model + +### Ursa + +- whether it is an explicit operator-managed dependency +- whether it is only an indirect library/runtime concern +- what cryptographic controls or attestations it adds to the program + +## Production-gate rule + +- If Option A is chosen: + - Aries / AnonCreds / Ursa are marked `out of scope for first production slice` + - they do not block first RTGS activation +- If Option B is chosen: + - none of them can stay as planning-only items + - they must have: + - deployment runbooks + - runtime health checks + - ownership + - end-to-end validation + +## Related artifacts + +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) +- [TODO_TASK_LIST_MASTER.md](../00-meta/TODO_TASK_LIST_MASTER.md) diff --git a/docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md b/docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md new file mode 100644 index 0000000..fd42e60 --- /dev/null +++ b/docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md @@ -0,0 +1,68 @@ +# DBIS Hyperledger Runtime Status + +**Last Reviewed:** 2026-03-28 +**Purpose:** Concise app-level status table for the non-Besu Hyperledger footprint currently hosted on Proxmox. This complements the VMID inventory and discovery runbooks by recording what was actually verified inside the running containers. 
+ +## Scope + +This document summarizes the latest operator verification for: + +- FireFly CTs: `6200`, `6201` +- Fabric CTs: `6000`, `6001`, `6002` +- Indy CTs: `6400`, `6401`, `6402` + +The checks were based on: + +- `pct status` +- in-container process checks +- in-container listener checks +- FireFly API / Postgres / IPFS checks where applicable + +## Current status table + +| VMID | Service family | CT status | App-level status | Listening ports / probe | Notes | +|------|----------------|-----------|------------------|--------------------------|-------| +| `6200` | FireFly primary | Running | Healthy minimal local gateway | `5000/tcp` FireFly API, `5432/tcp` Postgres, `5001/tcp` IPFS | `firefly-core` restored on `ghcr.io/hyperledger/firefly:v1.2.0`; `GET /api/v1/status` returned `200`; Postgres `pg_isready` passed; IPFS version probe passed | +| `6201` | FireFly secondary | Stopped | Formally retired until rebuilt | None verified | CT exists in inventory, but the rootfs is effectively empty and no valid FireFly deployment footprint was found. Treat this as retired / standby metadata only until it is intentionally rebuilt as a real secondary node. | +| `6000` | Fabric primary | Stopped | Reserved placeholder | None active | App-native checks found no active Fabric peer/orderer/couchdb processes, no expected listeners such as `7050` / `7051`, and no meaningful Fabric payload under `/opt`, `/etc`, or `/var`. The CT has now been stopped and retained only as a reserved placeholder. | +| `6001` | Fabric secondary | Stopped | Reserved placeholder | None active | Same disposition as `6000`: no proven Fabric application payload or listeners, now stopped and reserved only as placeholder inventory. | +| `6002` | Fabric tertiary | Stopped | Reserved placeholder | None active | Same disposition as `6000`: no proven Fabric application payload or listeners, now stopped and reserved only as placeholder inventory. 
| +| `6400` | Indy primary | Stopped | Reserved placeholder | None active | App-native checks found no active Indy-related processes, no expected listeners such as `9701`-`9708`, and no meaningful Indy payload under `/opt`, `/etc`, or `/var`. The CT has now been stopped and retained only as a reserved placeholder. | +| `6401` | Indy secondary | Stopped | Reserved placeholder | None active | Same disposition as `6400`: no proven Indy application payload or listeners, now stopped and reserved only as placeholder inventory. | +| `6402` | Indy tertiary | Stopped | Reserved placeholder | None active | Same disposition as `6400`: no proven Indy application payload or listeners, now stopped and reserved only as placeholder inventory. | + +## Interpretation + +### Confirmed working now + +- FireFly primary (`6200`) is restored enough to provide a working local FireFly API backed by Postgres and IPFS. + +### Present only as reserved placeholders right now + +- Fabric CTs (`6000`-`6002`) +- Indy CTs (`6400`-`6402`) + +These should be described as reserved placeholder inventory only, not as active Fabric or Indy application nodes. Current app-native validation found no meaningful service payload, processes, or expected listeners inside those CTs, and they have now been stopped to match that reality. + +### Not currently active + +- FireFly secondary (`6201`) should be treated as formally retired / standby metadata unless it is intentionally rebuilt and verified. + +## Operational follow-up + +1. Keep `6200` under observation and preserve its working config/image path. +2. Do not force `6201` online unless its intended role and deployment assets are re-established from scratch. +3. For Fabric and Indy, the next step is no longer generic validation. It is either: + - deploy real app payloads onto these reserved CTs and verify them, or + - leave them stopped and classified as reserved placeholders rather than active DLT workloads. +4. 
Any governance or architecture document should distinguish: + - `deployed and app-healthy` + - `container present only` + - `planned / aspirational` + +## Related artifacts + +- [docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) +- [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](PHASE1_DISCOVERY_RUNBOOK.md) +- [docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md) +- [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) diff --git a/docs/03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md b/docs/03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md new file mode 100644 index 0000000..f8fc17f --- /dev/null +++ b/docs/03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md @@ -0,0 +1,71 @@ +# DBIS Mojaloop Integration Status + +**Last updated:** 2026-03-28 +**Purpose:** Record the current status of Mojaloop in the DBIS RTGS program so that the architecture does not imply a live switch integration that has not yet been evidenced in the repository or current environment. 
+ +## Current conclusion + +**Mojaloop is in scope as a target payments interoperability layer, but it is not yet evidenced as a live, repo-backed DBIS integration path in this workspace.** + +## What is currently available + +- DBIS / OMNL / Fineract operator scripts and documentation +- HYBX sidecar repositories +- Chain 138 settlement baseline +- RTGS planning artifacts that mention Mojaloop as a required integration category + +## What is not yet evidenced + +- live Mojaloop switch endpoint URLs +- live auth / credential model +- callback contract for transfer state changes +- quote / transfer / settlement-window payload mapping +- a repo-backed Mojaloop runbook in this workspace +- a confirmed mapping from Mojaloop transfer lifecycle to: + - Fineract posting + - HYBX sidecar orchestration + - Chain 138 settlement confirmation + +## Required inputs before implementation + +1. Exact Mojaloop environment(s) available to HYBX / DBIS +2. Endpoint list: + - quotes + - transfers + - parties + - callbacks / notifications + - settlement or hub-side reporting endpoints +3. Auth model: + - mTLS + - bearer/token + - header and signature requirements +4. Canonical settlement semantics: + - prefunded or net settlement + - window / batch behavior + - reversal rules +5. Event ownership: + - which system is source of truth for transfer state + - which system triggers on-chain settlement + +## Decision rule + +Until the above is available, Mojaloop should be treated as: + +- `Planned` in the RTGS matrix +- `not yet a production blocker for the narrowest non-Mojaloop RTGS slice` +- `a production blocker for any RTGS claim that explicitly includes Mojaloop interoperability` + +## Next implementation steps + +1. Obtain the exact Mojaloop endpoint and auth contract currently available to HYBX. +2. Create a DBIS/HYBX Mojaloop integration runbook in this repo. +3. 
Freeze the message mapping between Mojaloop events and: + - Fineract / OMNL journal events + - HYBX sidecar events + - Chain 138 settlement events +4. Add a testable production gate row once a real endpoint contract exists. + +## Related artifacts + +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [TODO_TASK_LIST_MASTER.md](../00-meta/TODO_TASK_LIST_MASTER.md) diff --git a/docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md b/docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md new file mode 100644 index 0000000..608ee9a --- /dev/null +++ b/docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md @@ -0,0 +1,433 @@ +# DBIS OMNL → Indonesia / BNI E2E Integration Blueprint + +**Last updated:** 2026-03-29 +**Purpose:** Detailed end-to-end integration blueprint for moving value from OMNL as central-bank / settlement authority to Indonesian beneficiary banks, with FX, correspondent banking, messaging, reconciliation, and optional Chain 138 settlement augmentation. + +## 1. Scope + +This blueprint covers the full production path for: + +- OMNL as settlement and banking ledger authority +- Indonesian beneficiary institutions such as Bank Kanaya and BNI-connected flows +- FX conversion and revaluation +- domestic and correspondent-bank settlement +- HYBX sidecar orchestration +- optional Chain 138 anchoring / settlement confirmation + +This document is intentionally broader than the current deployed slice. It defines everything required to make the system fully production-complete. + +## 2. Participant model + +### Mandatory participant classes + +1. **OMNL / central bank rail** + - source of settlement authority + - operator of Fineract ledger + - owner of reserve, treasury, and reporting policy + +2. **HYBX / sidecar integration layer** + - compliance + - posting + - settlement orchestration + - reconciliation + - audit package generation + +3. 
**Indonesian beneficiary bank** + - Bank Kanaya in current repo-backed materials + - BNI or BNI-connected domestic banking path for broader production rollout + +4. **Global correspondent / liquidity banks** + - USD / EUR / multicurrency correspondents + - nostro / vostro counterparties + - statement and confirmation providers + +5. **Chain 138 settlement lane** + - optional but strategically important if on-ledger finality is part of the regulated operating model + +6. **Depository / custody / liquidity-control layers** + - depository / CSD role for asset-register and settlement-touch scenarios + - global custodian role for safekeeping, statements, and asset servicing + - FX pricing / dealing engine for rate ownership and booking policy + - liquidity pooling and aggregation engine plus source adapters for funding decisions + +## 3. Full end-to-end stages + +### Stage 0 — Static setup + +Required before live value movement: + +- OMNL tenant and credentials frozen +- participant offices created +- beneficiary offices mapped +- GL chart complete +- FX reserve / revaluation accounts complete +- FX pricing hierarchy and quote-locking policy frozen +- liquidity source inventory and prioritization policy frozen +- depository / custody operating model frozen for any in-scope asset-backed or safekeeping flow +- payment types and maker-checker policy frozen +- sidecar-to-Fineract auth contract frozen +- external bank routing matrix frozen +- regulatory package template frozen + +### Stage 1 — Payment or settlement initiation + +Possible initiators: + +- OMNL treasury operator +- HYBX sidecar +- external banking system +- Mojaloop switch if later included +- ISO 20022 or MT gateway + +Required artifacts: + +- instruction id +- end-to-end id +- message id +- correlation id +- counterparty and beneficiary identifiers +- amount, currency, value date +- purpose / regulatory narrative + +### Stage 2 — Compliance and sanctions controls + +Required checks: + +- KYC / KYB status +- 
sanctions and watchlist screening +- limit checks +- liquidity and prefunding check +- source-of-liquidity selection and approval +- market conduct / rate authorization check +- jurisdictional eligibility + +Required outputs: + +- compliance decision +- reason codes +- approval / rejection metadata +- audit payload hash + +### Stage 3 — FX pricing and trade capture + +Required capabilities: + +- direct or triangulated rate determination +- XAU-based triangulation where required by current OMNL methodology +- rate-source reference +- trade timestamp and value date +- spread / fee logic +- approved trader / operator identity +- pricing-engine or dealer ownership of the approved quote + +Required records: + +- source currency +- destination currency +- quoted amount +- settlement amount +- rate +- fee / spread component +- realized vs unrealized P&L handling +- quote id and liquidity-decision reference + +### Stage 4 — OMNL accounting and posting + +Required journal-entry families: + +1. source debit +2. beneficiary or settlement credit +3. due-to / due-from interoffice leg where applicable +4. FX reserve / treasury leg where applicable +5. realized FX gain/loss leg where applicable +6. 
accrued fee leg where applicable + +Mandatory OMNL data points: + +- `officeId` +- `glAccountId` +- `transactionDate` +- `currencyCode` +- `comments` +- transaction reference + +### Stage 5 — External banking message exchange + +#### 5.1 Domestic Indonesia path + +For Bank Kanaya / BNI-style domestic beneficiary credit: + +- payment or settlement instruction generated +- local beneficiary validation completed +- beneficiary account / institution reference resolved +- domestic reporting obligations attached + +#### 5.2 Correspondent-bank path + +For global-bank and cross-border settlement: + +- route to correspondent or settlement bank selected +- nostro / vostro account chosen +- prefunding / cover logic confirmed +- message dispatched and acknowledged +- custody / safekeeping instructions attached where the flow involves held assets or global-custodian reporting + +### Stage 6 — Funds movement and settlement confirmation + +Required evidence: + +- bank acceptance / status message +- credit confirmation or rejection +- statement extract or advice +- confirmation of beneficiary-bank receipt +- unresolved exception queue if delayed +- custody statement / servicing reference where applicable + +### Stage 7 — Reconciliation and package generation + +Required reconciliations: + +1. sidecar request vs OMNL journal +2. OMNL journal vs office balances +3. FX trade blotter vs accounting postings +4. external bank confirmations vs OMNL settlement state +5. on-chain event vs OMNL event where chain leg exists +6. asset register / custody statement vs settlement state where depository flows apply +7. liquidity decision vs selected funding source vs actual settlement usage + +Required evidence outputs: + +- transaction package snapshot +- journal detail +- recent journal entries +- computed balances +- payload hash files +- ISO / SWIFT message archive references +- prudential and BI/OJK crosswalks + +## 4. 
Required message families + +### ISO 20022 + +| Message | Role in flow | +|--------|---------------| +| `pain.001` | customer or enterprise initiation | +| `pacs.008` | FI-to-FI customer credit transfer | +| `pacs.009` | interbank settlement | +| `pacs.002` | payment status report | +| `camt.052` | intraday account report | +| `camt.053` | statement | +| `camt.054` | debit/credit notification | + +### SWIFT FIN / legacy + +| Message | Role in flow | +|--------|---------------| +| `MT103` | customer transfer | +| `MT202` / `MT202 COV` | bank transfer / cover | +| `MT910` | credit confirmation where needed | +| `MT950` | statement where legacy paths require it | + +### Internal / platform-specific + +| Message | Role | +|--------|------| +| sidecar transfer envelope | canonical business payload | +| OMNL journal response | accounting confirmation | +| settlement reference manifest | cross-system correlation | +| chain settlement event | optional on-ledger finality evidence | + +## 5. BNI-specific and Indonesia-specific requirements + +### What must exist for a BNI-connected production path + +1. **BNI counterparty profile** + - institution identifiers + - beneficiary validation rules + - account structure + - allowed currency pairs + - reporting obligations + +2. **Domestic payment / settlement route definition** + - whether BNI is: + - direct beneficiary bank + - intermediary settlement bank + - correspondent / nostro bank + - final message set per route + +3. **Indonesia regulatory obligations** + - BI reporting crosswalk + - OJK prudential bridge + - FX reporting obligations + - large exposure / related-party handling + - settlement finality memo + +4. **Operational controls** + - cut-off times + - business dates / value dates + - holiday calendars + - exception and return handling + - maker-checker approvals + +### Current state + +- Bank Kanaya path is documented in repo-backed material. 
+- BNI-specific live endpoint, auth, and correspondent contract are not yet evidenced in this workspace. +- Therefore the BNI lane is a required integration blueprint item, not a completed deployment. + +## 6. Required funds-movement model + +### 6.1 OMNL-only book transfer + +Used when both parties settle on OMNL books. + +Required: +- interoffice mapping +- due-to / due-from treatment +- no external correspondent required + +### 6.2 OMNL to domestic beneficiary bank + +Required: +- beneficiary institution mapping +- outbound settlement message +- inbound confirmation +- domestic regulatory reference + +### 6.3 OMNL to global correspondent / global bank + +Required: +- nostro selection +- cover / prefunding policy +- message dispatch +- statement reconciliation +- confirmation of receipt / finality + +## 7. Required sidecar integrations + +### `mifos-fineract-sidecar` + +Required responsibilities: +- canonical transfer ingest +- compliance check invocation +- OMNL posting +- transaction status tracking +- audit payload preservation + +### `server-funds-sidecar` + +Required responsibilities: +- treasury approval and release +- limit checks +- prefunding and source-of-funds orchestration +- status / approval / exception workflow +- handoff to liquidity pooling and source-adapter decisions + +### `off-ledger-2-on-ledger-sidecar` + +Required responsibilities: +- translate approved off-ledger event into on-ledger settlement action +- attach rates, conversion basis, and settlement refs +- record chain transaction linkage +- preserve depository / custody / liquidity references where those roles are in scope + +### Additional required control layers + +Required responsibilities: +- FX pricing / dealing engine owns quote generation or approved rate ingest +- liquidity pooling and aggregation engine owns funding-source selection +- liquidity source adapters normalize bank-line, correspondent, internal-pool, and optional on-chain liquidity access +- depository / CSD layer 
owns asset-register and settlement-touch behavior for in-scope instruments +- global custodian layer owns safekeeping, statements, and asset-servicing obligations + +### Optional or later + +- `mt103-hardcopy-sidecar` +- `card-networks-sidecar` +- `securitization-engine-sidecar` +- Mojaloop connector + +## 8. Required Chain 138 integration + +If on-ledger settlement is in scope, the following must be true: + +1. settlement contract path is frozen +2. instrument selection is frozen +3. reserve / oracle dependencies are frozen +4. sidecar correlation id maps to chain tx hash +5. evidence package includes chain settlement proof +6. depository / CSD touch point is frozen where asset-backed flows exist +7. custody / safekeeping statement linkage is frozen where custody applies +8. liquidity-source decision reference is preserved in the evidence package + +## 9. Reconciliation requirements + +### Mandatory reconciliation layers + +1. **Accounting reconciliation** + - OMNL JEs vs intended posting matrix + +2. **FX reconciliation** + - rate source vs booked rate + - realized / unrealized P&L correctness + +3. **Bank reconciliation** + - statement / advice vs OMNL settlement state + +4. **Operational reconciliation** + - sidecar correlation IDs vs journal refs vs package refs + +5. **On-ledger reconciliation** + - chain tx vs off-ledger settlement event + +6. **Custody / depository reconciliation** + - asset register vs custody statement vs settlement state + +7. **Liquidity reconciliation** + - selected funding source vs liquidity decision vs actual settlement usage + +## 10. Full production-complete gate + +The OMNL → Indonesia / BNI → global-bank flow is only fully complete when: + +1. one domestic Indonesia beneficiary flow is live and repeatable +2. one BNI-connected path is live and repeatable +3. one global correspondent-bank flow is live and repeatable +4. FX pricing, accounting, and revaluation are frozen and audited +5. 
all required ISO/SWIFT messages are archived and correlated +6. reconciliation package is reproducible +7. if chain settlement is in scope, the chain leg is included in the same evidence package + +## 11. Current blockers + +- no live BNI endpoint/auth contract captured in repo-backed state +- no live global correspondent-bank endpoint/auth contract captured in repo-backed state +- treasury / funds sidecar lane not yet validated end to end +- on-ledger settlement leg not yet included in the canonical transaction +- participant / office / treasury model not yet frozen across all counterparties +- depository / custody operating model not yet frozen +- FX pricing engine and liquidity aggregation ownership not yet frozen + +## 12. Execution order + +1. freeze participant / office / GL / nostro-vostro model +2. freeze depository / custody / FX / liquidity-control layers +3. freeze OMNL operator runbook +4. validate `server-funds-sidecar` +5. validate `off-ledger-2-on-ledger-sidecar` +6. acquire and document BNI / correspondent-bank endpoint and auth contracts +7. run one domestic Indonesia beneficiary-bank flow +8. run one correspondent-bank flow +9. add Chain 138 settlement leg if in scope +10. 
generate and sign the final evidence package + +## Related artifacts + +- [DBIS_RTGS_FX_TRANSACTION_CATALOG.md](DBIS_RTGS_FX_TRANSACTION_CATALOG.md) +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [HYBX_BATCH_001_OPERATOR_CHECKLIST.md](../04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md) +- [BANK_KANAYA_OFFICE_RUNBOOK.md](../04-configuration/mifos-omnl-central-bank/BANK_KANAYA_OFFICE_RUNBOOK.md) +- [PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md](../04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md) +- [FX_AND_VALUATION.md](../04-configuration/mifos-omnl-central-bank/FX_AND_VALUATION.md) +- [INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md](../04-configuration/mifos-omnl-central-bank/INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md) +- [SMART_CONTRACTS_ISO20022_FIN_METHODOLOGY.md](../04-configuration/SMART_CONTRACTS_ISO20022_FIN_METHODOLOGY.md) diff --git a/docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md b/docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md new file mode 100644 index 0000000..cfd399f --- /dev/null +++ b/docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md @@ -0,0 +1,76 @@ +# DBIS Phase 3 — End-to-end production simulation + +**Last updated:** 2026-03-28 +**Purpose:** Operationalize [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) Section 18 (example flow) and Sections 14, 17 as **repeatable liveness and availability checks** — not a single product build or a full business-E2E execution harness. + +**Prerequisites:** LAN access where noted; [DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) for IPs/VMIDs; operator env via `scripts/lib/load-project-env.sh` for on-chain steps. 
+ +--- + +## Section 18 flow → concrete checks + +| Step | Master plan | Verification (repo-aligned) | +|------|-------------|-----------------------------| +| 1 | Identity issued (Indy) | Indy steward / node RPC on VMID **6400** (192.168.11.64); pool genesis tools — **manual** until automated issuer script exists. Current CTs `6400/6401/6402` exist in inventory but are stopped as reserved placeholders with no verified Indy payload (see [DBIS_HYPERLEDGER_RUNTIME_STATUS.md](DBIS_HYPERLEDGER_RUNTIME_STATUS.md)). | +| 2 | Credential verified (Aries) | Aries agents (if colocated): confirm stack on Indy/FireFly integration path — **TBD** per deployment. | +| 3 | Workflow triggered (FireFly) | FireFly API on **6200** (currently restored as a minimal local gateway profile at `http://192.168.11.35:5000`). VMID **6201** is presently stopped / standby and should not be assumed active. | +| 4 | Settlement executed (Besu) | JSON-RPC `eth_chainId`, `eth_blockNumber`, optional test transaction via `smom-dbis-138` with `RPC_URL_138=http://192.168.11.211:8545`. PMM/oracle: [ORACLE_AND_KEEPER_CHAIN138.md](../../smom-dbis-138/docs/integration/ORACLE_AND_KEEPER_CHAIN138.md). | +| 5 | Cross-chain sync (Cacti) | Cacti = network monitoring here (VMID **5200**); **Hyperledger Cacti** interoperability is **future/optional** — track separately if deployed. **CCIP:** relay on r630-01 per [CCIP_RELAY_DEPLOYMENT.md](../07-ccip/CCIP_RELAY_DEPLOYMENT.md). | +| 6 | Compliance recorded (Fabric) | Fabric CTs `6000/6001/6002` exist in inventory but are stopped as reserved placeholders; app-level checks found no active peer / orderer workloads inside those CTs (see [DBIS_HYPERLEDGER_RUNTIME_STATUS.md](DBIS_HYPERLEDGER_RUNTIME_STATUS.md)). Treat Fabric business-flow validation as manual until real workloads are deployed and verified. | +| 7 | Final settlement confirmed | Re-check Besu head on **2101** and **2201**; Blockscout **5000** for tx receipt if applicable. 
| + +--- + +## Automated wrapper (partial) + +From repo root: + +```bash +bash scripts/verify/run-dbis-phase3-e2e-simulation.sh +``` + +Optional: + +```bash +RUN_CHAIN138_RPC_HEALTH=1 bash scripts/verify/run-dbis-phase3-e2e-simulation.sh +``` + +The script **does not** replace Indy/Fabric business transactions; it proves **liveness** of RPC, optional FireFly HTTP, and prints manual follow-ups. Treat it as a wrapper for infrastructure availability, not as proof that the complete seven-step business flow succeeded. + +--- + +## Performance slice (Section 14 — Caliper) + +Hyperledger Caliper is **not** vendored in this repo. To add benchmarks: + +1. Install Caliper in a throwaway directory or CI image. +2. Point a Besu **SUT** at `http://192.168.11.211:8545` (deploy/core RPC only) or a dedicated load-test RPC. +3. Start with `simple` contract scenarios; record **TPS**, **latency p95**, and **error rate**. + +**Suggested initial thresholds (tune per governance):** + +| Metric | Initial gate (lab) | +|--------|-------------------| +| RPC error rate under steady load | less than 1% for 5 min | +| Block production | no stall > 30s (QBFT) | +| Public RPC `eth_blockNumber` lag vs core | within documented spread ([check-chain138-rpc-health.sh](../../scripts/verify/check-chain138-rpc-health.sh) defaults) | + +Details: [CALIPER_CHAIN138_PERF_HOOK.md](CALIPER_CHAIN138_PERF_HOOK.md). + +--- + +## Production readiness certification (matrix-driven) + +Use [OPERATOR_READY_CHECKLIST.md](../00-meta/OPERATOR_READY_CHECKLIST.md) section **10** plus: + +- Phase 1 report timestamped under `reports/phase1-discovery/`. +- Phase 2 milestones acknowledged (Ceph/segmentation may be partial). +- Node Role Matrix: no critical **TBD** for entity-owned validators without a documented interim owner. 
+ +--- + +## Related + +- [PHASE1_DISCOVERY_RUNBOOK.md](PHASE1_DISCOVERY_RUNBOOK.md) +- [DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](../02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) +- [verify-end-to-end-routing.sh](../../scripts/verify/verify-end-to-end-routing.sh) — public/private ingress diff --git a/docs/03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md b/docs/03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md new file mode 100644 index 0000000..8b16e39 --- /dev/null +++ b/docs/03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md @@ -0,0 +1,124 @@ +# DBIS Chain 138 — Phases 1-3 Production Gate + +**Last updated:** 2026-03-28 +**Purpose:** Convert the DBIS master plan into an operational production gate. This document records which Phase 1-3 conditions are currently satisfied, which are partially satisfied, and which still block an honest production declaration. + +## Overall status + +**Current conclusion:** DBIS Chain 138 has a healthy Besu production base and a working Phase 3 liveness slice, but it is **not yet fully production-complete across the broader DBIS Hyperledger stack**. 
+ +### What is genuinely production-capable now + +- Chain 138 Besu validators / sentries / RPC tiers +- Public RPC feature baseline including: + - `eth_chainId` + - `eth_gasPrice` + - `eth_maxPriorityFeePerGas` + - `eth_feeHistory` + - trace methods used by the explorer +- Explorer / Blockscout surfaces +- FireFly primary minimal local gateway (`6200`) restored and serving API health + +### What is not yet proven production-ready + +- FireFly secondary failover footprint (`6201`) is not deployed; it is currently retired / standby until rebuilt +- Fabric peer / orderer workload health inside `6000-6002`; those CTs are now intentionally stopped as reserved placeholders +- Indy validator / node listener health inside `6400-6402`; those CTs are now intentionally stopped as reserved placeholders +- Sovereignized Phase 2 platform baseline: + - Ceph-backed storage + - final VLAN segmentation + - final entity ownership mapping + +## Phase 1 — Reality mapping gate + +### Required conditions + +| Gate | Status | Evidence | +|------|--------|----------| +| Proxmox discovery automation exists | Complete | [scripts/verify/run-phase1-discovery.sh](../../scripts/verify/run-phase1-discovery.sh) | +| Discovery has critical-failure exit semantics | Complete | same script now exits non-zero on critical failures | +| Node-role matrix is machine-regenerated | Complete | [scripts/docs/generate-dbis-node-role-matrix-md.sh](../../scripts/docs/generate-dbis-node-role-matrix-md.sh), [docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) | +| Duplicate-IP planning conflicts are explicitly labeled | Complete | [docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) | +| Hyperledger CT status is verified beyond `pct status` | Complete | [docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](DBIS_HYPERLEDGER_RUNTIME_STATUS.md) | + +### Phase 1 conclusion + +**Phase 1 is operationally complete** as a discovery and 
truth-mapping phase. + +## Phase 2 — Sovereignization gate + +### Required conditions + +| Gate | Status | Evidence / blocker | +|------|--------|--------------------| +| Quorum / fleet expansion roadmap exists | Complete | [docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](../02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) | +| ML110 migration path documented | Complete | same roadmap + [docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md](../02-architecture/PHYSICAL_HARDWARE_INVENTORY.md) | +| Ceph decision and pilot completed | Blocked | roadmap exists, but Ceph is not yet deployed | +| Network segmentation implemented | Blocked | roadmap exists, but full sovereign VLAN segmentation is not yet live | +| Final entity assignment completed | Blocked | still `TBD` in parts of the matrix | +| Template-standardized workload placement completed | Partial | preferred placement exists, but not all live placement is standardized | + +### Phase 2 conclusion + +**Phase 2 is planned but not complete.** The roadmap exists, but sovereignization has not yet been executed to the level required for a full production declaration. 
+ +## Phase 3 — Production simulation gate + +### Required conditions + +| Gate | Status | Evidence / blocker | +|------|--------|--------------------| +| Automated liveness wrapper exists | Complete | [scripts/verify/run-dbis-phase3-e2e-simulation.sh](../../scripts/verify/run-dbis-phase3-e2e-simulation.sh) | +| Besu liveness passes | Complete | direct script output and [scripts/verify/check-chain138-rpc-health.sh](../../scripts/verify/check-chain138-rpc-health.sh) | +| FireFly HTTP liveness passes | Complete | `6200` returns `HTTP 200` on `/api/v1/status` | +| Fabric app-native business flow validation passes | Blocked | Current checks found no active Fabric payload, processes, or listeners; CTs are now intentionally stopped as reserved placeholders | +| Indy app-native business flow validation passes | Blocked | Current checks found no active Indy payload, processes, or listeners; CTs are now intentionally stopped as reserved placeholders | +| Cross-chain / Cacti business flow validation passes | Blocked | not currently proven as deployed live DBIS path | +| Full business E2E has been demonstrated | Blocked | current wrapper is intentionally liveness-only | + +### Phase 3 conclusion + +**Phase 3 is partially complete.** Infrastructure liveness is demonstrated for Besu and FireFly primary, but not full DBIS business E2E. + +## Production blockers + +The following items still prevent a full “DBIS Chain 138 production complete” declaration: + +1. `6201` is not a verified active secondary FireFly node and is currently treated as retired / standby until rebuilt. +2. Fabric `6000-6002` are not active peer/orderer workloads; current evidence showed placeholder CTs only, and they have now been stopped and retained as reserve inventory. +3. Indy `6400-6402` are not active validator workloads; current evidence showed placeholder CTs only, and they have now been stopped and retained as reserve inventory. +4. 
Phase 2 sovereignization is still roadmap work, not completed platform state. +5. The current Phase 3 wrapper is liveness validation, not end-to-end business certification. + +## What can be declared complete now + +It is accurate to declare: + +- **Chain 138 Besu production baseline:** complete and healthy +- **DBIS Phase 1 discovery / reality-mapping:** complete +- **DBIS Phase 3 liveness wrapper for Besu + FireFly primary:** complete + +It is **not** yet accurate to declare: + +- full DBIS Hyperledger production completion +- full multi-entity sovereignized infrastructure completion +- full end-to-end DBIS business workflow certification + +## Next production-closing actions + +1. Decide whether `6201` is to be rebuilt as a real secondary FireFly node or left retired as a reserve inventory slot. +2. Either deploy real Fabric workloads inside `6000-6002` and validate them, or leave those CTs stopped as reserved placeholders. +3. Either deploy real Indy workloads inside `6400-6402` and validate them, or leave those CTs stopped as reserved placeholders. +4. Execute the first real Phase 2 platform milestone: + - fleet expansion, or + - Ceph pilot, or + - VLAN segmentation tranche +5. Only after those steps, rerun Phase 1 and Phase 3 evidence and update the production gate. 
+ +## Related artifacts + +- [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) +- [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](PHASE1_DISCOVERY_RUNBOOK.md) +- [docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md) +- [docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](DBIS_HYPERLEDGER_RUNTIME_STATUS.md) +- [docs/02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](../02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) diff --git a/docs/03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md b/docs/03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md new file mode 100644 index 0000000..fac7eb2 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md @@ -0,0 +1,69 @@ +# DBIS RTGS Control Plane Deployment Checklist + +**Last updated:** 2026-03-29 +**Purpose:** Deployment checklist for the next RTGS control-plane services beyond the first-slice sidecars: + +- RTGS orchestrator +- FX pricing / dealing engine +- liquidity pooling and aggregation engine + +This checklist does not claim these services are already built. It exists so the platform can self-deploy them as soon as artifacts are available. + +## 1. Target components + +| Component | Default role | Expected health path | +|-----------|--------------|----------------------| +| `rtgs-orchestrator` | canonical transaction-state owner and cross-system workflow coordinator | `GET /actuator/health` | +| `rtgs-fx-engine` | quote generation / approved-rate ingest / booking references | `GET /actuator/health` | +| `rtgs-liquidity-engine` | funding-source selection, allocation, and adapter coordination | `GET /actuator/health` | + +## 2. 
Runtime expectations
+
+- Proxmox target host defaults to `r630-02`
+- packaging expectation: Java application JAR per service
+- runtime expectation: systemd-managed service with env file under `/etc/dbis-rtgs`
+- health expectation: local HTTP readiness on port `8080`
+
+## 3. Required inputs before deployment
+
+- built JAR for each selected control-plane service
+- OMNL / Fineract base URL and tenant/auth contract
+- Redis and persistence choices
+- per-service env vars for role-specific configuration
+- decision on target CT VMIDs and host placement
+
+## 4. Deployment sequence
+
+1. create target CTs if they do not already exist
+2. copy application artifact into `/opt/dbis-rtgs/`
+3. push env file into `/etc/dbis-rtgs/.env`
+4. install systemd unit
+5. restart service
+6. verify local health endpoint
+7. verify Fineract or downstream reachability where applicable
+
+## 5. Validation checklist
+
+- [ ] `rtgs-orchestrator` artifact is present and versioned
+- [ ] `rtgs-fx-engine` artifact is present and versioned
+- [ ] `rtgs-liquidity-engine` artifact is present and versioned
+- [ ] CT targets are chosen and reachable
+- [ ] env files are frozen for the chosen environment
+- [ ] health endpoints return `UP`
+- [ ] Fineract/downstream reachability is verified
+- [ ] operator can restart and inspect each service via systemd
+
+## 6. Scripts
+
+- [create-dbis-rtgs-control-plane-lxcs.sh](../../scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh)
+- [deploy-dbis-rtgs-control-plane.sh](../../scripts/deployment/deploy-dbis-rtgs-control-plane.sh)
+- [check-dbis-rtgs-control-plane.sh](../../scripts/verify/check-dbis-rtgs-control-plane.sh)
+
+## 7. Production gate
+
+This control-plane tranche is only complete when:
+
+1. all selected services are deployed on Proxmox
+2. health checks pass
+3. their interfaces are frozen against the canonical RTGS docs
+4. 
at least one canonical flow uses them end to end diff --git a/docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md b/docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md new file mode 100644 index 0000000..aa204c9 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md @@ -0,0 +1,162 @@ +# DBIS RTGS Depository and Custody Operating Model + +**Last updated:** 2026-03-29 +**Purpose:** Implementation-grade operating model for the depository / CSD, global custodian, and custody / safekeeping / asset-servicing layers referenced by the DBIS RTGS canonical production checklist. + +## 1. Scope + +This document freezes the intended runtime boundaries for: + +- `Depository / CSD layer` +- `Global custodian layer` +- `Custody / safekeeping / asset servicing flow` + +It does not claim these layers are already deployed. It defines the operating contract they must satisfy before they can be marked `Complete` in the canonical checklist. + +## 2. 
Canonical role split + +### Depository / CSD layer + +Owns: + +- authoritative asset register for in-scope instruments +- issuance, transfer, pledge, lien, and release semantics +- linkage between asset ownership state and RTGS settlement-touch state + +Does not own: + +- cash ledger posting +- bank-message transport +- treasury funding decisions + +### Global custodian layer + +Owns: + +- safekeeping account hierarchy +- sub-custody and global-bank/correspondent custody relationships +- statements, confirmations, and servicing references + +Does not own: + +- canonical cash settlement +- pricing or liquidity decisions +- on-chain anchoring + +### Custody / safekeeping / asset servicing flow + +Owns: + +- holdings lifecycle from registration to reporting +- statement generation and custody evidence references +- servicing events such as entitlement, transfer instruction, and post-settlement reporting + +Does not own: + +- participant master data +- FX price formation +- liquidity source selection + +## 3. Canonical business objects + +| Object | Primary owner | Required downstream link | +|--------|---------------|--------------------------| +| `asset_position` | Depository / CSD | custody statement, settlement-touch reference | +| `transfer_instruction` | Depository / CSD | RTGS orchestrator, OMNL posting reference | +| `custody_account` | Global custodian | participant / office / account mapping | +| `custody_statement` | Global custodian | reconciliation package, audit evidence | +| `servicing_event` | Custody flow | holdings state, evidence package | +| `settlement_touch_reference` | Depository / CSD | chain/off-ledger settlement evidence | + +## 4. 
Required integrations + +### Upstream + +- OMNL participant / office / treasury model +- RTGS orchestrator correlation IDs +- external institution master data + +### Downstream + +- OMNL / Fineract postings +- external bank statements or confirmations +- Chain 138 settlement evidence where on-ledger finality is in scope +- ISO 20022 / institutional evidence package + +## 5. Canonical flow + +```mermaid +flowchart LR + REQUEST["Transfer / Asset Instruction"] --> CSD["Depository / CSD"] + CSD -->|"asset ownership update"| CUST["Global Custodian"] + CSD -->|"settlement touch reference"| ORCH["RTGS Orchestrator"] + ORCH -->|"cash posting"| OMNL["OMNL / Fineract"] + ORCH -->|"bank or chain settlement"| SETTLE["Settlement Rail"] + CUST -->|"statement / servicing output"| EVIDENCE["Evidence / Reconciliation"] + OMNL --> EVIDENCE + SETTLE --> EVIDENCE +``` + +## 6. Minimum interface contract + +### Depository asset-register and settlement-touch contract + +- Input: + - participant reference + - instrument reference + - action type: issue / transfer / pledge / release + - quantity / amount + - correlation id +- Output: + - asset position id + - settlement-touch reference + - depository state +- Failure contract: + - reject with reason code and no settlement-touch reference + +### Global custodian account/reporting contract + +- Input: + - participant or sub-custody account reference + - position or servicing reference + - reporting period or event type +- Output: + - custody statement reference + - servicing reference + - reconciliation reference +- Failure contract: + - produce exception state with unresolved custody item + +### Custody lifecycle contract + +- Input: + - custody account reference + - position reference + - servicing action + - correlation id +- Output: + - custody lifecycle state + - statement/evidence reference +- Failure contract: + - unresolved servicing queue with operator-visible reason + +## 7. 
Deployment expectations + +Before these layers can be considered active: + +1. one implementation boundary must be selected for the depository role: + - on-ledger + - off-ledger + - hybrid +2. one implementation boundary must be selected for the global custodian role +3. one canonical custody flow must be bound to OMNL and RTGS settlement +4. reconciliation outputs must include holdings and statement references + +## 8. Production gate + +This operating model is complete only when: + +1. one canonical asset flow uses the depository touch point +2. one canonical custody flow generates statements/evidence +3. holdings, settlement, and accounting reconcile in the same package +4. the canonical production checklist rows for these layers can move from `Planned` to `Partial` or `Complete` with evidence diff --git a/docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md b/docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md new file mode 100644 index 0000000..7270097 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md @@ -0,0 +1,79 @@ +# DBIS RTGS Canonical Production Checklist + +**Last updated:** 2026-03-29 +**Purpose:** Canonical production-readiness checklist for the full DBIS RTGS stack across Chain 138, OMNL / Fineract, HYBX sidecars, Indonesia / BNI banking flows, and optional Hyperledger identity and interoperability layers. + +## Status guidance + +- Use `Complete` only for production-capable roles that are implemented and verified. +- Use `Partial` when a slice exists or works narrowly, but is not yet enough for full production use. +- Use `Planned` for intentionally in-scope components not yet deployed or validated. +- Use `Reserved placeholder` for inventory that exists but is not an active workload. +- Use `Retired / standby` for inventory that is intentionally inactive until rebuilt. 
+ +## Canonical checklist + +| Component | Current state | Required integration | Remaining task | Owner | Production gate | +|-----------|---------------|----------------------|----------------|-------|-----------------| +| Chain 138 Besu validator / sentry / RPC baseline | Complete. Validator, sentry, core, public, and named RPC tiers are live and script-verified. | Ongoing RPC, validator, and public wallet/explorer compatibility only. | Maintain health, peer spread, fee support, and public RPC method coverage. | DBIS / infra ops | Public and core RPC healthy, head spread `0`, peer counts healthy, wallet/explorer-required methods working. | +| Explorer / Blockscout | Complete. Explorer routes, APIs, token metadata, and RPC capability metadata are live. | Ongoing explorer API, token metadata, and wallet metadata compatibility. | Maintain explorer health, indexing freshness, metadata accuracy, and route stability. | DBIS / explorer ops | Explorer routes, APIs, and metadata remain healthy and consistent with Chain 138 runtime. | +| FireFly primary `6200` | Partial. Restored as a minimal local FireFly API footprint, not yet a proven multiparty production workflow engine. | FireFly event/orchestration model, sidecar and banking workflow correlation, and HA strategy. | Define event model, validate orchestration role, and decide whether FireFly is mandatory in slice 1. | DBIS workflow / infra ops | API healthy, config preserved, orchestration role defined, and real cross-system workflow validated. | +| FireFly secondary `6201` | Retired / standby. Inventory exists, but current rootfs does not contain a valid deployment payload. | Rebuild contract for a real secondary FireFly node if HA is required. | Either rebuild as a true secondary and validate failover, or keep explicitly retired in all architecture claims. | DBIS workflow / infra ops | Either rebuilt and verified as a real secondary, or formally excluded from active-stack claims. 
| +| Fabric `6000-6002` | Reserved placeholder. VMIDs exist, but app-level verification did not show active peer / orderer services or meaningful Fabric payloads. | Actual Fabric peer/orderer deployment model if Fabric is required by the RTGS target architecture. | Either deploy real Fabric workloads and validate them, or keep them stopped and excluded from active-stack claims. | DBIS architecture / infra ops | Real Fabric workloads deployed and validated, or the footprint remains explicitly placeholder-only. | +| Indy `6400-6402` | Reserved placeholder. VMIDs exist, but app-level verification did not show active Indy listeners or meaningful Indy payloads. | Actual Indy validator / identity runtime only if Indy is required by the RTGS target architecture. | Either deploy real Indy workloads and validate them, or keep them stopped and excluded from active-stack claims. | DBIS architecture / infra ops | Real Indy workloads deployed and validated, or the footprint remains explicitly placeholder-only. | +| Aries | Planned. No deployed Aries runtime is currently evidenced. | Identity-agent model, DID/wallet strategy, and credential-exchange role in RTGS workflows. | Decide in or out of scope for production slice 1; if in, deploy agents and validate flows. | Identity architecture lead | Scope decision is frozen, and if in scope the deployed agent model and flows are validated. | +| AnonCreds | Planned. No deployed credential flow is currently evidenced. | Issuer / holder / verifier model and credential lifecycle. | Decide in or out of scope for production slice 1; if in, freeze schema and verification flow. | Identity architecture lead | Scope decision is frozen, and if in scope the credential lifecycle is validated end to end. | +| Ursa | Planned. No explicit runtime dependency or operating model is currently evidenced. | Cryptographic runtime role, library dependency model, and operational controls. 
| Decide in or out of scope; if in, document and validate the cryptographic dependency model. | Identity / cryptography architecture lead | Scope decision is frozen, and if in scope the cryptographic dependency is documented and validated. | +| Cacti | Planned. Not currently proven as a live interoperability engine. | Cross-ledger interoperability contract and deployment model. | Decide whether Cacti is needed for production slice 1; if in, deploy and validate the real path. | Interoperability architecture lead | Scope decision is frozen, and if in scope the live interoperability path is deployed and tested. | +| Caliper | Planned. Documentation hook exists, but no routine benchmark harness is active. | Benchmark workload definitions for RTGS and Chain 138 settlement paths. | Build the approved benchmark harness and run accepted workload profiles. | Performance / QA lead | Benchmark harness exists and approved RTGS workloads have been executed and recorded. | +| OMNL / Fineract API rail | Partial. Live tenant and authenticated posting path are now proven, but the canonical RTGS operator rail is not fully frozen. | Stable OMNL tenant/auth contract, operator flow, office/GL mapping, and reconciliation package path. | Freeze tenant, operator runbook, participant model, and reproducible OMNL settlement rail. | OMNL / banking ops | Office / GL / JE / snapshot / package flow runs cleanly and repeatably against the intended live tenant. | +| Mifos X frontend / Fineract tenant | Partial. Runtime is live and sidecars can authenticate, but production operator model is not fully frozen. | Stable UI/API tenant contract, secrets, and operator procedures. | Finalize tenant/auth, operator usage, and runbook completeness. | OMNL / banking ops | UI/API healthy, tenant/auth stable, and operator procedures are complete and repeatable. | +| HYBX participant / office / treasury model | Planned. Participant, office, reserve, settlement, and treasury roles are not yet frozen end to end. 
| OMNL participant model, office mappings, GL mappings, and treasury structure. | Freeze participant classes, office IDs, treasury accounts, and nostro/vostro model. | Banking architecture lead | Participant, treasury, reserve, and GL structures are documented, accepted, and used by the canonical rail. | +| Depository / CSD layer | Planned. No dedicated depository or CSD runtime and no frozen asset-register model are yet evidenced in the current RTGS stack. | Securities ownership model, settlement-finality link, asset register, and participant/custody relationships. | Define whether the depository role is on-ledger, off-ledger, or hybrid; freeze issuance, transfer, pledge, and settlement-touch points. | Securities / market-infrastructure architecture lead | Depository role, participant model, and settlement interaction are documented and validated in at least one canonical asset flow. | +| Global custodian layer | Planned. No explicit global custodian runtime, account model, or reporting path is yet frozen in repo-backed state. | Correspondent banks, global custodians, safekeeping accounts, corporate-action handling, and asset-servicing obligations. | Define the custody operating model, account structure, reporting obligations, and reconciliation with OMNL and RTGS settlement. | Custody / institutional banking integration lead | Custody account model, reconciliation path, and reporting obligations are frozen and tested in a canonical custody flow. | +| FX pricing / dealing engine | Planned. FX flow requirements are documented, but no single pricing/dealing engine contract is yet frozen as the production source of rates and booking rules. | Treasury policy, rate sources, quote locking, spreads, value dates, and gain/loss accounting. | Freeze the pricing hierarchy, quote lifecycle, booking rules, and integration into OMNL and sidecars. | FX / treasury architecture lead | One canonical FX transaction runs with frozen pricing inputs, accounting, and reconciliation. 
| +| Liquidity pooling and aggregation engine | Planned. Liquidity sourcing is implied across treasury and correspondent flows, but no explicit pooling/aggregation engine is yet modeled as a production component. | Treasury policy, reserve policy, liquidity providers, internal pools, external bank lines, and optional on-chain liquidity. | Define source prioritization, eligibility rules, allocation logic, and operator controls. | Liquidity architecture lead | Liquidity sourcing logic is documented and one canonical funding decision path is validated. | +| Liquidity source adapters | Planned. No source-by-source adapter contract has been frozen for bank lines, treasury pools, correspondent banks, or optional on-chain liquidity. | Bank lines, correspondent banks, internal treasury pools, optional on-chain pools, and optional sidecar/provider adapters. | Enumerate source families and define one adapter contract per source class. | Treasury / integrations lead | Each in-scope liquidity source class has a defined adapter contract and at least the mandatory sources are validated. | +| Custody / safekeeping / asset servicing flow | Planned. Custody, safekeeping, and servicing obligations are referenced indirectly through settlement and correspondent flows, but not yet modeled as one canonical lifecycle. | Depository, custodian, participant accounts, statements, corporate actions, holdings reconciliation, and evidence path. | Define the canonical lifecycle for safekeeping, transfer, servicing, and statement production. | Custody operations / product architecture lead | One end-to-end custody lifecycle is documented and validated with reconciliation/evidence output. | +| Mojaloop integration | Planned. No live Mojaloop switch endpoint/auth/callback contract is yet evidenced here. | Mojaloop quote, transfer, callback, and settlement-window contract. | Document live Mojaloop endpoints/auth and integrate them if Mojaloop remains in scope. 
| Payments interoperability lead | Endpoint/auth contract is frozen and quote/transfer/callback/settlement behavior is validated. | +| HYBX sidecar layer | Partial. Sidecar families are known, and first-slice sidecars are deployed, but full boundaries and ownership are not yet frozen. | Sidecar-by-sidecar ingress/egress, retries, auth, and system-of-record ownership. | Freeze sidecar boundaries, orchestration model, and canonical RTGS event path. | HYBX app / integration lead | Sidecar purposes, auth, retries, and system-of-record ownership are documented and validated. | +| `mifos-fineract-sidecar` | Partial. Deployed on Proxmox, healthy, and has completed an authenticated live OMNL posting. | OMNL/Fineract tenant contract and downstream settlement/evidence path. | Extend validation from posting success to the full settlement/evidence path. | HYBX integration lead | Sidecar API and event flow documented, and at least one authenticated live transfer completes through downstream settlement/evidence. | +| `server-funds-sidecar` | Partial. Deployed on Proxmox and healthy, but treasury/system-of-record boundaries are not yet frozen. | OMNL treasury/funding orchestration contract and participant model. | Freeze whether it is mandatory in the first RTGS slice and validate its business flow. | HYBX integration lead | Treasury/funding role is defined and a real authenticated business flow is validated. | +| `off-ledger-2-on-ledger-sidecar` | Partial. Deployed on Proxmox, healthy, and able to drive the first Chain 138 settlement leg with safe pending-anchor degradation. | Canonical off-ledger event source, OMNL/Fineract posting contract, and Chain 138 settlement finality path. | Freeze the canonical off-ledger source event and complete final receipt/finality handling. | HYBX integration lead | Off-ledger event to Chain 138 settlement is frozen and tested end to end with durable evidence output. | +| `mt103-hardcopy-sidecar` | Partial. 
Known sidecar, but not yet tied into the canonical RTGS path. | MT103 ingest, bank-message archive, and settlement/evidence mapping. | Decide whether it is in scope and, if yes, integrate MT103 ingest into the canonical RTGS flow. | HYBX integration lead | MT103 ingestion path is documented, integrated, and tested if in scope. | +| `securitization-engine-sidecar` | Partial. Known sidecar, but regulatory/accounting role in RTGS is not yet frozen. | Accounting, collateral, and reporting responsibilities in the RTGS operating model. | Define whether it participates in RTGS slice 1 and validate the required role if so. | HYBX integration lead | Its RTGS responsibility is either validated or explicitly out of scope. | +| `card-networks-sidecar` | Partial. Known sidecar, but not yet placed in the RTGS path. | Card-network settlement role only if card rails are included in scope. | Include only if card settlement is part of production scope; otherwise keep it out of the canonical path. | HYBX integration lead | Scope decision is frozen, and if included the settlement path is validated. | +| `securities-sidecar` | Partial. Known sidecar with runnable application shape, but its depository/custody placement in the RTGS architecture is not yet frozen. | Instrument resolution, securities instructions, settlement events, and position reconciliation linked to the depository/custody operating model. | Freeze whether it is the runtime boundary for depository/custody flows and validate one canonical securities/custody path if so. | HYBX integration lead | Scope decision is frozen, and if included one canonical securities or custody flow is validated. | +| `flash-loan-xau-sidecar` | Planned. Runnable sidecar exists locally, but its role in the RTGS production path is still specialized and optional. | XAU-specific liquidity, conversion, and settlement logic only if retained as part of the target architecture. 
| Decide whether it remains a specialized liquidity extension or enters the canonical RTGS path; validate if retained. | HYBX integration lead | Scope decision is frozen, and if included the XAU liquidity path is validated end to end. | +| Chain 138 settlement contracts | Partial. Contract families exist, but the exact RTGS contract path is not yet frozen as one canonical settlement lane. | Final contract path between OMNL-side events and on-chain settlement evidence. | Freeze the exact contract set and document how each business flow reaches Chain 138. | Chain 138 / settlement lead | Final contract set is frozen, deployed addresses are accepted, and the path is tested end to end. | +| MerchantSettlementRegistry | Partial. Available contract family, but exact placement in the canonical RTGS flow is not yet frozen. | RTGS settlement workflow and evidence mapping. | Decide exactly when and how the registry is invoked in RTGS settlement. | Chain 138 / settlement lead | Registry path is integrated into the business flow with verified inputs and outputs. | +| WithdrawalEscrow | Partial. Available contract family, but exact placement in RTGS withdrawal scenarios is not yet frozen. | Withdrawal / release / payout semantics in the RTGS model. | Freeze the escrow role for settlement and withdrawal scenarios. | Chain 138 / settlement lead | Escrow flow is validated in the chosen settlement and withdrawal scenarios. | +| DBIS / compliant settlement tokens | Partial. Candidate instruments exist, but the final RTGS instrument set is not yet frozen by use case. | Monetary architecture, reserve rules, mint/burn policy, and reconciliation policy. | Select the final RTGS instruments and freeze their control and reconciliation model. | Chain 138 / monetary architecture lead | Final instrument selection, reserve rules, and reconciliation path are documented and validated. | +| Reserve / oracle dependencies | Partial. 
Reserve and oracle systems exist, but the RTGS-specific dependency mapping is not yet frozen. | RTGS dependency model for reserve attestations, price references, and control policy. | Freeze which reserve/oracle controls are required for RTGS settlement and FX support. | Monetary controls lead | RTGS reserve/oracle dependencies are documented, accepted, and operational. | +| FireFly / sidecar / chain event model | Planned. No single canonical correlation and retry model is yet frozen. | Shared IDs, correlation, retry, compensating actions, and event archive policy. | Define one canonical event model across OMNL, sidecars, and Chain 138. | Workflow architecture lead | Event catalog, IDs, retries, and compensating actions are defined and validated. | +| ISO 20022 evidence and vault path | Partial. Evidence standard exists, but full institution-ready production completion is not yet frozen. | ISO 20022 archive, manifest, vaulting, and hash anchoring contract. | Complete ISO evidence packaging and archive references for the RTGS path. | Regulatory / compliance lead | ISO manifests, hashes, archive references, and legal evidence path are complete and reproducible. | +| Institutional 4.995 package path | Partial. Package standards and scripts exist, but real institution submission-grade completion is not yet frozen. | Institutional attestation, submission package, and strict readiness contract. | Complete the evidence path with real institution-ready materials and `--strict` readiness. | Regulatory / compliance lead | `--strict` readiness passes with real institution materials and reproducible evidence output. | +| Indonesia / BNI domestic banking path | Planned. Blueprint exists, but live BNI endpoint/auth/message contract is not yet evidenced. | BNI institution profile, domestic route definition, auth, account validation, and reporting obligations. | Freeze the BNI-connected route and message/auth contract for production. 
| Indonesia banking integration lead | Live BNI contract is documented, validated, and used in the canonical Indonesia payment flow. | +| Global correspondent / liquidity bank path | Planned. Blueprint exists, but live correspondent endpoint/auth/message contract is not yet evidenced. | SWIFT / ISO / correspondent-bank endpoint, auth, nostro/vostro, and confirmation contract. | Freeze the correspondent-bank route and integrate it with OMNL, sidecars, and reconciliation. | Cross-border banking integration lead | Live correspondent contract is documented and a real cross-border flow is validated. | +| RTGS production gate | Planned. The gate exists conceptually, but not all mandatory lanes are green yet. | All mandatory banking, sidecar, settlement, evidence, and external-bank integrations for the chosen production architecture. | Turn all mandatory rows for the chosen production architecture to `Complete`. | DBIS program owner | All mandatory checklist rows for the chosen RTGS production architecture are `Complete`. | + +## Immediate execution priority + +1. Freeze the canonical banking rail on the now-proven OMNL tenant/auth path. +2. Freeze the participant / treasury / GL model plus the depository, custody, FX, and liquidity-control layers. +3. Complete the canonical settlement path from HYBX sidecars into Chain 138 and evidence output. 
+ +## Related artifacts + +- [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) +- [docs/00-meta/TODO_TASK_LIST_MASTER.md](../00-meta/TODO_TASK_LIST_MASTER.md) +- [docs/03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md](DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md) +- [docs/03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](DBIS_HYPERLEDGER_RUNTIME_STATUS.md) +- [docs/04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md](../04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md) +- [docs/04-configuration/mifos-omnl-central-bank/INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md](../04-configuration/mifos-omnl-central-bank/INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md) +- [docs/11-references/GITEA_HYBX_ORGANIZATION_AND_REPOS.md](../11-references/GITEA_HYBX_ORGANIZATION_AND_REPOS.md) +- [DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md](DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md) +- [DBIS_MOJALOOP_INTEGRATION_STATUS.md](DBIS_MOJALOOP_INTEGRATION_STATUS.md) +- [DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md](DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md) +- [DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md](DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md) +- [DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md](DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md) diff --git a/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md b/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md new file mode 100644 index 0000000..46a603c --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md @@ -0,0 +1,110 @@ +# DBIS RTGS First Slice Architecture + +**Last updated:** 2026-03-28 +**Purpose:** Freeze the narrowest credible first production slice for the DBIS RTGS program using the assets currently available in the repository and local environment. + +## Design principle + +The first production slice should avoid expanding the critical path with systems that are not yet deployed or not yet evidenced in the workspace. 
The goal is to get to a real end-to-end settlement slice with the minimum number of moving parts. + +## Included in first slice + +### Core runtime + +- Chain 138 / Hyperledger Besu +- Explorer / Blockscout +- FireFly primary (`6200`) +- Mifos / Fineract / OMNL rail + +### Selected HYBX sidecars + +- `mifos-fineract-sidecar` +- `server-funds-sidecar` +- `off-ledger-2-on-ledger-sidecar` + +## Deferred from first slice + +- FireFly secondary (`6201`) +- Fabric runtime +- Indy runtime +- Mojaloop integration +- Aries / AnonCreds / Ursa runtime +- Cacti runtime +- card-network, securities, and flash-loan specialized sidecars + +## Why these sidecars + +### `mifos-fineract-sidecar` + +This is the strongest general-purpose banking sidecar for the first slice because it already models: + +- compliance +- posting +- settlement +- reconciliation +- audit +- Fineract integration +- events +- observability + +### `server-funds-sidecar` + +This is the best fit for treasury/funds movement orchestration because it already exposes: + +- transfer initiation +- approvals +- settlement events +- reconciliation +- Fineract integration + +### `off-ledger-2-on-ledger-sidecar` + +This is the best fit for the off-ledger to on-ledger bridge because it already frames: + +- collateral or source-of-value lock +- conversion initiation +- settlement +- extinguish/release flow + +## First-slice interaction flow + +1. Participant/office exists in OMNL / Fineract. +2. A transfer or settlement intent enters through the canonical banking rail. +3. `mifos-fineract-sidecar` validates, screens, posts, and settles the banking-side event. +4. `server-funds-sidecar` coordinates treasury/server-funds transfer semantics where needed. +5. `off-ledger-2-on-ledger-sidecar` maps qualifying off-ledger value into the Chain 138 settlement path. +6. Chain 138 settlement contracts record or confirm the on-ledger leg. +7. Reconciliation and audit artifacts are produced. +8. 
The regulatory package path is generated from the resulting evidence set. + +## Immediate implementation sequence + +### Sequence A — Banking rail + +1. Freeze the OMNL / Fineract tenant and operator path. +2. Freeze the participant / treasury / GL model. +3. Validate the `HYBX-BATCH-001` operator flow end to end. + +### Sequence B — Sidecar baseline + +1. Build and validate `mifos-fineract-sidecar`. +2. Build and validate `server-funds-sidecar`. +3. Build and validate `off-ledger-2-on-ledger-sidecar`. +4. Record runtime dependencies, health endpoints, and deployment units. + +### Sequence C — Settlement mapping + +1. Freeze the canonical Chain 138 settlement contracts used by RTGS. +2. Define the exact event handoff from sidecars to on-chain settlement. +3. Define reconciliation outputs and operator evidence. + +## Exclusion rule + +Anything not listed in “Included in first slice” must not be treated as a blocker for initial RTGS activation unless governance explicitly changes scope. + +## Related artifacts + +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md](DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md) +- [DBIS_MOJALOOP_INTEGRATION_STATUS.md](DBIS_MOJALOOP_INTEGRATION_STATUS.md) +- [DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md](DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md) diff --git a/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md b/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md new file mode 100644 index 0000000..bf7ef49 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md @@ -0,0 +1,283 @@ +# DBIS RTGS First Slice Deployment Checklist + +**Last updated:** 2026-03-29 +**Purpose:** Convert the first-slice RTGS architecture into a deployable checklist for Proxmox VE and live operator validation. This document is intentionally narrower than the full RTGS program. It only covers the components chosen for the initial production slice. 
+ +## Scope + +This checklist applies to the following first-slice components: + +- Chain 138 / Hyperledger Besu +- Explorer / Blockscout +- FireFly primary (`6200`) +- OMNL / Fineract banking rail +- `mifos-fineract-sidecar` +- `server-funds-sidecar` +- `off-ledger-2-on-ledger-sidecar` + +This checklist does **not** assume that the following are part of the first production slice: + +- FireFly secondary (`6201`) +- Fabric runtime +- Indy runtime +- Mojaloop integration +- Aries / AnonCreds / Ursa runtime +- Cacti runtime +- specialized HYBX sidecars outside the chosen first slice + +## Build verification completed + +The following sidecars were built successfully on 2026-03-28 with Maven and `-DskipTests`: + +### `mifos-fineract-sidecar` + +- Build command: + - `cd /home/intlc/projects/HYBX_Sidecars/mifos-fineract-sidecar && mvn -q -DskipTests package` +- Verified runnable artifact: + - `/home/intlc/projects/HYBX_Sidecars/mifos-fineract-sidecar/scsm-app/target/scsm-app-1.0.0-SNAPSHOT.jar` + +### `server-funds-sidecar` + +- Build command: + - `cd /home/intlc/projects/HYBX_Sidecars/server-funds-sidecar && mvn -q -DskipTests package` +- Verified runnable artifact: + - `/home/intlc/projects/HYBX_Sidecars/server-funds-sidecar/funds-app/target/funds-app-1.0.0-SNAPSHOT.jar` + +### `off-ledger-2-on-ledger-sidecar` + +- Build command: + - `cd /home/intlc/projects/HYBX_Sidecars/off-ledger-2-on-ledger-sidecar && mvn -q -DskipTests package` +- Verified runnable artifact: + - `/home/intlc/projects/HYBX_Sidecars/off-ledger-2-on-ledger-sidecar/target/off-ledger-2-on-ledger-sidecar-0.1.0-SNAPSHOT.jar` + +## Current deployment status + +As of 2026-03-28/29: + +- `5802` `rtgs-scsm-1` is deployed on `r630-02` + - systemd: `dbis-rtgs-scsm` + - Redis: active + - health: `UP` +- `5803` `rtgs-funds-1` is deployed on `r630-02` + - systemd: `dbis-rtgs-funds` + - Redis: active + - health: `UP` +- `5804` `rtgs-xau-1` is deployed on `r630-02` + - systemd: `dbis-rtgs-xau` + - Redis: active + 
- health: `UP` + +What is now proven: + +- the canonical authenticated OMNL / Fineract tenant flow is live for the SCSM lane: + - base URL: `https://omnl.hybxfinance.io/fineract-provider/api/v1` + - tenant: `omnl` + - user: `app.omnl` +- `rtgs-scsm-1` can post authenticated journal-entry batches into OMNL / Fineract +- one canonical live transfer has completed through the deployed sidecar runtime: + - sidecar response: + - `messageId: c6e44bc8-aa04-4eba-b983-6293967f24b7` + - `transactionId: a16a10b3bc47` + - `status: COMPLETED` + - verified OMNL journal entries: + - debit `GL 1410` amount `1.11` + - credit `GL 2100` amount `1.11` + - comments `SCSM transfer c6e44bc8-aa04-4eba-b983-6293967f24b7` + +What is still not complete: + +- the participant / office / treasury / GL model is not yet frozen as the full RTGS production model +- `server-funds-sidecar` and `off-ledger-2-on-ledger-sidecar` are runtime-healthy, but do not yet have equivalent authenticated business-flow validation +- the canonical RTGS flow is not yet complete across OMNL / Fineract, sidecar logic, Chain 138 settlement, and final evidence output + +## Runtime deployment baseline + +### Besu / explorer / FireFly + +- [x] Chain 138 Besu production baseline is healthy +- [x] Explorer / Blockscout is live +- [x] FireFly primary `6200` is healthy enough to serve as the first-slice workflow/event anchor +- [ ] Freeze the exact role FireFly will play in the first slice: + - event broker only + - process/workflow orchestrator + - audit/event persistence layer + +### OMNL / Fineract + +- [x] Confirm the exact production tenant, auth path, and base URL +- [ ] Freeze the operator runbook and canonical batch flow +- [ ] Confirm the participant / office / treasury / GL model used by the sidecars + +## Sidecar runtime requirements + +### `mifos-fineract-sidecar` + +**Reference repo:** +- `/home/intlc/projects/HYBX_Sidecars/mifos-fineract-sidecar` + +**Run command:** +- `java -jar 
scsm-app/target/scsm-app-1.0.0-SNAPSHOT.jar` + +**Health endpoints:** +- `GET /actuator/health` +- `GET /actuator/health/liveness` +- `GET /actuator/health/readiness` + +**Key dependencies from repo-backed docs/config:** +- database: + - default H2 in-memory + - production path should use `DB_URL` for PostgreSQL +- Redis: + - required for idempotency + - `REDIS_HOST`, `REDIS_PORT` +- Kafka: + - optional + - `KAFKA_BOOTSTRAP_SERVERS` +- Fineract: + - `FINERACT_BASE_URL` + +**Deployment gate before Proxmox promotion:** +- [ ] Confirm production DB target +- [ ] Confirm Redis target +- [x] Confirm Fineract base URL and tenant/auth +- [x] Prove `/actuator/health/readiness` healthy with production-like dependencies +- [x] Validate one canonical transfer request path against the intended Fineract rail +- [ ] Eliminate the current hard-stop / forced-restart workaround needed for some jar upgrades on the SCSM systemd unit + +### `server-funds-sidecar` + +**Reference repo:** +- `/home/intlc/projects/HYBX_Sidecars/server-funds-sidecar` + +**Run command:** +- `java -jar funds-app/target/funds-app-1.0.0-SNAPSHOT.jar` + +**Health endpoints:** +- `GET /actuator/health/liveness` +- `GET /actuator/health/readiness` + +**Key dependencies from repo-backed docs/config:** +- database: + - default H2 in-memory + - production path should use `DB_URL` for PostgreSQL +- Redis: + - required for idempotency + - `REDIS_HOST` +- Kafka: + - optional + - `KAFKA_BOOTSTRAP_SERVERS` +- Fineract adapter: + - present in repo structure and must be wired to the selected banking rail + +**Deployment gate before Proxmox promotion:** +- [ ] Decide whether this sidecar is required in the initial funds/treasury path +- [ ] Freeze its system-of-record boundary versus `mifos-fineract-sidecar` +- [ ] Validate initiate/approve/status flow against the chosen RTGS participant model +- [ ] Validate settlement event and reconciliation behavior + +### `off-ledger-2-on-ledger-sidecar` + +**Reference repo:** +- 
`/home/intlc/projects/HYBX_Sidecars/off-ledger-2-on-ledger-sidecar` + +**Run command:** +- `mvn spring-boot:run` + - or use the built jar: + - `java -jar target/off-ledger-2-on-ledger-sidecar-0.1.0-SNAPSHOT.jar` + +**Current config evidence:** +- `config/application.yaml` +- default `server.port: 8080` +- `FINERACT_BASE_URL` +- `XAU_FEED_URL` +- GL and risk configuration under `config/` + +**Health/readiness note:** +- repo runbook calls out adding Spring Boot Actuator as optional +- production deployment should not proceed until there is a stable health/readiness path + +**Deployment gate before Proxmox promotion:** +- [ ] Confirm this sidecar’s role in the first production slice +- [ ] Freeze the exact off-ledger source event and on-ledger settlement target +- [ ] Confirm Fineract connectivity and XAU pricing/oracle strategy +- [ ] Add or verify production-grade health endpoint support +- [ ] Validate one canonical conversion/session flow end to end + +## Proxmox deployment checklist + +### Pre-deploy + +- [ ] Choose the actual Proxmox target(s) for the three sidecars +- [ ] Decide container vs VM packaging for each sidecar +- [ ] Freeze Java runtime baseline +- [ ] Freeze secrets/env injection method +- [ ] Freeze logging and restart policy + +### Dependency wiring + +- [ ] Postgres target(s) available if not using H2 +- [ ] Redis target(s) available +- [ ] Kafka decision made: + - optional and deferred, or + - required for the chosen event model +- [ ] Fineract API reachability proven from the chosen Proxmox runtime +- [ ] FireFly integration point frozen if FireFly is part of the event path + +### Runtime verification + +- [x] Process starts under systemd / container supervisor +- [x] Health endpoints return healthy +- [x] `mifos-fineract-sidecar` API base path responds for a canonical business flow +- [ ] `server-funds-sidecar` and `off-ledger-2-on-ledger-sidecar` API base paths respond for canonical business flows +- [x] Logs show no dependency boot failures 
for current runtime boot +- [x] Sidecar can reach Fineract at the HTTP layer +- [x] Sidecar can reach required local Redis dependency +- [ ] Sidecar can reach final production DB / Kafka dependencies if those are required by the chosen slice + +### Functional verification + +- [x] `mifos-fineract-sidecar` processes one canonical transfer +- [ ] `server-funds-sidecar` processes one canonical funds/approval flow if in scope +- [ ] `off-ledger-2-on-ledger-sidecar` processes one canonical conversion/settlement flow +- [ ] Chain 138 receives and records the intended settlement leg where applicable +- [ ] Reconciliation and audit outputs are captured + +## Verification command + +Use: + +```bash +bash scripts/verify/check-dbis-rtgs-first-slice.sh +``` + +This verifies: + +- CT status +- systemd service status +- local Redis status +- local actuator health +- live Fineract HTTP reachability from each sidecar CT + +## First-slice production gate + +The first RTGS production slice should be treated as deployable only when all of the following are true: + +1. Besu, explorer, and FireFly primary remain healthy. +2. OMNL / Fineract tenant, auth, and operator path are frozen. +3. The participant / treasury / GL model is frozen. +4. The selected sidecars are deployed to Proxmox VE or an equivalent production runtime. +5. Each selected sidecar has a stable health/readiness path. +6. One canonical RTGS flow executes successfully across: + - Fineract / OMNL + - selected sidecar(s) + - Chain 138 settlement path + - reconciliation / evidence output +7. Any deferred components remain explicitly deferred in architecture and runbooks. 
+ +## Related artifacts + +- [DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md](DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md) +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md](DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md) +- [DBIS_MOJALOOP_INTEGRATION_STATUS.md](DBIS_MOJALOOP_INTEGRATION_STATUS.md) +- [DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md](DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md) diff --git a/docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md b/docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md new file mode 100644 index 0000000..83313c1 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md @@ -0,0 +1,187 @@ +# DBIS RTGS FX and Liquidity Operating Model + +**Last updated:** 2026-03-29 +**Purpose:** Implementation-grade operating model for the FX pricing / dealing engine, liquidity pooling and aggregation engine, and liquidity source adapters referenced by the DBIS RTGS canonical production checklist. + +## 1. Scope + +This document freezes the intended runtime boundaries for: + +- `FX pricing / dealing engine` +- `Liquidity pooling and aggregation engine` +- `Liquidity source adapters` + +It defines the minimum behavior required before these layers can be promoted from architecture intent into a validated production lane. + +## 2. 
Canonical role split + +### FX pricing / dealing engine + +Owns: + +- quote generation or approved rate ingest +- source hierarchy for rates +- spread / fee policy application +- quote locking, expiry, and value-date semantics +- booking references for OMNL and settlement + +Does not own: + +- final accounting ledger +- final liquidity-source selection +- final settlement transport + +### Liquidity pooling and aggregation engine + +Owns: + +- evaluate available liquidity sources +- prioritize and allocate funding +- enforce eligibility and operator override rules +- emit the canonical funding decision reference + +Does not own: + +- FX quote formation +- bank-message transport +- settlement evidence packaging + +### Liquidity source adapters + +Owns: + +- normalize access to internal treasury pools +- normalize access to bank lines and correspondent-bank sources +- normalize access to optional on-chain liquidity +- return funding availability, hold, release, and failure states + +Does not own: + +- aggregate funding decisions +- journal posting +- orchestration state + +## 3. Canonical business objects + +| Object | Primary owner | Required downstream link | +|--------|---------------|--------------------------| +| `fx_quote` | FX engine | OMNL booking, settlement refs | +| `fx_booking_reference` | FX engine | journal refs, evidence package | +| `funding_request` | Liquidity engine | source adapter calls | +| `funding_decision` | Liquidity engine | OMNL posting, settlement rail, evidence package | +| `liquidity_adapter_result` | Source adapter | funding decision | +| `rate_source_reference` | FX engine | FX reconciliation | + +## 4. Required source classes + +Mandatory source classes to model: + +1. internal treasury pools +2. bank credit / liquidity lines +3. correspondent-bank liquidity +4. 
optional on-chain liquidity if it remains in the target production path + +Each class must have: + +- auth model +- request contract +- response contract +- failure code mapping +- hold/release semantics + +## 5. Canonical flow + +```mermaid +flowchart LR + REQ["Payment / Settlement Request"] --> ORCH["RTGS Orchestrator"] + ORCH --> FX["FX Pricing / Dealing Engine"] + FX -->|"locked quote"| ORCH + ORCH --> LQE["Liquidity Pooling / Aggregation Engine"] + LQE --> AD1["Internal Treasury Adapter"] + LQE --> AD2["Bank Line Adapter"] + LQE --> AD3["Correspondent Adapter"] + LQE --> AD4["Optional On-Chain Adapter"] + AD1 --> LQE + AD2 --> LQE + AD3 --> LQE + AD4 --> LQE + LQE -->|"funding decision"| ORCH + ORCH --> OMNL["OMNL / Fineract"] + ORCH --> SETTLE["Settlement Rail"] +``` + +## 6. Minimum interface contract + +### FX quote/pricing/booking contract + +- Input: + - source currency + - destination currency + - amount + - value date + - participant / route context +- Output: + - quote id + - rate + - spread / fee + - expiry + - booking reference +- Failure contract: + - reject quote with explicit reason and no booking reference + +### Liquidity-engine source-selection and allocation contract + +- Input: + - funding request id + - route context + - required amount / currency + - value date + - constraints / policy flags +- Output: + - funding decision id + - selected source set + - allocation amounts + - operator action requirement if needed +- Failure contract: + - insufficient-liquidity or policy-rejected state + +### Liquidity source adapter contract + +- Input: + - funding request + - hold/release action + - source account or line reference +- Output: + - adapter result id + - availability / hold / release confirmation + - failure code +- Failure contract: + - adapter error with retriable vs terminal distinction + +## 7. Reconciliation requirements + +Required reconciliations: + +1. rate source vs booked rate +2. quote id vs OMNL posting reference +3. 
funding decision vs selected source confirmations +4. source holds/releases vs actual settlement usage +5. FX gain/loss and fee treatment vs final accounting outputs + +## 8. Deployment expectations + +Before these layers can be considered active: + +1. the canonical rate hierarchy must be frozen +2. the canonical funding-source priority model must be frozen +3. mandatory source adapters must be enumerated and assigned +4. one canonical FX-backed transfer must run end to end with quote and funding references preserved + +## 9. Production gate + +This operating model is complete only when: + +1. one canonical FX transaction completes with frozen pricing inputs +2. one canonical funding decision is emitted and reconciled +3. mandatory liquidity source adapters are validated +4. the canonical production checklist rows for these layers can move from `Planned` to `Partial` or `Complete` with evidence diff --git a/docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md b/docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md new file mode 100644 index 0000000..69e58d4 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md @@ -0,0 +1,285 @@ +# DBIS RTGS FX Transaction Catalog + +**Last updated:** 2026-03-29 +**Purpose:** Canonical transaction catalog for FX, cross-border banking, and RTGS-adjacent settlement flows across OMNL, HYBX sidecars, Chain 138, and Indonesia-facing beneficiary banks such as Bank Kanaya and BNI-connected correspondent paths. + +## Scope + +This document describes the full transaction families required for a production-grade FX and cross-border RTGS stack: + +- OMNL / Fineract journal-entry flows +- HYBX sidecar business flows +- ISO 20022 and SWIFT Fin message flows +- FX valuation and revaluation flows +- correspondent-banking and nostro / vostro flows +- Chain 138 settlement augmentation where on-ledger finality is in scope + +This document is not a statement that every flow is already deployed. 
It is the execution catalog for what must exist to call the stack fully end to end. + +## Status legend + +- `Implemented now` +- `Partially implemented` +- `Required next` + +## 1. Core transaction families + +| Family | Description | Current status | Primary systems | +|--------|-------------|----------------|-----------------| +| Opening balance / reserve migration | Initial OMNL funding and reserve booking | Implemented now | OMNL / Fineract | +| M0 to M1 conversion | Central-bank style monetary conversion and allocation | Implemented now | OMNL / Fineract | +| Interoffice settlement | HO to branch / institution due-to / due-from settlement | Implemented now | OMNL / Fineract | +| PvP multilateral net settlement | Beneficiary office receives net cleared position | Partially implemented | OMNL / Fineract | +| Sidecar-initiated RTGS posting | Business-side RTGS transfer posted into OMNL via sidecar | Partially implemented | `mifos-fineract-sidecar`, OMNL | +| Treasury / funding orchestration | Treasury approval, prefunding, limits, release | Required next | `server-funds-sidecar`, OMNL | +| Off-ledger to on-ledger conversion | External event to Chain 138 settlement leg | Required next | `off-ledger-2-on-ledger-sidecar`, Chain 138 | +| FX valuation / revaluation | Spot, triangulated, and end-of-day revaluation | Required next | OMNL, rate feeds | +| Correspondent-bank settlement | Nostro / vostro reconciliation with domestic / global banks | Required next | OMNL, bank APIs, ISO/SWIFT rails | +| Regulatory evidence package | Indonesia / institution-ready package and submission | Partially implemented | OMNL scripts, evidence tooling | + +## 2. Full FX transaction classes + +### 2.1 Internal treasury FX conversion + +**Purpose** +- Convert between currencies inside OMNL treasury books. +- Support central treasury reserve management and internal balance-sheet positioning. + +**Required legs** +1. Debit source currency reserve / treasury account. +2. 
Credit target currency reserve / treasury account. +3. Post realized or unrealized FX P&L where applicable. +4. Update revaluation basis and audit trail. + +**Key GL patterns** +- `12010` / `12020` / `12090` — FX reserve detail +- `13010` — FX settlement nostro +- `42000` / `51000` — realized FX gain / loss +- `42100` / `52100` — unrealized FX gain / loss + +**Required messages / records** +- Internal treasury instruction +- Rate source reference +- value date / trade date +- dealing reference +- settlement reference + +**Status** +- GL and valuation framework are documented. +- End-to-end booked treasury FX conversion flow is not yet proven in production. + +### 2.2 Domestic beneficiary settlement in Indonesia + +**Purpose** +- Credit Indonesian beneficiary institutions such as Bank Kanaya on OMNL books. +- Support domestic regulatory reporting and beneficiary balance confirmation. + +**Required legs** +1. Clear multilateral or bilateral obligation. +2. Post OMNL journal entries to beneficiary office. +3. Attach settlement reference and supporting evidence. +4. Reconcile beneficiary office balances and produce regulator-facing package. + +**Current repo-backed example** +- `HYBX-BATCH-001` +- beneficiary office `22` Bank Kanaya +- `USD 1,000,000,000.00` +- PvP multilateral net narrative in [PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md](../04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md) + +**Status** +- Repo-backed posting and package path exists. +- Live authenticated sidecar-to-OMNL posting now exists. +- Full production beneficiary-bank operating model is still not frozen. + +### 2.3 Cross-border commercial-bank FX payment + +**Purpose** +- Move value from OMNL / central-bank context through a domestic or correspondent bank path to an external bank. + +**Required legs** +1. Payment initiation or settlement instruction received. +2. FX quote / rate locked. +3. Compliance and sanctions checks. +4. 
Nostro / vostro and prefunding checks. +5. Debit source balance / reserve. +6. Credit beneficiary bank or correspondent account. +7. Reconcile statement and confirmation messages. +8. Produce audit and regulatory evidence. + +**Required message families** +- ISO 20022: + - `pain.001` + - `pacs.008` + - `pacs.009` + - `pacs.002` + - `camt.052` + - `camt.053` + - `camt.054` +- SWIFT Fin where needed: + - `MT103` + - `MT202` / `MT202 COV` + - optionally statement or advice equivalents off-platform + +**Status** +- Message methodology is documented. +- A production cross-border message rail is not yet fully deployed in this workspace. + +### 2.4 Chain-anchored RTGS settlement + +**Purpose** +- Add on-ledger finality or settlement confirmation on Chain 138 after OMNL-side accounting. + +**Required legs** +1. Off-ledger business event finalized in OMNL. +2. Canonical settlement event created with stable identifiers. +3. Chain 138 contract path selected. +4. Settlement token / registry / escrow action executed. +5. On-chain transaction hash captured in evidence package. +6. Reconciliation ties OMNL transaction, sidecar correlation ID, and chain tx hash together. + +**Likely on-chain components** +- `MerchantSettlementRegistry` +- `WithdrawalEscrow` +- compliant settlement token set +- reserve / oracle controls where minting or conversion is involved + +**Status** +- Contract inventory exists. +- Canonical RTGS chain leg is not yet frozen end to end. + +## 3. 
Message-by-message transaction detail + +### 3.1 `pain.001` customer initiation + +**Used for** +- bank or enterprise payment initiation into the RTGS workflow + +**Minimum mapped fields** +- debtor / creditor +- debtor account / creditor account +- amount +- currency +- end-to-end id +- purpose + +**Downstream** +- mapped into canonical payload +- feeds compliance and posting workflow + +### 3.2 `pacs.008` FI-to-FI customer credit transfer + +**Used for** +- primary credit-settlement instruction between institutions + +**Required downstream records** +- instructionId +- MsgId +- UETR if available +- amount / currency +- settlement method +- account references + +**Expected system impacts** +- OMNL posting +- sidecar audit event +- optional chain settlement event + +### 3.3 `pacs.009` interbank settlement + +**Used for** +- bank-to-bank settlement leg +- high-value RTGS interbank flow + +**Indonesia / correspondent context** +- preferred for institution-facing settlement instruction where OMNL to beneficiary bank mapping exists + +### 3.4 `pacs.002` status reporting + +**Used for** +- accept / reject / pending / completed status + +**Required use** +- update business workflow state +- feed operator dashboards and evidence package + +### 3.5 `camt.053` / `camt.054` + +**Used for** +- statement and debit/credit advice reconciliation + +**Required use** +- external-bank and nostro/vostro reconciliation +- proof of receipt / settlement confirmation + +### 3.6 `MT103` / `MT202` + +**Used for** +- legacy correspondent banking or hybrid gateway participation + +**Required use** +- normalize into the same canonical struct as MX messages +- preserve raw message hash and field mapping in the evidence chain + +## 4. Required reconciliation outputs + +Every production FX / RTGS transaction family must produce: + +1. business request payload +2. authenticated API request / response evidence +3. OMNL journal-entry ids and journal-entry payload +4. 
sidecar correlation id / message id / idempotency key +5. rate source and value date +6. beneficiary / counterparty office and account mapping +7. statement / confirmation artifact where external banks are involved +8. on-chain tx hash where Chain 138 is involved +9. package-ready manifest entry + +## 5. Required identifiers + +The following identifiers must be stable across systems: + +- `instructionId` +- `messageId` +- `correlationId` +- `idempotencyKey` +- `settlementRef` +- `transactionId` (OMNL / Fineract) +- `UETR` where applicable +- chain transaction hash where applicable + +## 6. Minimum production-complete FX criteria + +The FX stack is not production-complete until all of the following are true: + +1. rate source and valuation policy are frozen +2. participant / office / treasury / GL model is frozen +3. domestic beneficiary-bank flow is repeatable +4. correspondent-bank flow is documented and tested +5. reconciliation captures all identifiers and statement evidence +6. regulatory package includes FX-specific reporting and prudential mapping +7. chain settlement leg is either fully implemented or explicitly out of scope + +## 7. 
Current truth + +### Proven now + +- OMNL tenant/auth is live and usable +- `mifos-fineract-sidecar` has completed an authenticated live OMNL posting +- the accounting side of a canonical business transfer can be initiated from a deployed sidecar on Proxmox VE + +### Still open + +- full treasury / funds orchestration +- off-ledger to Chain 138 settlement leg +- correspondent-bank and BNI-specific external settlement path +- full evidence package covering banking message + accounting + on-chain finality in one run + +## Related artifacts + +- [DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md) +- [DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md](DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md) +- [HYBX_BATCH_001_OPERATOR_CHECKLIST.md](../04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md) +- [OMNL_TRANSACTION_SEQUENCE_FULL.md](../04-configuration/mifos-omnl-central-bank/OMNL_TRANSACTION_SEQUENCE_FULL.md) +- [PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md](../04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md) +- [FX_AND_VALUATION.md](../04-configuration/mifos-omnl-central-bank/FX_AND_VALUATION.md) +- [SMART_CONTRACTS_ISO20022_FIN_METHODOLOGY.md](../04-configuration/SMART_CONTRACTS_ISO20022_FIN_METHODOLOGY.md) diff --git a/docs/03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md b/docs/03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md new file mode 100644 index 0000000..61bcd43 --- /dev/null +++ b/docs/03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md @@ -0,0 +1,54 @@ +# DBIS RTGS Later-Phase Sidecars Deployment Checklist + +**Last updated:** 2026-03-29 +**Purpose:** Deployment checklist for the next sidecar tranche beyond the currently deployed first-slice services. + +## 1. 
Target sidecars + +| Sidecar | Runtime type | Expected artifact | Health path | +|---------|--------------|-------------------|-------------| +| `securities-sidecar` | Java / Spring Boot | `securities-app/target/securities-app-1.0.0-SNAPSHOT.jar` | `GET /actuator/health` | +| `card-networks-sidecar` | Java / Spring Boot | `cardnet-app/target/cardnet-app-1.0.0-SNAPSHOT.jar` | `GET /actuator/health` | +| `mt103-hardcopy-sidecar` | Go binary | `./server` built from `cmd/server` | `GET /health` | + +## 2. Intended role in DBIS RTGS + +- `securities-sidecar` + - later-phase depository / custody / securities instruction lane +- `card-networks-sidecar` + - later-phase card-rail settlement lane +- `mt103-hardcopy-sidecar` + - evidence and hardcopy-bank-message archive lane + +## 3. Target runtime + +Suggested default Proxmox targets on `r630-02`: + +- `5808` `rtgs-securities-1` +- `5809` `rtgs-cardnet-1` +- `5810` `rtgs-mt103-1` + +## 4. Required inputs before deployment + +- built JAR for `securities-sidecar` +- built JAR for `card-networks-sidecar` +- built Go binary for `mt103-hardcopy-sidecar` +- OMNL / Fineract tenant/auth contract for the Java sidecars +- PostgreSQL decision for `mt103-hardcopy-sidecar` +- Redis host/port for Java sidecars + +## 5. Deployment gates + +- [ ] each artifact exists and is versioned +- [ ] each target CT exists and is reachable +- [ ] each service can start under systemd +- [ ] health endpoint returns success +- [ ] Fineract reachability is proven for Java sidecars +- [ ] storage/database reachability is proven for the MT103 sidecar +- [ ] one canonical later-phase business flow is identified per sidecar before production claims are made + +## 6. 
Scripts + +- [create-dbis-rtgs-later-phase-sidecar-lxcs.sh](../../scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh) +- [deploy-dbis-rtgs-later-phase-sidecars.sh](../../scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh) +- [check-dbis-rtgs-later-phase-sidecars.sh](../../scripts/verify/check-dbis-rtgs-later-phase-sidecars.sh) diff --git a/docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md b/docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md new file mode 100644 index 0000000..33e7e5f --- /dev/null +++ b/docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md @@ -0,0 +1,119 @@ +# Phase 1 — Reality mapping runbook + +**Last updated:** 2026-03-28 +**Purpose:** Operational steps for [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) Sections 3 and 19.1–19.3: inventory Proxmox, Besu, optional Hyperledger CTs, and record dependency context. + +**Outputs:** Timestamped report under `reports/phase1-discovery/` (created by the orchestrator script). + +**Pass / fail semantics:** the orchestrator still writes a full evidence report when a critical section fails, but it now exits **non-zero** and appends a final **Critical failure summary** section. Treat the markdown as evidence capture, not automatic proof of success. + +--- + +## Prerequisites + +- Repo root; `jq` recommended for template audit. +- **LAN:** SSH keys to Proxmox nodes (default `192.168.11.10`, `.11`, `.12` from `config/ip-addresses.conf`). +- Optional: `curl` for RPC probe. + +--- + +## One-command orchestrator + +```bash +bash scripts/verify/run-phase1-discovery.sh +``` + +Optional Hyperledger container smoke checks (SSH to r630-02, `pct exec`): + +```bash +HYPERLEDGER_PROBE=1 bash scripts/verify/run-phase1-discovery.sh +``` + +Each run writes: + +- `reports/phase1-discovery/phase1-discovery-YYYYMMDD_HHMMSS.md` — human-readable report with embedded diagram and command output. 
+- `reports/phase1-discovery/phase1-discovery-YYYYMMDD_HHMMSS.log` — same content log mirror. + +Critical sections for exit status: + +- Proxmox template audit +- `pvecm` / `pvesm` / `pct list` / `qm list` +- Chain 138 core RPC quick probe +- `check-chain138-rpc-health.sh` +- `verify-besu-enodes-and-ips.sh` +- optional Hyperledger CT probe when `HYPERLEDGER_PROBE=1` + +See also `reports/phase1-discovery/README.md`. + +--- + +## Dependency graph (logical) + +Ingress → RPC/sentries/validators → explorer; CCIP relay on r630-01 uses public RPC; FireFly/Fabric/Indy are optional DLT sides for the Section 18 flow. + +```mermaid +flowchart TB + subgraph edge [EdgeIngress] + CF[Cloudflare_DNS] + NPM[NPMplus_LXC] + end + subgraph besu [Chain138_Besu] + RPCpub[RPC_public_2201] + RPCcore[RPC_core_2101] + Val[Validators_1000_1004] + Sen[Sentries_1500_1508] + end + subgraph observe [Observability] + BS[Blockscout_5000] + end + subgraph relay [CrossChain] + CCIP[CCIP_relay_r63001_host] + end + subgraph dlt [Hyperledger_optional] + FF[FireFly_6200_6201] + Fab[Fabric_6000_plus] + Indy[Indy_6400_plus] + end + CF --> NPM + NPM --> RPCpub + NPM --> RPCcore + NPM --> BS + RPCpub --> Sen + RPCcore --> Sen + Sen --> Val + CCIP --> RPCpub + FF --> Fab + FF --> Indy +``` + +**References:** [PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md](PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md), [ALL_VMIDS_ENDPOINTS.md](../04-configuration/ALL_VMIDS_ENDPOINTS.md), [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md). 
+ +--- + +## Manual follow-ups + +| Task | Command / doc | +|------|----------------| +| Template vs live VMIDs | `bash scripts/verify/audit-proxmox-operational-template.sh` | +| Besu configs | `bash scripts/audit-besu-configs.sh` (review before running; LAN) | +| IP audit | `bash scripts/audit-all-vm-ips.sh` | +| Node role constitution | [DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) | + +--- + +## ML110 documentation reconciliation + +**Physical inventory** summary must match **live** role: + +- If `192.168.11.10` still runs **Proxmox** and hosts guests, state that explicitly. +- If migration to **OPNsense/pfSense WAN aggregator** is in progress or complete, align with [NETWORK_CONFIGURATION_MASTER.md](../11-references/NETWORK_CONFIGURATION_MASTER.md) and [PHYSICAL_HARDWARE_INVENTORY.md](../02-architecture/PHYSICAL_HARDWARE_INVENTORY.md). + +Use `pvecm status` and `pct list` on `.10` from the orchestrator output as evidence. + +--- + +## Related + +- [DBIS_NODE_ROLE_MATRIX.md](../02-architecture/DBIS_NODE_ROLE_MATRIX.md) +- [DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](../02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) +- [DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md) diff --git a/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md b/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md index dd605c6..05a31b8 100644 --- a/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md +++ b/docs/04-configuration/ALL_VMIDS_ENDPOINTS.md @@ -180,6 +180,21 @@ The following VMIDs have been permanently removed: --- +### DBIS RTGS first-slice sidecars + +| VMID | IP Address | Hostname | Status | Endpoints | Purpose | +|------|------------|----------|--------|-----------|---------| +| 5802 | 192.168.11.89 | rtgs-scsm-1 | ✅ Running | App: 8080, Redis: 6379 | DBIS RTGS `mifos-fineract-sidecar` / SCSM | +| 5803 | 192.168.11.90 | rtgs-funds-1 | ✅ Running | App: 8080, Redis: 6379 | DBIS RTGS 
`server-funds-sidecar` | +| 5804 | 192.168.11.92 | rtgs-xau-1 | ✅ Running | App: 8080, Redis: 6379 | DBIS RTGS `off-ledger-2-on-ledger-sidecar` | + +**Operational note (2026-03-28/29):** +- These three sidecars are deployed internally on `r630-02` and return local actuator health. +- They can reach the live Mifos / Fineract surface on VMID `5800` at the HTTP layer. +- Canonical authenticated RTGS flow is still pending final Fineract tenant/auth freeze, so these should currently be treated as `runtime deployed, functionally partial`. + +--- + ### Hyperledger Fabric | VMID | IP Address | Hostname | Status | Endpoints | Purpose | diff --git a/docs/04-configuration/E2E_ENDPOINTS_LIST.md b/docs/04-configuration/E2E_ENDPOINTS_LIST.md index 06deff8..e48f82f 100644 --- a/docs/04-configuration/E2E_ENDPOINTS_LIST.md +++ b/docs/04-configuration/E2E_ENDPOINTS_LIST.md @@ -4,7 +4,8 @@ **List from CLI (public):** `./scripts/verify/verify-end-to-end-routing.sh --list-endpoints --profile=public` **List from CLI (private/admin):** `./scripts/verify/verify-end-to-end-routing.sh --list-endpoints --profile=private` **Run E2E (public profile recommended):** `./scripts/verify/verify-end-to-end-routing.sh --profile=public` (from LAN with DNS or use `E2E_USE_SYSTEM_RESOLVER=1` and `/etc/hosts` per [E2E_DNS_FROM_LAN_RUNBOOK.md](E2E_DNS_FROM_LAN_RUNBOOK.md)). -**Run E2E (private/admin):** `./scripts/verify/verify-end-to-end-routing.sh --profile=private`. +**Run E2E (private/admin):** `./scripts/verify/verify-end-to-end-routing.sh --profile=private`. +**Gitea Actions (umbrella / cc-*):** no stable unauthenticated REST for all Gitea versions — print UI URLs with `./scripts/verify/print-gitea-actions-urls.sh` and confirm jobs in the browser after push. **What each hostname should present (operator narrative):** [FQDN_EXPECTED_CONTENT.md](FQDN_EXPECTED_CONTENT.md). 
@@ -38,6 +39,12 @@ | the-order.sankofa.nexus | web | https://the-order.sankofa.nexus | OSJ management portal (secure auth); app **the_order** at `~/projects/the_order`. NPM upstream default: **order-haproxy** VMID **10210** `http://192.168.11.39:80` → portal **192.168.11.51:3000** (`provision-order-haproxy-10210.sh`). Override with `THE_ORDER_UPSTREAM_*` for direct portal if 10210 is down. | | www.the-order.sankofa.nexus | web | https://www.the-order.sankofa.nexus | **301** to `https://the-order.sankofa.nexus` (canonical apex; NPM `advanced_config`). | | studio.sankofa.nexus | web | https://studio.sankofa.nexus | Sankofa Studio (FusionAI Creator) at VMID 7805. | +| keycloak.sankofa.nexus | web | https://keycloak.sankofa.nexus | Keycloak IdP (VMID 7802); client SSO for admin/portal. | +| admin.sankofa.nexus | web | https://admin.sankofa.nexus | Client SSO: access administration (hostname intent; NPM upstream TBD). | +| portal.sankofa.nexus | web | https://portal.sankofa.nexus | Client SSO: portal / marketplace (typical upstream VMID 7801). | +| dash.sankofa.nexus | web | https://dash.sankofa.nexus | Operator systems dashboard (IP allowlist + MFA intent; upstream TBD). | +| docs.d-bis.org | web | https://docs.d-bis.org | Docs on explorer nginx where configured. | +| blockscout.defi-oracle.io | web | https://blockscout.defi-oracle.io | Generic Blockscout hostname (often VMID 5000); not canonical Chain 138 **explorer.d-bis.org**. | | cacti-alltra.d-bis.org | web | https://cacti-alltra.d-bis.org | Cacti monitoring UI for Alltra. | | cacti-hybx.d-bis.org | web | https://cacti-hybx.d-bis.org | Cacti monitoring UI for HYBX. | | mifos.d-bis.org | web | https://mifos.d-bis.org | Mifos X / Fineract banking and microfinance platform (VMID 5800). 
| @@ -85,6 +92,12 @@ | the-order.sankofa.nexus | https://the-order.sankofa.nexus | | www.the-order.sankofa.nexus | https://www.the-order.sankofa.nexus | | studio.sankofa.nexus | https://studio.sankofa.nexus | +| keycloak.sankofa.nexus | https://keycloak.sankofa.nexus | +| admin.sankofa.nexus | https://admin.sankofa.nexus | +| portal.sankofa.nexus | https://portal.sankofa.nexus | +| dash.sankofa.nexus | https://dash.sankofa.nexus | +| docs.d-bis.org | https://docs.d-bis.org | +| blockscout.defi-oracle.io | https://blockscout.defi-oracle.io | | cacti-alltra.d-bis.org | https://cacti-alltra.d-bis.org | | cacti-hybx.d-bis.org | https://cacti-hybx.d-bis.org | | mifos.d-bis.org | https://mifos.d-bis.org | @@ -169,6 +182,8 @@ When running from outside LAN or when backends are down, the following endpoints | studio.sankofa.nexus | Historically 404 when the proxy misses `/studio/` or backend `192.168.11.72:8000`; verifier checks `/studio/`. Passed on 2026-03-26 after the NPMplus host update | | phoenix.sankofa.nexus, www.phoenix.sankofa.nexus | (Resolved in verifier) Phoenix API (7800) is API-first; `verify-end-to-end-routing.sh` checks `https://…/health` (200), not `/`. A separate **marketing** site on the apex hostname (if desired) needs another upstream or app routes—NPM still points `phoenix.sankofa.nexus` at the Fastify API today. | | the-order.sankofa.nexus | 502 if **10210** HAProxy or backend portal is down. NPM defaults upstream to **192.168.11.39:80** (order-haproxy). Fallback: `THE_ORDER_UPSTREAM_IP` / `THE_ORDER_UPSTREAM_PORT` = portal **192.168.11.51:3000** | +| keycloak.sankofa.nexus, admin.sankofa.nexus, portal.sankofa.nexus, dash.sankofa.nexus | DNS/SSL/HTTPS **warn** or **skip** when NPM or backends are unwired; listed in `E2E_OPTIONAL_WHEN_FAIL` so the public profile still exits **0**. 
| +| docs.d-bis.org, blockscout.defi-oracle.io | Same optional-when-fail behavior; **blockscout.defi-oracle.io** also runs optional `/api/v2/stats` like **explorer.d-bis.org**. | **Verifier behavior (2026-03):** `openssl s_client` is wrapped with `timeout` (`E2E_OPENSSL_TIMEOUT` default 15s, `E2E_OPENSSL_X509_TIMEOUT` default 5s) so `--profile=private` / `--profile=all` cannot hang. **`--profile=all`** merges private and public `E2E_OPTIONAL_WHEN_FAIL` lists for temporary regressions. Install **`wscat`** (`npm install -g wscat`) for full WSS JSON-RPC checks; the script uses `wscat -n` to match `curl -k`, and now treats a clean `wscat` exit as a successful full WebSocket check even when the tool prints no JSON output. diff --git a/docs/04-configuration/FQDN_EXPECTED_CONTENT.md b/docs/04-configuration/FQDN_EXPECTED_CONTENT.md index e0b550d..ce82f9c 100644 --- a/docs/04-configuration/FQDN_EXPECTED_CONTENT.md +++ b/docs/04-configuration/FQDN_EXPECTED_CONTENT.md @@ -1,10 +1,11 @@ # FQDN expected content (what users and clients should see) -**Last Updated:** 2026-03-27 (Sankofa hostname tiers: public / SSO / dash) +**Last Updated:** 2026-03-27 (aligned with EXPECTED_WEB_CONTENT deployment table v1.5) **Purpose:** One-page description of **what should be presented** at each public NPM-routed hostname after HTTPS. Use this before pruning evidence or changing proxies so expectations stay aligned with product intent. **Canonical routing (IPs, VMIDs, ports):** [ALL_VMIDS_ENDPOINTS.md](ALL_VMIDS_ENDPOINTS.md), [RPC_ENDPOINTS_MASTER.md](RPC_ENDPOINTS_MASTER.md). **Product depth (Sankofa / Phoenix / explorer narrative):** [EXPECTED_WEB_CONTENT.md](../02-architecture/EXPECTED_WEB_CONTENT.md). +**Deployment status (VMID / upstream matrix):** same doc, section **Deployment Status** (authoritative for `portal` / `admin` / `dash` / `blockscout.defi-oracle.io` rows). **Automated checks:** [E2E_ENDPOINTS_LIST.md](E2E_ENDPOINTS_LIST.md), `scripts/verify/verify-end-to-end-routing.sh`. 
--- @@ -42,12 +43,22 @@ | `admin.sankofa.nexus` | Web | **Client SSO:** administer access (users, roles, org access policy). | | `portal.sankofa.nexus` | Web | **Client SSO:** Phoenix cloud services, Sankofa Marketplace subscriptions, and other **client-facing** services. | +**Typical upstream (when NPM is wired)** — see [EXPECTED_WEB_CONTENT.md](../02-architecture/EXPECTED_WEB_CONTENT.md) **Deployment Status**: + +| FQDN | VMID / target | Notes | +|------|---------------|--------| +| `keycloak.sankofa.nexus` | **7802** (detail in [ALL_VMIDS_ENDPOINTS.md](ALL_VMIDS_ENDPOINTS.md)) | IdP + `/admin` for platform operators | +| `portal.sankofa.nexus` | **7801** · `192.168.11.51:3000` | ✅ **Active** when NPM routes here; public OIDC / `NEXTAUTH_URL` via `scripts/deployment/sync-sankofa-portal-7801.sh` | +| `admin.sankofa.nexus` | 🔶 **Not pinned** in VM inventory | Hostname **intent**; NPM + app upstream TBD; may share **7801** until split | + ### Operator / systems (IP-gated + MFA) | FQDN | Kind | What should be displayed or returned | |------|------|--------------------------------------| | `dash.sankofa.nexus` | Web | **IP allowlisting** + **system authentication** + **MFA:** unified admin for Sankofa, Phoenix, Gitea, and related systems (not the client self-service portal). | +**Typical upstream:** 🔶 **Not pinned** in VM inventory until NPM and operator dash app are authoritative (same **Deployment Status** table). + ### Other properties on the zone | FQDN | Kind | What should be displayed or returned | @@ -93,8 +104,7 @@ | `rpc.public-0138.defi-oracle.io` | RPC-HTTP | **ThirdWeb-style HTTPS RPC** terminator on VMID 2400; JSON-RPC to Chain 138. | | `rpc.defi-oracle.io` | RPC-HTTP | Public JSON-RPC alias (same Besu public stack as `rpc.d-bis.org` family when healthy). | | `wss.defi-oracle.io` | RPC-WS | Public WebSocket RPC companion. | - -**Note:** `blockscout.defi-oracle.io` is a **separate Blockscout** hostname (generic / reference). 
Not the canonical DBIS explorer; same class of **web** explorer UI as Blockscout. See EXPECTED_WEB_CONTENT. +| `blockscout.defi-oracle.io` | Web | **Blockscout** explorer UI (generic / reference). When NPM proxies here, routing summaries align with **VMID 5000** (`192.168.11.140:80`, TLS at NPM). **Not** canonical **SolaceScanScout / Chain 138** branding—that is **`explorer.d-bis.org`**. Confirm live NPM if behavior differs. | --- @@ -116,4 +126,4 @@ --- -**Inventory alignment:** Public hostnames above follow `DOMAIN_TYPES_ALL` in `scripts/verify/verify-end-to-end-routing.sh` plus `keycloak.sankofa.nexus`, `docs.d-bis.org`, `blockscout.defi-oracle.io`, and xom-dev hosts. **`admin.sankofa.nexus`**, **`portal.sankofa.nexus`**, and **`dash.sankofa.nexus`** are **product-intent** hostnames—add to NPM and the E2E script when upstreams are wired. Add new rows here when you add NPM hosts. +**Inventory alignment:** `DOMAIN_TYPES_ALL` in `scripts/verify/verify-end-to-end-routing.sh` includes **`keycloak.sankofa.nexus`**, **`admin.sankofa.nexus`**, **`portal.sankofa.nexus`**, **`dash.sankofa.nexus`**, **`docs.d-bis.org`**, and **`blockscout.defi-oracle.io`** (see [E2E_ENDPOINTS_LIST.md](E2E_ENDPOINTS_LIST.md); `--list-endpoints --profile=public`). They are in **`E2E_OPTIONAL_WHEN_FAIL`** so unwired NPM or off-LAN runs still exit **0**. **`portal.sankofa.nexus`** is expected on **VMID 7801** when NPM is configured ( **Deployment Status** in [EXPECTED_WEB_CONTENT.md](../02-architecture/EXPECTED_WEB_CONTENT.md)). **`admin.sankofa.nexus`** and **`dash.sankofa.nexus`** remain **hostname intent** until pinned in [ALL_VMIDS_ENDPOINTS.md](ALL_VMIDS_ENDPOINTS.md). **`blockscout.defi-oracle.io`** aligns with **VMID 5000** in routing summaries (not **`explorer.d-bis.org`** branding). **xom-dev** hostnames are not in the E2E list yet—add when NPM routes are stable. 
diff --git a/docs/04-configuration/GITEA_ACT_RUNNER_SETUP.md b/docs/04-configuration/GITEA_ACT_RUNNER_SETUP.md index 0d385ad..1ccb81d 100644 --- a/docs/04-configuration/GITEA_ACT_RUNNER_SETUP.md +++ b/docs/04-configuration/GITEA_ACT_RUNNER_SETUP.md @@ -1,8 +1,18 @@ # Gitea act_runner Setup -**Last Updated:** 2026-02-10 +**Last Updated:** 2026-03-27 (bootstrap script + `ubuntu-latest` labels) **Gitea:** https://gitea.d-bis.org -**Runner host:** dev-vm (VMID 5700) at 192.168.11.59 +**Runner host:** dev-vm (VMID 5700) at 192.168.11.59 (Gitea HTTP on that host: port 3000) + +**Which Proxmox node?** VMID 5700 is not fixed to one server—confirm before `pct exec`: + +```bash +ssh root@192.168.11.10 'pct list | grep 5700' +ssh root@192.168.11.11 'pct list | grep 5700' +ssh root@192.168.11.12 'pct list | grep 5700' +``` + +Use the node where 5700 is **running** (often r630-02 / 192.168.11.12). --- @@ -18,48 +28,66 @@ ## Install act_runner +### Site-wide (admin API token, recommended) + +From the **proxmox** repo root, with **`GITEA_TOKEN`** (admin) in root `.env`: + ```bash -# From Proxmox host, run inside dev-vm: -GITEA_RUNNER_REGISTRATION_TOKEN= ssh root@192.168.11.11 "pct exec 5700 -- bash -s" < scripts/dev-vm/setup-act-runner.sh +bash scripts/dev-vm/bootstrap-gitea-act-runner-site-wide.sh +``` + +This calls `GET /api/v1/admin/runners/registration-token`, registers **act_runner** on CT **5700** with label **`ubuntu-latest`** (daemon shows `ubuntu-latest:host`, which matches workflow `runs-on: ubuntu-latest`), installs **systemd**, and starts the service. To re-register (e.g. change labels), run with `RUNNER_FORCE_REREGISTER=1`. + +### Manual registration token + +```bash +# From repo root; replace with the node that hosts 5700 (e.g. 
192.168.11.12): +GITEA_RUNNER_REGISTRATION_TOKEN=<token> ssh root@<node-ip> "pct exec 5700 -- bash -s" < scripts/dev-vm/setup-act-runner.sh ``` Or SSH into dev-vm (192.168.11.59) and run manually: ```bash -cd /opt # or preferred dir +cd /opt/act_runner GITEA_RUNNER_REGISTRATION_TOKEN=<token> bash /path/to/setup-act-runner.sh -cd /opt/act_runner && ./act_runner daemon +``` + +**Instance URL for `register`:** From inside dev-vm, Gitea is usually reachable as `http://127.0.0.1:3000` (same host). The setup script defaults to `http://192.168.11.59:3000`; override if needed: + +```bash +INSTANCE=http://127.0.0.1:3000 GITEA_RUNNER_REGISTRATION_TOKEN=<token> bash setup-act-runner.sh ``` --- ## Run as systemd service -Create `/etc/systemd/system/act-runner.service`: - -```ini -[Unit] -Description=Gitea act_runner -After=network.target - -[Service] -Type=simple -User=root -WorkingDirectory=/opt/act_runner -ExecStart=/opt/act_runner/act_runner daemon -Restart=on-failure -RestartSec=10 -Environment=GITEA_ACTION_URL=http://192.168.11.59:3000 - -[Install] -WantedBy=multi-user.target -``` +Prefer the install script (writes the unit, enables and starts the service): ```bash -systemctl daemon-reload -systemctl enable --now act-runner +ssh root@<node-ip> "pct exec 5700 -- bash -s" < scripts/dev-vm/install-act-runner-systemd.sh ``` +Optional: if Gitea is not on localhost from the runner’s view, set `GITEA_ACTION_URL` (must match a URL the runner can reach): + +```bash +ssh root@<node-ip> "pct exec 5700 -- env GITEA_ACTION_URL=http://192.168.11.59:3000 bash -s" < scripts/dev-vm/install-act-runner-systemd.sh +``` + +Manual unit (equivalent): `/etc/systemd/system/act-runner.service` with `Environment=GITEA_ACTION_URL=http://127.0.0.1:3000`, then `systemctl daemon-reload && systemctl enable --now act-runner`. + +--- + +## Troubleshooting + +| Symptom | Check | +|--------|--------| +| Jobs queued, never start | Gitea **Admin → Actions → Runners**: at least one runner **online**. Repo **Settings → Enable Repository Actions**.
| +| “No matching runner” / label errors | Workflow `runs-on:` must match runner labels (e.g. `ubuntu-latest`). In Gitea, open the runner details and compare labels. | +| Runner exits / register errors | Ensure `INSTANCE` URL is reachable from the container (`curl -sS -o /dev/null -w '%{http_code}\n' http://127.0.0.1:3000/`). Re-register with a **new** token if the old one was rotated (remove `.runner` first, then run `setup-act-runner.sh` again). | +| Docker steps fail | Install Docker on dev-vm and ensure the `act_runner` user (or root) can run `docker`. | +| Binary but no service | If `/opt/act_runner/act_runner` exists but there is **no** `/opt/act_runner/.runner`, registration never completed—run `setup-act-runner.sh` with a token. If `.runner` exists but no unit, run `install-act-runner-systemd.sh`. | + --- ## Enable Actions per repository diff --git a/docs/04-configuration/RPC_ENDPOINTS_MASTER.md b/docs/04-configuration/RPC_ENDPOINTS_MASTER.md index 502f32d..f0c5900 100644 --- a/docs/04-configuration/RPC_ENDPOINTS_MASTER.md +++ b/docs/04-configuration/RPC_ENDPOINTS_MASTER.md @@ -31,6 +31,25 @@ This is the **authoritative source** for all RPC endpoint configurations. All ot - Set in `config/ip-addresses.conf` or `smom-dbis-138/.env`. In smom `.env`, **`RPC_URL`** is an accepted alias for **Core** and is normalized to `RPC_URL_138`. `CHAIN138_RPC_URL` / `CHAIN138_RPC` are derived from `RPC_URL_138`. `WS_URL_138_PUBLIC` is the WebSocket for Public (e.g. `ws://192.168.11.221:8546`). - **Core RPC (VMID 2101) for deploy:** Use **IP and port**, not FQDN. Set `RPC_URL_138=http://192.168.11.211:8545` in `smom-dbis-138/.env` for contract deployment and gas checks. Do not use `https://rpc-core.d-bis.org` for deployment (avoids DNS/tunnel dependency; direct IP is reliable from LAN). See [TODOS_CONSOLIDATED](../00-meta/TODOS_CONSOLIDATED.md) § First (0b). 
+### Public RPC capability baseline + +The public Chain 138 RPC tier is expected to provide the following wallet-grade baseline: + +- `eth_chainId` +- `eth_blockNumber` +- `eth_syncing` +- `eth_gasPrice` +- `eth_feeHistory` +- `eth_maxPriorityFeePerGas` +- `eth_estimateGas` +- `eth_getCode` +- `trace_block` +- `trace_replayBlockTransactions` + +Use [scripts/verify/check-chain138-rpc-health.sh](../../scripts/verify/check-chain138-rpc-health.sh) for the live health and capability probe. + +If `eth_maxPriorityFeePerGas` is missing, the first fix path is the public node version on VMID `2201`. Besu `24.7.0+` adds support for that method; use [upgrade-public-rpc-vmid2201.sh](../../scripts/besu/upgrade-public-rpc-vmid2201.sh) to perform the targeted public-RPC upgrade. + | Variable / use | Canonical value | Notes | |----------------|-----------------|--------| | **RPC_URL_138** (Core) | `http://192.168.11.211:8545` | **Prefer IP:port for admin/deploy.** Fallback from off-LAN: `https://rpc-core.d-bis.org` | diff --git a/docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md b/docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md index 7090bdc..9a847f0 100644 --- a/docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md +++ b/docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md @@ -5,6 +5,11 @@ ### Recent operator maintenance (2026-03-28) +- **Fleet checks (same day, follow-up):** Ran `collect-storage-growth-data.sh --append`, `storage-monitor.sh check`, `proxmox-host-io-optimize-pass.sh` (swappiness/sysstat; host `fstrim` N/A on LVM root). **Load:** ml110 load dominated by **Besu (Java)** and **cloudflared**; r630-01 load improved after earlier spike (still many CTs). **r630-01 `data` thin:** after guest `fstrim` fleet, **pvesm** used% dropped slightly (e.g. **~71.6% → ~70.2%** on 2026-03-28 — reclaim varies by CT). **ZFS:** r630-01 / r630-02 `rpool` ONLINE; last scrub **2026-03-08**, 0 errors.
**`/proc/mdstat` (r630-01):** RAID devices present and active (no resync observed during check). +- **ml110 `cloudflared`:** If the process still runs as `tunnel run --token …` on the **command line**, the token is visible in **`ps`** and process listings. Prefer **`credentials-file` + YAML** (`scripts/cloudflare-tunnels/systemd/cloudflared-ml110.service`) or **`EnvironmentFile`** with tight permissions; **rotate the tunnel token** in Cloudflare if it was ever logged. +- **CT 7811 (r630-02, thin4):** Root was **100%** full (**~44 GiB** in `/var/log/syslog` + rotated `syslog.1`). **Remediation:** truncated `syslog` / `syslog.1` and restarted `rsyslog`; root **~6%** after fix. **`/etc/logrotate.d/rsyslog`** updated to **`daily` + `maxsize 200M`** (was weekly-only) and **`su root syslog`** so rotation runs before another multi-GB spike. If logs still grow fast, reduce app/rsyslog **facility** noise at the source. +- **CT 10100 (r630-01, thin1):** Root **WARN** (**~88–90%** on 8 GiB); growth mostly **`/var/lib/postgresql` (~5 GiB)**. **Remediation:** `pct resize 10100 rootfs +4G` + `resize2fs`; root **~57%** after. **Note:** Proxmox warned thin **overcommit** vs VG — monitor `pvesm` / `lvs` and avoid excessive concurrent disk expansions without pool growth. +- **`storage-monitor.sh`:** Fixed **`set -e` abort** on unreachable optional nodes and **pipe-subshell** so `ALERTS+=` runs in the main shell (alerts and summaries work). - **r630-01 `pve/data` (local-lvm):** Thin pool extended (+80 GiB data, +512 MiB metadata earlier); **LVM thin auto-extend** enabled in `lvm.conf` (`thin_pool_autoextend_threshold = 80`, `thin_pool_autoextend_percent = 20`); **dmeventd** must stay active. - **r630-01 `pve/thin1`:** Pool extended (+48 GiB data, +256 MiB metadata) to reduce pressure; metadata percent dropped accordingly. - **r630-01 `/var/lib/vz/dump`:** Removed obsolete **2026-02-15** vzdump archives/logs (~9 GiB); newer logs from 2026-02-28 retained. 
@@ -61,10 +66,10 @@ Fill and refresh from real data. **Est. monthly growth** and **Growth factor** s | Host / VM | Storage / path | Current used | Capacity | Growth factor | Est. monthly growth | Threshold | Action when exceeded | |-----------|----------------|--------------|----------|---------------|---------------------|-----------|----------------------| -| **r630-01** | data (LVM thin) | _e.g. 74%_ | pool size | Thin provisioned | VMs + compaction | **80%** warn, **95%** crit | fstrim CTs, migrate VMs, expand pool | -| **r630-01** | local-lvm | _%_ | — | — | — | 80 / 95 | Same | -| **r630-02** | thin1 / data | _%_ | — | — | — | 80 / 95 | Same | -| **ml110** | thin1 | _%_ | — | — | — | 80 / 95 | Same | +| **r630-01** | data (LVM thin) | **~70%** (pvesm after fstrim pass, 2026-03-28) | ~360G pool | Thin provisioned | VMs + compaction | **80%** warn, **95%** crit | fstrim CTs, migrate VMs, expand pool | +| **r630-01** | thin1 | **~48%** | ~256G pool | CT root disks on thin1 | Same | 80 / 95 | Same; watch overcommit vs `vgs` | +| **r630-02** | thin1–thin6 (`thin1-r630-02` …) | **~1–27%** per pool (2026-03-28) | ~226G each | Mixed CTs | Same | 80 / 95 | **VG free ~0.12 GiB per thin VG** — expand disk/PV before growing LVs | +| **ml110** | data / local-lvm | **~15%** | ~1.7T thin | Besu CTs | High | 80 / 95 | Same | | **2101** | / (root) | _%_ | 200G | Besu DB + logs | High (RocksDB) | 85 warn, 95 crit | e2fsck, make writable, free /data | | **2101** | /data/besu | _du_ | same as / | RocksDB + compaction | ~1–5% block growth | — | Resync or expand disk | | **2500–2505** | /, /data/besu | _%_ | — | Besu | Same | 85 / 95 | Same as 2101 | @@ -72,6 +77,8 @@ Fill and refresh from real data. **Est. 
monthly growth** and **Growth factor** s | **10130, 10150, 10151** | / | _%_ | — | Logs, app data | Low–medium | 85 / 95 | Logrotate, clean caches | | **5000** (Blockscout) | /, DB volume | _%_ | — | Postgres + indexer | Medium | 85 / 95 | VACUUM, archive old data | | **10233, 10234** (NPMplus) | / | _%_ | — | Logs, certs | Low | 85 / 95 | Logrotate | +| **7811** (r630-02) | /, `/var/log` | **~6%** after cleanup | 50G | Runaway **syslog** | Low if rotated | 85 / 95 | Truncate/rotate syslog; fix rsyslog/logrotate | +| **10100** (r630-01) | / | **~57%** after +4G | **12G** | **PostgreSQL** under `/var/lib` | DB growth | 85 / 95 | VACUUM/archive; resize cautiously (thin overcommit) | **Growth factor** short reference: diff --git a/docs/MASTER_INDEX.md b/docs/MASTER_INDEX.md index ab8b91a..10fe1e8 100644 --- a/docs/MASTER_INDEX.md +++ b/docs/MASTER_INDEX.md @@ -1,6 +1,6 @@ # Documentation — Master Index -**Last Updated:** 2026-03-27 +**Last Updated:** 2026-03-28 **Purpose:** Single entry point for all project documentation. Use this index to find canonical sources and avoid deprecated or duplicate content. **Status:** Preflight and Chain 138 next steps completed (59/59 on-chain per [check-contracts-on-chain-138.sh](../../scripts/verify/check-contracts-on-chain-138.sh), 12 c* GRU-registered). **2026-03-06:** Contract check list expanded to 59 addresses (PMM, vault/reserve, CompliantFiatTokens); doc refs updated. **2026-03-04:** Celo CCIP bridges deployed; Phase A–D tracked in [03-deployment/REMAINING_DEPLOYMENTS_FOR_FULL_NETWORK_COVERAGE.md](03-deployment/REMAINING_DEPLOYMENTS_FOR_FULL_NETWORK_COVERAGE.md). Phase C: [PHASE_C_CW_AND_EDGE_POOLS_RUNBOOK.md](03-deployment/PHASE_C_CW_AND_EDGE_POOLS_RUNBOOK.md); Phase D: [PHASE_D_OPTIONAL_CHECKLIST.md](03-deployment/PHASE_D_OPTIONAL_CHECKLIST.md). **On-chain verification:** DODOPMMIntegration canonical cUSDT/cUSDC — [EXPLORER_TOKEN_LIST_CROSSCHECK](11-references/EXPLORER_TOKEN_LIST_CROSSCHECK.md) §8. 
**Remaining:** Wemix 0.4 WEMIX, LINK fund, cW* + edge pools — see [00-meta/TODOS_CONSOLIDATED.md](00-meta/TODOS_CONSOLIDATED.md). @@ -57,8 +57,8 @@ | Area | Index / key doc | |------|-----------------| | **00-meta** (tasks, next steps, phases) | [00-meta/NEXT_STEPS_INDEX.md](00-meta/NEXT_STEPS_INDEX.md), [00-meta/PHASES_AND_TASKS_MASTER.md](00-meta/PHASES_AND_TASKS_MASTER.md) | -| **02-architecture** | [02-architecture/](02-architecture/) — **Public sector + Phoenix catalog baseline:** [02-architecture/PUBLIC_SECTOR_TENANCY_MARKETPLACE_AND_DEPLOYMENT_BASELINE.md](02-architecture/PUBLIC_SECTOR_TENANCY_MARKETPLACE_AND_DEPLOYMENT_BASELINE.md); **non-goals (incl. catalog vs marketing §9):** [02-architecture/NON_GOALS.md](02-architecture/NON_GOALS.md) | -| **03-deployment** | [03-deployment/OPERATIONAL_RUNBOOKS.md](03-deployment/OPERATIONAL_RUNBOOKS.md), [03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md](03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md), **Public sector live checklist:** [03-deployment/PUBLIC_SECTOR_LIVE_DEPLOYMENT_CHECKLIST.md](03-deployment/PUBLIC_SECTOR_LIVE_DEPLOYMENT_CHECKLIST.md), **Proxmox VE ops template:** [03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md](03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md) · [`config/proxmox-operational-template.json`](config/proxmox-operational-template.json) | +| **02-architecture** | [02-architecture/](02-architecture/) — **Public sector + Phoenix catalog baseline:** [02-architecture/PUBLIC_SECTOR_TENANCY_MARKETPLACE_AND_DEPLOYMENT_BASELINE.md](02-architecture/PUBLIC_SECTOR_TENANCY_MARKETPLACE_AND_DEPLOYMENT_BASELINE.md); **non-goals (incl. 
catalog vs marketing §9):** [02-architecture/NON_GOALS.md](02-architecture/NON_GOALS.md); **DBIS Chain 138:** [dbis_chain_138_technical_master_plan.md](../dbis_chain_138_technical_master_plan.md), [02-architecture/DBIS_NODE_ROLE_MATRIX.md](02-architecture/DBIS_NODE_ROLE_MATRIX.md), [02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md](02-architecture/DBIS_PHASE2_PROXMOX_SOVEREIGNIZATION_ROADMAP.md) | +| **03-deployment** | [03-deployment/OPERATIONAL_RUNBOOKS.md](03-deployment/OPERATIONAL_RUNBOOKS.md), [03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md](03-deployment/DEPLOYMENT_ORDER_OF_OPERATIONS.md), **Public sector live checklist:** [03-deployment/PUBLIC_SECTOR_LIVE_DEPLOYMENT_CHECKLIST.md](03-deployment/PUBLIC_SECTOR_LIVE_DEPLOYMENT_CHECKLIST.md), **Proxmox VE ops template:** [03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md](03-deployment/PROXMOX_VE_OPERATIONAL_DEPLOYMENT_TEMPLATE.md) · [`config/proxmox-operational-template.json`](config/proxmox-operational-template.json); **DBIS Phase 1–3:** [03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](03-deployment/PHASE1_DISCOVERY_RUNBOOK.md), [03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md](03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md), [03-deployment/CALIPER_CHAIN138_PERF_HOOK.md](03-deployment/CALIPER_CHAIN138_PERF_HOOK.md), [03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md](03-deployment/DBIS_HYPERLEDGER_RUNTIME_STATUS.md), [03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md](03-deployment/DBIS_PHASES_1_TO_3_PRODUCTION_GATE.md), **RTGS canonical production checklist and institutional-finance layers:** [03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md](03-deployment/DBIS_RTGS_E2E_REQUIREMENTS_MATRIX.md), [03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md](03-deployment/DBIS_RTGS_FX_TRANSACTION_CATALOG.md), [03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md](03-deployment/DBIS_RTGS_DEPOSITORY_AND_CUSTODY_OPERATING_MODEL.md), 
[03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md](03-deployment/DBIS_RTGS_FX_AND_LIQUIDITY_OPERATING_MODEL.md), [03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md](03-deployment/DBIS_RTGS_CONTROL_PLANE_DEPLOYMENT_CHECKLIST.md), [03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md](03-deployment/DBIS_RTGS_LATER_PHASE_SIDECARS_DEPLOYMENT_CHECKLIST.md), [03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md](03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_INTEGRATION_BLUEPRINT.md), [03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md](03-deployment/DBIS_RTGS_FIRST_SLICE_ARCHITECTURE.md), [03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md](03-deployment/DBIS_RTGS_FIRST_SLICE_DEPLOYMENT_CHECKLIST.md), [03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md](03-deployment/DBIS_HYBX_SIDECAR_BOUNDARY_MATRIX.md), [03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md](03-deployment/DBIS_MOJALOOP_INTEGRATION_STATUS.md), [03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md](03-deployment/DBIS_HYPERLEDGER_IDENTITY_STACK_DECISION.md) | | **04-configuration** | [04-configuration/README.md](04-configuration/README.md), [04-configuration/ADDITIONAL_PATHS_AND_EXTENSIONS.md](04-configuration/ADDITIONAL_PATHS_AND_EXTENSIONS.md) (paths, registry, token-mapping, LiFi/Jumper); **Chain 138 wallets:** [04-configuration/CHAIN138_WALLET_CONFIG_VALIDATION.md](04-configuration/CHAIN138_WALLET_CONFIG_VALIDATION.md); **Chain 2138 testnet wallets:** [04-configuration/CHAIN2138_WALLET_CONFIG_VALIDATION.md](04-configuration/CHAIN2138_WALLET_CONFIG_VALIDATION.md); **OMNL Indonesia / HYBX-BATCH-001:** [04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md](04-configuration/mifos-omnl-central-bank/HYBX_BATCH_001_OPERATOR_CHECKLIST.md), [04-configuration/mifos-omnl-central-bank/INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md](04-configuration/mifos-omnl-central-bank/INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md) | | 
**06-besu** | [06-besu/MASTER_INDEX.md](06-besu/MASTER_INDEX.md) | | **Testnet (2138)** | [testnet/DEFI_ORACLE_META_TESTNET_2138_RUNBOOK.md](testnet/DEFI_ORACLE_META_TESTNET_2138_RUNBOOK.md), [testnet/TESTNET_DEPLOYMENT.md](testnet/TESTNET_DEPLOYMENT.md) | diff --git a/docs/TODO.md b/docs/TODO.md index 9355076..bc7b3f8 100644 --- a/docs/TODO.md +++ b/docs/TODO.md @@ -5,6 +5,13 @@ ## Repository-wide TODO - **[00-meta/TODO_TASK_LIST_MASTER.md](00-meta/TODO_TASK_LIST_MASTER.md)** — Consolidated fixes, enhancements, gas steps, known issues, and recommendations for the Proxmox/Chain138 deployment and related repos. + - Includes the DBIS RTGS / HYBX / Hyperledger E2E stack tasks: + - Mifos / Fineract / OMNL + - Mojaloop integration + - HYBX sidecars + - Chain 138 settlement rail + - FireFly / Fabric / Indy runtime decisions + - additional Hyperledger layers such as Aries, AnonCreds, Ursa, Cacti, and Caliper ## Subproject TODOs diff --git a/explorer-monorepo b/explorer-monorepo index 630021c..3bca539 160000 --- a/explorer-monorepo +++ b/explorer-monorepo @@ -1 +1 @@ -Subproject commit 630021c04318f9db9e2623bee66d0d36841c17cd +Subproject commit 3bca5394fc25c30812f5883420553e7221eb9824 diff --git a/hybx_compliance_routing_sidecar_technical_plan.md b/hybx_compliance_routing_sidecar_technical_plan.md new file mode 100644 index 0000000..069f38f --- /dev/null +++ b/hybx_compliance_routing_sidecar_technical_plan.md @@ -0,0 +1,834 @@ +# HYBX Compliance & Routing Sidecar — Technical Plan + +## Purpose + +Design a dedicated **Compliance and Routing Sidecar** that integrates with the Transaction Composer engine to evaluate regulatory constraints, liquidity routing, fee paths, and settlement feasibility **before execution**. + +The sidecar acts as a **decision intelligence layer**, ensuring that all designed transactions are compliant, optimally routed, and operationally executable. + +--- + +# Core Concept + +The Compliance & Routing Sidecar functions as: + +1. Policy Engine +2. 
Compliance Validator +3. Routing Optimizer +4. Liquidity Resolver +5. Fee Path Generator +6. Risk Decision Engine + +It operates alongside the orchestration engine but **does not execute transactions**. + +It evaluates and returns **decisions, recommendations, and constraints**. + +--- + +# High-Level Architecture + +## Primary Engines + +The sidecar consists of six major subsystems: + +1. Policy Engine +2. Compliance Engine +3. Routing Engine +4. Liquidity Engine +5. Fee Engine +6. Risk Engine + +Each subsystem runs independently but communicates via shared context. + +--- + +# Integration Model + +## Deployment Pattern + +Sidecar runs adjacent to orchestration engine. + +Architecture Pattern: + +Composer UI → Orchestration Engine → Sidecar → Decision Response + +Not: + +Composer UI → Execution + +Execution is blocked until sidecar approval. + +--- + +# Core Responsibilities + +## 1 — Validate Transaction Design + +Evaluate: + +- Entity eligibility +- Jurisdiction constraints +- Currency permissions +- Cross-border restrictions +- Liquidity availability +- Fee compliance + +Return: + +PASS +WARN +FAIL + +--- + +## 2 — Generate Routing Paths + +Determine optimal route across: + +- Correspondent banks +- Nostro/Vostro accounts +- Settlement rails +- Liquidity partners + +Routing considers: + +- Latency +- Liquidity availability +- Regulatory constraints +- Fee efficiency + +--- + +## 3 — Enforce Regulatory Constraints + +Validate compliance against: + +- AML rules +- KYC requirements +- Sanctions lists +- FX regulations +- Settlement permissions + +--- + +## 4 — Compute Liquidity Strategy + +Determine: + +- Liquidity source +- FX conversion path +- Net settlement amount + +--- + +## 5 — Build Fee Distribution Logic + +Construct: + +- Fee tiers +- Fee sequence +- Net disbursement schedule + +--- + +## 6 — Assess Risk + +Calculate: + +- Transaction exposure +- Counterparty risk +- Settlement risk + +--- + +# Core Data Flow + +Transaction → Sidecar → Decision Graph → 
Orchestrator + +Sidecar returns: + +- Decision Status +- Route Plan +- Liquidity Plan +- Fee Plan +- Risk Score + +--- + +# API Design + +## Core Endpoint + +POST /evaluate-transaction + +Request: + +{ + transactionGraph, + participants, + accounts, + liquidity, + fees +} + +Response: + +{ + status, + routingPlan, + liquidityPlan, + feePlan, + complianceResults, + riskScore +} + +--- + +# Policy Engine Design + +## Role + +Policy engine defines regulatory logic. + +Policies stored as rule sets. + +--- + +## Policy Types + +### Jurisdiction Policies + +Examples: + +- Allowed currency pairs +- Cross-border transfer limits + +--- + +### Institution Policies + +Examples: + +- Settlement permissions +- Nostro relationship validity + +--- + +### Transaction Policies + +Examples: + +- Maximum transfer size +- Required approvals + +--- + +# Compliance Engine Design + +## Function + +Validates all nodes and edges in transaction graph. + +Checks include: + +- AML +- Sanctions +- FX permissions +- Participant eligibility + +--- + +# Routing Engine Design + +## Routing Model + +Graph-based pathfinding. + +Uses: + +Weighted directed graph. + +Weights include: + +- Fee cost +- Latency +- Liquidity availability + +Algorithm: + +Multi-criteria shortest path. + +--- + +# Transaction Composer Mapping + +## Role of `transaction-composer/` + +The package at `transaction-composer/` should be treated as the **operator and developer-facing UI/control surface** for the Compliance & Routing Sidecar, not as a separate orchestration product. 
+ +The intended relationship is: + +Transaction Composer UI → Orchestration API → Compliance & Routing Sidecar → Decision Output → Approved Execution Plan + +The composer is responsible for: + +- designing transaction topology +- capturing user intent +- validating graph structure +- previewing compliance and routing outcomes +- running dry-run simulation +- exporting graph and compiled transaction payloads + +The sidecar is responsible for: + +- policy evaluation +- jurisdiction checks +- route generation +- liquidity resolution +- fee planning +- risk scoring +- final decision output + +The composer **does not replace** the sidecar. +The sidecar **does not replace** the composer. +Together they form the design-time and pre-execution decision layer. + +## Current Composer Capabilities Already Aligned + +The current `transaction-composer/` MVP already provides the correct surface for several sidecar inputs: + +- typed transaction graph with financial node kinds +- graph edge validation +- transaction compilation into structured payloads +- compliance pre-checks +- dry-run simulation +- AI-assisted graph creation from natural-language prompts +- project save/load and JSON export + +This means the composer can already act as: + +- graph authoring client +- transaction payload builder +- initial validation client +- operator review console + +## Functional Mapping: Composer → Sidecar + +| Composer capability | Current package location | Sidecar responsibility it should feed | +|---|---|---| +| Node type system | `transaction-composer/src/types/nodeTypes.ts` | Canonical transaction graph schema consumed by sidecar | +| Edge validation | `transaction-composer/src/graph/validateConnection.ts` | Structural pre-validation before policy/routing evaluation | +| Transaction compiler | `transaction-composer/src/orchestration/transactionCompiler.ts` | Normalized request body for `/evaluate-transaction` | +| Local compliance checks | 
`transaction-composer/src/compliance/complianceEngine.ts` | Fast UI pre-check before full sidecar decision pass | +| Dry-run simulation | `transaction-composer/src/orchestration/dryRunEngine.ts` | Operator preview before execution approval | +| JSON export | `transaction-composer/src/export/exportTransaction.ts` | Offline review / audit / handoff artifact | +| AI prompt interpreter | `transaction-composer/src/ai/promptInterpreter.ts` | Rapid route drafting before formal sidecar evaluation | +| Action bar / state machine | `transaction-composer/src/components/BottomExecutionBar.tsx`, `transaction-composer/src/state/transactionMachine.ts` | User workflow for build → evaluate → simulate → approve | + +## Required Sidecar API Alignment + +The composer should become the primary client of the sidecar endpoint: + +`POST /evaluate-transaction` + +Recommended request model: + +```json +{ + "graph": { + "nodes": [], + "edges": [] + }, + "compiledTransaction": {}, + "context": { + "jurisdictions": [], + "participants": [], + "operator": {}, + "executionMode": "dry-run" + } +} +``` + +Recommended response model: + +```json +{ + "status": "PASS", + "routingPlan": {}, + "liquidityPlan": {}, + "feePlan": {}, + "complianceResults": [], + "riskScore": 0, + "warnings": [], + "blockingIssues": [] +} +``` + +The local `complianceEngine.ts` should remain as a fast client-side validation layer, while the sidecar becomes the authoritative server-side evaluator. + +## UI-to-Sidecar Screen Mapping + +### 1. Left Component Palette + +Maps to: + +- canonical transaction grammar +- supported route primitives +- institution and settlement building blocks + +### 2. Center Graph Canvas + +Maps to: + +- transaction topology authoring +- route visualization +- structural review before evaluation + +### 3. Right AI Chat Panel + +Maps to: + +- prompt-to-graph generation +- future policy explanations +- route recommendation explanations +- jurisdictional warnings returned by sidecar + +### 4. 
Bottom Execution Bar + +Maps to: + +- Build → local compile +- Validate → local pre-check plus sidecar evaluation +- Dry Run → local simulation plus optional sidecar route simulation +- Execute → only after sidecar approval and orchestration confirmation + +## Orchestrator Boundary + +The composer should call an orchestration API, not external rails directly. + +Recommended control sequence: + +1. Composer builds graph. +2. Composer compiles transaction. +3. Composer sends evaluation request to Compliance & Routing Sidecar. +4. Sidecar queries: + - jurisdictional cheat sheets + - institution policy registry + - liquidity adapters + - fee rules + - risk engines +5. Sidecar returns decision package. +6. Composer renders: + - PASS / WARN / FAIL + - route recommendation + - liquidity strategy + - fee path + - risk notes +7. Orchestrator receives approved decision package for execution. + +## Jurisdictional Cheat Sheet Integration + +The sidecar should not hardcode jurisdiction rules. + +Instead: + +- composer submits transaction context +- sidecar resolves all participating jurisdictions +- sidecar queries the Jurisdictional Intelligence System +- returned jurisdiction constraints become part of the decision package + +That means `hybx_jurisdictional_cheat_sheets_technical_plan.md` is a direct dependency of this sidecar and should be treated as its policy-knowledge backend. + +## Recommended Immediate Integration Tasks + +1. Add a new composer-side API client for `POST /evaluate-transaction`. +2. Add a sidecar decision panel in the composer UI for: + - decision status + - routing plan + - liquidity plan + - fee plan + - risk score +3. Split local validation into: + - `local graph validation` + - `authoritative sidecar evaluation` +4. Freeze a single canonical request/response schema between composer and sidecar. +5. Add sidecar-backed node and edge annotations so returned warnings/errors paint directly onto the graph. +6. 
Extend the AI panel so it can also explain sidecar findings, not just create graphs. + +## Production Architecture Decision + +`transaction-composer/` should be recognized as: + +- the design studio +- the operator console +- the pre-execution validation client + +The Compliance & Routing Sidecar should be recognized as: + +- the authoritative policy and routing decision service + +This mapping allows the broader DBIS/HYBX stack to answer questions about: + +- whether a route is legal +- whether liquidity is sufficient +- whether fees are valid +- whether settlement is feasible +- which path should be chosen before execution + +--- + +# Liquidity Engine Design + +## Responsibilities + +Determine liquidity sources. + +Resolve: + +- FX path +- Liquidity pool selection + +--- + +## Liquidity Model + +Liquidity stored as pools. + +Each pool includes: + +{ + currency, + amount, + provider, + expiry +} + +--- + +# Fee Engine Design + +## Responsibilities + +Build complete fee map. + +Includes: + +- Fee percentage +- Flat fees +- Conditional fees + +--- + +# Risk Engine Design + +## Risk Factors + +- Counterparty reliability +- Currency volatility +- Liquidity exposure + +Output: + +Risk Score: 0–100 + +--- + +# Decision Output Model + +Sidecar produces a Decision Graph. + +Structure: + +{ + complianceStatus, + routingGraph, + liquidityGraph, + feeGraph, + riskProfile +} + +--- + +# Routing Intelligence Model + +Routing is dynamic. + +Supports: + +- Multi-hop routing +- Failover paths +- Parallel routing + +--- + +# Execution Blocking Logic + +Execution permitted only if: + +Compliance PASS +Risk acceptable +Liquidity available + +--- + +# Sidecar Communication Protocol + +Transport Options: + +Preferred: + +- gRPC + +Alternative: + +- REST + +--- + +# State Management Model + +Each transaction tracked through lifecycle. 
+ +States: + +Draft +Validated +Routed +Approved +Ready +Executed +Failed + +--- + +# Storage Architecture + +## Databases + +Primary: + +PostgreSQL + +Secondary: + +Redis (cache) + +Optional: + +Graph Database (Neo4j) + +--- + +# Decision Caching Model + +Cache previous routing decisions. + +Speeds repeated transactions. + +Cache Key: + +Transaction Signature Hash + +--- + +# Policy Storage Model + +Policies stored as structured JSON. + +Supports: + +Versioning +Rollback +Audit + +--- + +# Compliance Logging + +Every validation logged. + +Includes: + +Timestamp +Policy Version +Result + +--- + +# Observability Architecture + +Metrics captured: + +- Decision latency +- Compliance failures +- Routing complexity + +--- + +# Monitoring Tools + +Recommended: + +Prometheus +Grafana +OpenTelemetry + +--- + +# Performance Targets + +Validation latency target: + +< 200 ms + +Routing latency target: + +< 500 ms + +--- + +# Horizontal Scaling Model + +Sidecar must scale independently. + +Scaling method: + +Container-based horizontal scaling. + +--- + +# Container Architecture + +Each subsystem deployable independently. + +Services: + +policy-service +compliance-service +routing-service +liquidity-service +fee-service +risk-service + +--- + +# Event Model + +Sidecar reacts to events. + +Events: + +TransactionCreated +TransactionUpdated +TransactionValidated +TransactionApproved + +--- + +# Failure Handling Model + +Failures categorized as: + +Soft Fail +Hard Fail +Retryable Fail + +--- + +# Security Architecture + +Authentication: + +mTLS + +Authorization: + +RBAC + +--- + +# Audit Model + +Full audit trail required. + +Tracks: + +All policy decisions +All routing changes +All risk evaluations + +--- + +# Decision Transparency Model + +Every decision must be explainable. + +Outputs include: + +Rule triggered +Reason +Outcome + +--- + +# AI Integration Capability + +Sidecar optionally supports AI-based recommendations. 
+ +Use cases: + +Suggest routing optimizations +Suggest liquidity paths +Flag anomalous patterns + +--- + +# Future Extensions + +Planned capabilities: + +Adaptive routing +Dynamic policy learning +Real-time liquidity discovery +Predictive compliance risk scoring + +--- + +# Minimum Viable Sidecar Components + +Required for MVP: + +Policy Engine +Compliance Engine +Routing Engine +Liquidity Engine +Risk Engine +Decision API + +--- + +# Production Readiness Milestones + +Phase 1: + +Basic compliance validation. + +Phase 2: + +Routing logic integration. + +Phase 3: + +Liquidity optimization. + +Phase 4: + +Full decision intelligence. + +--- + +# Final Outcome + +When complete, the Compliance & Routing Sidecar becomes: + +A deterministic decision engine that transforms transaction designs into validated, routable, executable workflows with full regulatory assurance. diff --git a/hybx_jurisdictional_cheat_sheets_technical_plan.md b/hybx_jurisdictional_cheat_sheets_technical_plan.md new file mode 100644 index 0000000..61775fa --- /dev/null +++ b/hybx_jurisdictional_cheat_sheets_technical_plan.md @@ -0,0 +1,698 @@ +# HYBX Jurisdictional Cheat Sheets — Technical Plan + +## Purpose + +Design a comprehensive **Jurisdictional Intelligence System (JIS)** that functions as the financial equivalent of a global intelligence reference, similar in concept to the CIA World Factbook but specialized for banking, payments, liquidity movement, settlement routing, and regulatory execution. + +This system provides **deterministic jurisdiction knowledge** used by the Compliance & Routing Sidecar to ensure that every transaction is legally executable within applicable jurisdictions. + +--- + +# Core Objective + +Create a structured, versioned, queryable **Book of Jurisdictional Cheat Sheets** covering every jurisdiction where financial activity may occur. 
+ +Each jurisdiction profile must provide: + +- Regulatory constraints +- Currency permissions +- Settlement rules +- Liquidity restrictions +- Licensing requirements +- Cross-border permissions +- Fee constraints +- Compliance requirements + +This becomes the **ground truth registry** for jurisdictional logic. + +--- + +# Core Concept + +The Jurisdictional Cheat Sheets system acts as: + +1. Jurisdiction Knowledge Base +2. Compliance Reference Library +3. Routing Constraint Source +4. FX Permission Authority +5. Settlement Legality Validator +6. Institutional Licensing Reference + +--- + +# System Role in Overall Architecture + +Primary Consumers: + +- Compliance Sidecar +- Routing Engine +- Liquidity Engine +- Transaction Composer +- Risk Engine + +Integration Flow: + +Transaction Graph → Sidecar → Jurisdiction Lookup → Decision Output + +## Mapping to `transaction-composer/` + +`transaction-composer/` should be treated as the human-facing authoring client for jurisdiction-aware transaction design. + +The practical flow is: + +1. Composer generates the transaction graph. +2. Composer compiles the graph into a normalized transaction request. +3. Compliance & Routing Sidecar resolves the relevant jurisdictions. +4. Jurisdictional Cheat Sheets return: + - currency permissions + - cross-border restrictions + - licensing requirements + - settlement legality + - reporting thresholds +5. Sidecar returns the decision package back to the composer UI for operator review. + +This means the cheat-sheet system is not a standalone reporting tool only. It is an online policy backend for the sidecar and a visible explanation source for the composer. + +--- + +# System Architecture Overview + +## Primary Subsystems + +1. Jurisdiction Registry +2. Policy Knowledge Base +3. Currency Rules Engine +4. Licensing Database +5. Cross-Border Rule Engine +6. 
Reporting and Visualization Layer + +--- + +# Jurisdiction Registry Design + +## Purpose + +Maintain canonical jurisdiction definitions. + +Each jurisdiction receives a **Jurisdiction ID**. + +--- + +## Core Fields + +Each jurisdiction record includes: + +Jurisdiction ID +Country Name +ISO Country Code +Region +Capital City +Primary Financial Regulator +Secondary Regulators +Legal System Type +Central Bank Name +Currency +Time Zones +Languages +Political Risk Tier +Financial Risk Tier +Sanctions Status + +--- + +# Regulatory Metadata Layer + +## Purpose + +Capture jurisdiction-specific financial regulations. + +--- + +## Regulatory Categories + +### Banking Regulations + +Fields: + +Bank Licensing Requirements +Minimum Capital Requirements +Reserve Requirements +Reporting Requirements +Audit Requirements + +--- + +### Payments Regulations + +Fields: + +Allowed Payment Types +Domestic Settlement Systems +Real-Time Gross Settlement Availability +Instant Payment Availability +Payment Network Participation + +--- + +### FX Regulations + +Fields: + +FX Convertibility Status +Allowed Currency Pairs +Capital Controls +FX Approval Requirements +Maximum FX Limits + +--- + +### Cross-Border Regulations + +Fields: + +Outbound Transfer Permissions +Inbound Transfer Permissions +Restricted Jurisdictions +Reporting Thresholds +Documentation Requirements + +--- + +# Currency Rules Engine + +## Purpose + +Define currency behavior within jurisdiction. + +--- + +## Currency Model + +Currency Code +Convertibility Level +Settlement Type +Liquidity Availability +Central Bank Restrictions +Digital Currency Support + +--- + +# Licensing Database Design + +## Purpose + +Track institutional permissions. 
+ +--- + +## License Types + +Commercial Bank License +Remittance License +Money Services License +Broker License +Liquidity Provider License + +--- + +## License Fields + +License ID +License Type +Issuing Authority +Validity Period +Operational Scope +Restrictions + +--- + +# Cross-Border Rule Engine + +## Purpose + +Define international transaction permissions. + +--- + +## Rule Types + +Country-to-Country Transfer Rules +Currency Export Rules +Sanctions Restrictions +Dual-Control Requirements +Capital Flow Restrictions + +--- + +# Settlement Infrastructure Registry + +## Purpose + +Catalog financial settlement systems. + +--- + +## Settlement System Fields + +System Name +Settlement Type +Operating Hours +Supported Currencies +Settlement Finality Model +Participation Requirements + +Examples: + +RTGS +ACH +Instant Payment Systems +Cross-border Clearing Networks + +--- + +# Liquidity Infrastructure Registry + +## Purpose + +Identify available liquidity channels. + +--- + +## Liquidity Fields + +Liquidity Providers +Supported Currency Pools +Liquidity Windows +Collateral Requirements +Liquidity Limits + +--- + +# Fee Governance Model + +## Purpose + +Define jurisdictional fee constraints. + +--- + +## Fee Fields + +Maximum Allowed Fees +Regulated Fee Categories +Mandatory Fee Disclosures +Taxation Rules +Stamp Duty Rules + +--- + +# Risk Intelligence Layer + +## Purpose + +Provide jurisdictional risk indicators. + +--- + +## Risk Indicators + +Political Stability Index +Regulatory Stability Index +Financial Crime Risk +AML Risk Tier +Sanctions Risk Tier +Currency Volatility Index + +--- + +# Sanctions Intelligence Module + +## Purpose + +Track restricted jurisdictions. + +--- + +## Sanctions Fields + +Sanctioning Authority +Sanctions Type +Restricted Activities +Blocked Entities +Sector Restrictions + +--- + +# Documentation Requirements Engine + +## Purpose + +Define required transaction documentation. 
+ +--- + +## Documentation Types + +Customer Identification +Beneficiary Documentation +Source of Funds Declaration +Purpose of Payment +Regulatory Filings + +--- + +# Data Model Design + +## Core Structure + +Each jurisdiction stored as structured object. + +Example: + +{ + jurisdictionId, + regulator, + currencyRules, + fxRules, + settlementSystems, + licensingRules, + crossBorderRules, + riskProfile +} + +--- + +# Versioning Architecture + +Every jurisdiction profile is versioned. + +Supports: + +Historical snapshots +Policy rollback +Audit tracking + +--- + +# Update Model + +Updates triggered by: + +Regulatory change +Policy update +Market changes + +--- + +# Data Acquisition Model + +Sources include: + +Official regulatory publications +Central bank releases +Financial supervisory authorities +International financial organizations +Licensed legal intelligence providers + +--- + +# Data Normalization Layer + +Normalize all incoming jurisdiction data. + +Ensure consistent schema alignment. + +--- + +# Validation Model + +Each jurisdiction profile must pass validation checks. + +Checks include: + +Field completeness +Logical consistency +Policy compatibility + +--- + +# Query Interface Design + +Provide high-speed lookup capability. + +Example Queries: + +Get FX permissions for USD → IDR +Check cross-border transfer legality +Retrieve settlement options +Fetch licensing requirements + +--- + +# API Architecture + +Primary Endpoint: + +GET /jurisdiction/{id} + +Returns full jurisdiction profile. + +--- + +# Search Capability + +Enable multi-dimensional search. + +Search filters: + +Country +Currency +Regulator +Risk Tier +Settlement Availability + +--- + +# Integration with Sidecar + +Sidecar queries jurisdiction data during validation. + +Used for: + +Routing validation +Compliance validation +Liquidity validation + +--- + +# Caching Model + +Frequently accessed jurisdictions cached. + +Cache TTL configurable. 
+ +--- + +# Storage Architecture + +Primary Storage: + +PostgreSQL + +Secondary Storage: + +ElasticSearch (search index) + +Optional: + +Graph Database + +--- + +# Visualization Interface + +Provide UI dashboard. + +Displays: + +Jurisdiction Overview +Currency Rules +Risk Indicators +Settlement Systems + +--- + +# Dashboard Components + +Interactive maps +Jurisdiction comparison tables +Risk heat maps +Compliance summaries + +--- + +# Policy Linking Model + +Each jurisdiction links to policy references. + +Policies stored separately. + +Referenced dynamically. + +--- + +# Multi-Jurisdiction Simulation Support + +Support modeling of multi-country flows. + +Example: + +USD → Indonesia → Botswana → Malta + +Simulate legal pathway. + +--- + +# Localization Support + +Support multilingual output. + +Languages configurable. + +--- + +# Audit Trail Model + +All changes logged. + +Includes: + +User +Timestamp +Change Summary +Previous Version + +--- + +# Security Architecture + +Access Control: + +Role-Based Access Control (RBAC) + +Encryption: + +Data-at-rest encryption +Data-in-transit encryption + +--- + +# Observability Architecture + +Track: + +Lookup latency +Query frequency +Update frequency + +--- + +# Performance Targets + +Lookup latency target: + +< 100 ms + +Search latency target: + +< 300 ms + +--- + +# Horizontal Scaling Model + +Scale by: + +Jurisdiction clusters + +Regions grouped geographically. + +--- + +# Global Coverage Requirement + +System must support: + +All sovereign states +Dependent territories +Special financial jurisdictions + +--- + +# Jurisdiction Classification Model + +Classify jurisdictions into tiers. + +Example: + +Tier 1 — Major Financial Centers +Tier 2 — Regional Banking Hubs +Tier 3 — Restricted or Emerging Jurisdictions + +--- + +# Deployment Model + +Deploy as independent service. + +Accessible via API. + +--- + +# Backup Strategy + +Nightly backups required. + +Geo-redundant storage recommended. 
+ +--- + +# Minimum Viable Jurisdiction Dataset + +Initial coverage: + +Top 50 global financial jurisdictions. + +Includes: + +United States +Indonesia +Singapore +Switzerland +Malta +United Kingdom +European Union jurisdictions + +--- + +# Production Expansion Plan + +Phase 1: + +Core jurisdictions. + +Phase 2: + +Full global coverage. + +Phase 3: + +Real-time regulatory updates. + +--- + +# Final Outcome + +When complete, the Jurisdictional Cheat Sheets system becomes: + +A continuously updated, machine-readable global financial intelligence reference that enables compliant, optimized, jurisdiction-aware financial transaction execution. diff --git a/reports/phase1-discovery/README.md b/reports/phase1-discovery/README.md new file mode 100644 index 0000000..ada874d --- /dev/null +++ b/reports/phase1-discovery/README.md @@ -0,0 +1,7 @@ +# Phase 1 discovery reports + +Timestamped artifacts from `bash scripts/verify/run-phase1-discovery.sh`. + +- **Naming:** `phase1-discovery-YYYYMMDD_HHMMSS.md` (report) and `.log` (mirror). +- **Runbook:** [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](../../docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md) +- **Git:** `reports/phase1-discovery/phase1-discovery-*.md` is listed in repo `.gitignore` (timestamped noise). `*.log` is already ignored globally. Commit only when you intentionally remove a report from the pattern or store evidence elsewhere. diff --git a/scripts/besu/upgrade-public-rpc-vmid2201.sh b/scripts/besu/upgrade-public-rpc-vmid2201.sh new file mode 100755 index 0000000..421922b --- /dev/null +++ b/scripts/besu/upgrade-public-rpc-vmid2201.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash +# Upgrade the public Chain 138 RPC node (VMID 2201) to a Besu version that supports +# eth_maxPriorityFeePerGas. Default target is the current fleet baseline. 
+# +# Usage: +# bash scripts/besu/upgrade-public-rpc-vmid2201.sh +# bash scripts/besu/upgrade-public-rpc-vmid2201.sh --dry-run +# BESU_VERSION=25.12.0 bash scripts/besu/upgrade-public-rpc-vmid2201.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true + +VMID="${RPC_VMID_2201:-2201}" +RPC_HOST="${RPC_VM_2201_HOST:-root@${PROXMOX_R630_02:-192.168.11.12}}" +[[ "$RPC_HOST" != *"@"* ]] && RPC_HOST="root@$RPC_HOST" + +BESU_VERSION="${BESU_VERSION:-25.12.0}" +BESU_TAR="besu-${BESU_VERSION}.tar.gz" +BESU_DIR="/opt/besu-${BESU_VERSION}" +DOWNLOAD_URL="${BESU_DOWNLOAD_URL:-https://github.com/hyperledger/besu/releases/download/${BESU_VERSION}/${BESU_TAR}}" +JAVA21_FALLBACK_URL="${JAVA21_FALLBACK_URL:-https://api.adoptium.net/v3/binary/latest/21/ga/linux/x64/jre/hotspot/normal/eclipse}" +RPC_HTTP_MAX_ACTIVE_CONNECTIONS="${RPC_HTTP_MAX_ACTIVE_CONNECTIONS:-256}" +RPC_WS_MAX_ACTIVE_CONNECTIONS="${RPC_WS_MAX_ACTIVE_CONNECTIONS:-256}" +LOCAL_CACHE="${LOCAL_CACHE:-/tmp}" +DRY_RUN=false +[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true + +SSH_OPTS=(-o StrictHostKeyChecking=accept-new -o ConnectTimeout=15) +RPC_IP="${RPC_PUBLIC_1:-192.168.11.221}" + +run_on_host() { + ssh "${SSH_OPTS[@]}" "$RPC_HOST" "$@" +} + +run_in_vmid() { + local cmd="$1" + if command -v pct >/dev/null 2>&1 && pct list 2>/dev/null | grep -q "^${VMID} "; then + pct exec "$VMID" -- bash -lc "$cmd" + else + run_on_host "pct exec ${VMID} -- bash -lc $(printf '%q' "$cmd")" + fi +} + +push_to_vmid() { + local src="$1" + local dest="$2" + if command -v pct >/dev/null 2>&1 && pct list 2>/dev/null | grep -q "^${VMID} "; then + pct push "$VMID" "$src" "$dest" + else + local host_tmp="/tmp/$(basename "$src")" + scp "${SSH_OPTS[@]}" "$src" "${RPC_HOST}:${host_tmp}" + run_on_host "pct push ${VMID} $(printf '%q' "$host_tmp") $(printf '%q' "$dest") && rm -f $(printf '%q' 
"$host_tmp")" + fi +} + +rpc_request() { + local method="$1" + local params="${2:-[]}" + curl -sS --max-time 20 -X POST "http://${RPC_IP}:8545" \ + -H "Content-Type: application/json" \ + -d "{\"jsonrpc\":\"2.0\",\"method\":\"${method}\",\"params\":${params},\"id\":1}" +} + +echo "==============================================" +echo "Upgrade public Chain 138 RPC (VMID ${VMID})" +echo "Host: ${RPC_HOST}" +echo "Target Besu version: ${BESU_VERSION}" +echo "==============================================" +if $DRY_RUN; then + echo "[dry-run] No changes will be made." +fi + +run_on_host "echo connected >/dev/null" + +run_in_vmid " + set -euo pipefail + if [[ ! -e /opt/besu ]]; then + fallback=\$(find /opt -maxdepth 1 -type d -name 'besu-*' | sort -V | tail -1) + if [[ -n \"\${fallback:-}\" ]]; then + ln -sfn \"\$fallback\" /opt/besu + chown -h besu:besu /opt/besu 2>/dev/null || true + fi + fi +" + +CURRENT_VERSION="$(run_in_vmid '/opt/besu/bin/besu --version 2>/dev/null || besu --version 2>/dev/null || true' | head -1 || true)" +JAVA_VERSION_RAW="$(run_in_vmid 'java -version 2>&1 | head -1' || true)" +echo "Current version: ${CURRENT_VERSION:-unknown}" +echo "Current Java: ${JAVA_VERSION_RAW:-unknown}" + +if $DRY_RUN; then + echo "[dry-run] Would download ${DOWNLOAD_URL}" + echo "[dry-run] Would stage ${BESU_TAR} in VMID ${VMID}, extract to ${BESU_DIR}, switch /opt/besu, restart besu-rpc." + exit 0 +fi + +mkdir -p "$LOCAL_CACHE" +if [[ ! -f "${LOCAL_CACHE}/${BESU_TAR}" ]]; then + echo "Downloading ${DOWNLOAD_URL} ..." + curl -fsSL -o "${LOCAL_CACHE}/${BESU_TAR}" "${DOWNLOAD_URL}" +fi + +echo "Pushing tarball into VMID ${VMID} ..." +push_to_vmid "${LOCAL_CACHE}/${BESU_TAR}" "/tmp/${BESU_TAR}" + +echo "Ensuring Java 21 runtime is present ..." 
+run_in_vmid " + set -euo pipefail + java_major=\$(java -version 2>&1 | sed -n '1s/.*version \"\\([0-9][0-9]*\\).*/\\1/p') + if [[ -z \"\${java_major:-}\" || \"\$java_major\" -lt 21 ]]; then + export DEBIAN_FRONTEND=noninteractive + apt-get update -qq + apt-get install -y -qq openjdk-21-jre-headless || true + java_major=\$(java -version 2>&1 | sed -n '1s/.*version \"\\([0-9][0-9]*\\).*/\\1/p') + if [[ -z \"\${java_major:-}\" || \"\$java_major\" -lt 21 ]]; then + command -v curl >/dev/null 2>&1 || apt-get install -y -qq curl ca-certificates + tmp_jre=/tmp/java21-jre.tar.gz + curl -fsSL -o \"\$tmp_jre\" '${JAVA21_FALLBACK_URL}' + tar -tzf \"\$tmp_jre\" > /tmp/java21-jre.list + extracted_dir=\$(head -1 /tmp/java21-jre.list | cut -d/ -f1) + rm -f /tmp/java21-jre.list + tar -xzf \"\$tmp_jre\" -C /opt + rm -f \"\$tmp_jre\" + ln -sfn \"/opt/\${extracted_dir}\" /opt/java-21 + update-alternatives --install /usr/bin/java java /opt/java-21/bin/java 2100 + fi + fi + config_file=\$(systemctl cat besu-rpc.service | sed -n 's/.*--config-file=\\([^ ]*\\).*/\\1/p' | tail -1) + if [[ -n \"\${config_file:-}\" && -f \"\$config_file\" ]]; then + find /etc/besu -maxdepth 1 -type f -name '*.toml' -print0 2>/dev/null | while IFS= read -r -d '' toml; do + sed -i \ + -e '/^[[:space:]]*miner-enabled[[:space:]]*=.*/d' \ + -e '/^[[:space:]]*privacy-enabled[[:space:]]*=.*/d' \ + \"\$toml\" + if grep -q '^rpc-http-enabled=true' \"\$toml\" && ! grep -q '^rpc-http-max-active-connections=' \"\$toml\"; then + tmp=\$(mktemp) + awk '1; /^rpc-http-port=/{print \"rpc-http-max-active-connections=${RPC_HTTP_MAX_ACTIVE_CONNECTIONS}\"}' \"\$toml\" > \"\$tmp\" + cat \"\$tmp\" > \"\$toml\" + rm -f \"\$tmp\" + fi + if grep -q '^rpc-ws-enabled=true' \"\$toml\" && ! 
grep -q '^rpc-ws-max-active-connections=' \"\$toml\"; then + tmp=\$(mktemp) + awk '1; /^rpc-ws-port=/{print \"rpc-ws-max-active-connections=${RPC_WS_MAX_ACTIVE_CONNECTIONS}\"}' \"\$toml\" > \"\$tmp\" + cat \"\$tmp\" > \"\$toml\" + rm -f \"\$tmp\" + fi + done + if ! grep -q '^data-storage-format=' \"\$config_file\"; then + tmp=\$(mktemp) + awk '1; /^sync-mode=/{print \"data-storage-format=\\\"FOREST\\\"\"}' \"\$config_file\" > \"\$tmp\" + cat \"\$tmp\" > \"\$config_file\" + rm -f \"\$tmp\" + fi + fi +" + +echo "Installing Besu ${BESU_VERSION} inside VMID ${VMID} ..." +run_in_vmid " + set -euo pipefail + cd /opt + if [[ -L /opt/besu ]]; then + current_target=\$(readlink -f /opt/besu) + current_version=\$(basename \"\$current_target\") + else + current_version=\$(/opt/besu/bin/besu --version 2>/dev/null | head -1 | sed -E 's#^.*/(v)?([0-9.]+).*\$#besu-\\2#') + [[ -z \"\$current_version\" ]] && current_version=besu-backup-pre-${BESU_VERSION} + mv /opt/besu \"/opt/\${current_version}\" + fi + rm -rf '${BESU_DIR}' + tar -xzf '/tmp/${BESU_TAR}' -C /opt + rm -f '/tmp/${BESU_TAR}' + ln -sfn '${BESU_DIR}' /opt/besu + chown -h besu:besu /opt/besu + chown -R besu:besu '${BESU_DIR}' /opt/besu-* 2>/dev/null || true +" + +echo "Restarting besu-rpc.service ..." +run_in_vmid "systemctl restart besu-rpc.service" +for _ in $(seq 1 24); do + ACTIVE_STATE="$(run_in_vmid 'systemctl is-active besu-rpc.service' || true)" + [[ "$ACTIVE_STATE" == "active" ]] && break + sleep 5 +done +NEW_VERSION="$(run_in_vmid '/opt/besu/bin/besu --version 2>/dev/null | grep -m1 "besu/" || true' | head -1 || true)" +echo "Service state: ${ACTIVE_STATE:-unknown}" +echo "New version: ${NEW_VERSION:-unknown}" + +echo "Verifying live RPC methods ..." 
+CHAIN_ID="$(rpc_request eth_chainId | jq -r '.result // empty' 2>/dev/null || true)"
+PRIORITY_FEE="$(curl -sS --max-time 20 -X POST 'https://rpc-http-pub.d-bis.org' -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","method":"eth_maxPriorityFeePerGas","params":[],"id":1}' | jq -r '.result // empty' 2>/dev/null || true)"
+TRACE_OK="$(rpc_request trace_block '["0x1"]' | jq -r 'has("result")' 2>/dev/null || true)"
+
+if [[ "$ACTIVE_STATE" != "active" || -z "$CHAIN_ID" || "$TRACE_OK" != "true" || -z "$PRIORITY_FEE" ]]; then
+  echo "ERROR: post-upgrade verification failed."
+  echo "  eth_chainId result: ${CHAIN_ID:-missing}"
+  echo "  trace_block result present: ${TRACE_OK:-false}"
+  echo "  eth_maxPriorityFeePerGas result: ${PRIORITY_FEE:-missing}"
+  exit 1
+fi
+
+echo "OK: VMID ${VMID} upgraded successfully and public RPC now exposes eth_maxPriorityFeePerGas." diff --git a/scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh b/scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh new file mode 100644 index 0000000..bbde559 --- /dev/null +++ b/scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Create placeholder LXCs for the DBIS RTGS control plane. 
+# Usage: +# ./scripts/deployment/create-dbis-rtgs-control-plane-lxcs.sh [--dry-run] + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" +TEMPLATE="${PVE_LXC_TEMPLATE:-local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst}" +STORAGE="${PVE_STORAGE:-local-lvm}" +BRIDGE="${PVE_BRIDGE:-vmbr0}" +GATEWAY="${PVE_GATEWAY:-192.168.11.1}" + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true +fi + +LXCS=( + "${RTGS_ORCH_VMID:-5805} ${RTGS_ORCH_HOSTNAME:-rtgs-orchestrator-1} ${RTGS_ORCH_IP:-192.168.11.93} 4096 2 24" + "${RTGS_FX_VMID:-5806} ${RTGS_FX_HOSTNAME:-rtgs-fx-1} ${RTGS_FX_IP:-192.168.11.94} 4096 2 24" + "${RTGS_LIQ_VMID:-5807} ${RTGS_LIQ_HOSTNAME:-rtgs-liquidity-1} ${RTGS_LIQ_IP:-192.168.11.95} 4096 2 24" +) + +run_remote() { + local cmd="$1" + if $DRY_RUN; then + echo "[DRY-RUN] $cmd" + else + ssh $SSH_OPTS "root@$HOST" "$cmd" + fi +} + +echo "=== DBIS RTGS control-plane LXCs ===" +echo "Host: $HOST" +echo "Template: $TEMPLATE" +echo + +for spec in "${LXCS[@]}"; do + read -r vmid hostname ip memory cores disk <<<"$spec" + cmd="pct create $vmid $TEMPLATE \ + --hostname $hostname \ + --cores $cores \ + --memory $memory \ + --rootfs ${STORAGE}:${disk} \ + --net0 name=eth0,bridge=${BRIDGE},gw=${GATEWAY},ip=${ip}/24 \ + --onboot 1 \ + --unprivileged 1 \ + --features nesting=1 \ + --password \$(openssl rand -base64 18) \ + --description 'DBIS RTGS control-plane LXC ($hostname)'" + run_remote "$cmd" +done diff --git a/scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh b/scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh new file mode 100644 index 0000000..e5cc177 --- /dev/null +++ b/scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Create placeholder LXCs for later-phase DBIS RTGS sidecars. 
+# Usage: +# ./scripts/deployment/create-dbis-rtgs-later-phase-sidecar-lxcs.sh [--dry-run] + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" +TEMPLATE="${PVE_LXC_TEMPLATE:-local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst}" +STORAGE="${PVE_STORAGE:-local-lvm}" +BRIDGE="${PVE_BRIDGE:-vmbr0}" +GATEWAY="${PVE_GATEWAY:-192.168.11.1}" + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true +fi + +LXCS=( + "${RTGS_SECURITIES_VMID:-5808} ${RTGS_SECURITIES_HOSTNAME:-rtgs-securities-1} ${RTGS_SECURITIES_IP:-192.168.11.96} 4096 2 24" + "${RTGS_CARDNET_VMID:-5809} ${RTGS_CARDNET_HOSTNAME:-rtgs-cardnet-1} ${RTGS_CARDNET_IP:-192.168.11.97} 4096 2 24" + "${RTGS_MT103_VMID:-5810} ${RTGS_MT103_HOSTNAME:-rtgs-mt103-1} ${RTGS_MT103_IP:-192.168.11.98} 4096 2 24" +) + +run_remote() { + local cmd="$1" + if $DRY_RUN; then + echo "[DRY-RUN] $cmd" + else + ssh $SSH_OPTS "root@$HOST" "$cmd" + fi +} + +echo "=== DBIS RTGS later-phase sidecar LXCs ===" +echo "Host: $HOST" +echo "Template: $TEMPLATE" +echo + +for spec in "${LXCS[@]}"; do + read -r vmid hostname ip memory cores disk <<<"$spec" + cmd="pct create $vmid $TEMPLATE \ + --hostname $hostname \ + --cores $cores \ + --memory $memory \ + --rootfs ${STORAGE}:${disk} \ + --net0 name=eth0,bridge=${BRIDGE},gw=${GATEWAY},ip=${ip}/24 \ + --onboot 1 \ + --unprivileged 1 \ + --features nesting=1 \ + --password \$(openssl rand -base64 18) \ + --description 'DBIS RTGS later-phase sidecar LXC ($hostname)'" + run_remote "$cmd" +done diff --git a/scripts/deployment/create-dbis-rtgs-sidecar-lxcs.sh b/scripts/deployment/create-dbis-rtgs-sidecar-lxcs.sh new file mode 100755 index 0000000..640208e --- /dev/null +++ b/scripts/deployment/create-dbis-rtgs-sidecar-lxcs.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Create the three DBIS RTGS first-slice sidecar LXCs on r630-02. 
+# Usage: +#   ./scripts/deployment/create-dbis-rtgs-sidecar-lxcs.sh [--dry-run] + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true + +HOST="${PROXMOX_HOST_R630_02:-${PROXMOX_R630_02:-192.168.11.12}}" +NETWORK="${NETWORK:-vmbr0}" +GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}" +DNS="${DNS_PRIMARY:-1.1.1.1}" +STORAGE="${RTGS_SIDECAR_STORAGE:-thin3}" +TEMPLATE="${TEMPLATE_UBUNT24:-local:vztmpl/ubuntu-24.04-standard_24.04-1_amd64.tar.zst}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then +  DRY_RUN=true +fi + +SIDECARS=( +  "5802 rtgs-scsm-1 192.168.11.89 4096 2 24" +  "5803 rtgs-funds-1 192.168.11.90 4096 2 24" +  "5804 rtgs-xau-1 192.168.11.92 4096 2 24" +) + +resolve_template() { +  if ssh $SSH_OPTS "root@$HOST" "pveam list local 2>/dev/null | grep -q 'ubuntu-24.04-standard'" 2>/dev/null; then +    echo "local:vztmpl/ubuntu-24.04-standard_24.04-1_amd64.tar.zst" +  elif ssh $SSH_OPTS "root@$HOST" "pveam list local 2>/dev/null | grep -q 'ubuntu-22.04-standard'" 2>/dev/null; then +    echo "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst" +  else +    echo "$TEMPLATE" +  fi +} + +TEMPLATE="$(resolve_template)" + +echo "=== DBIS RTGS first-slice sidecar LXCs ===" +echo "Host:     $HOST" +echo "Storage:  $STORAGE" +echo "Template: $TEMPLATE" +echo + +for spec in "${SIDECARS[@]}"; do +  read -r VMID HOSTNAME IP MEMORY CORES ROOTFS_GB <<<"$spec" +  if ssh $SSH_OPTS "root@$HOST" "pct status $VMID >/dev/null 2>&1"; then +    echo "CT $VMID already exists on $HOST; skipping create." 
+ continue + fi + + CREATE_CMD="pct create $VMID $TEMPLATE \ + --hostname $HOSTNAME \ + --memory $MEMORY \ + --cores $CORES \ + --rootfs $STORAGE:${ROOTFS_GB} \ + --net0 name=eth0,bridge=$NETWORK,ip=$IP/24,gw=$GATEWAY \ + --features nesting=1,keyctl=1 \ + --nameserver $DNS \ + --onboot 1 \ + --start 1 \ + --unprivileged 0 \ + --description 'DBIS RTGS first-slice sidecar LXC ($HOSTNAME)'" + + if $DRY_RUN; then + echo "[DRY-RUN] $CREATE_CMD" + echo + continue + fi + + echo "Creating CT $VMID ($HOSTNAME, $IP)..." + ssh $SSH_OPTS "root@$HOST" "$CREATE_CMD" +done + +echo "Done." diff --git a/scripts/deployment/deploy-dbis-rtgs-control-plane.sh b/scripts/deployment/deploy-dbis-rtgs-control-plane.sh new file mode 100644 index 0000000..a5e790c --- /dev/null +++ b/scripts/deployment/deploy-dbis-rtgs-control-plane.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Deploy the DBIS RTGS control-plane services when artifacts are available. +# Usage: +# ./scripts/deployment/deploy-dbis-rtgs-control-plane.sh [--dry-run] + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +ORCH_VMID="${RTGS_ORCH_VMID:-5805}" +FX_VMID="${RTGS_FX_VMID:-5806}" +LIQ_VMID="${RTGS_LIQ_VMID:-5807}" + +ORCH_JAR="${RTGS_ORCH_JAR:-}" +FX_JAR="${RTGS_FX_JAR:-}" +LIQ_JAR="${RTGS_LIQ_JAR:-}" + +OMNL_BASE_URL="${OMNL_FINERACT_BASE_URL:-http://192.168.11.85:8080/fineract-provider/api/v1}" +OMNL_TENANT="${OMNL_FINERACT_TENANT:-omnl}" +OMNL_USER="${OMNL_FINERACT_USER:-}" +OMNL_PASSWORD="${OMNL_FINERACT_PASSWORD:-}" + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true +fi + +run_remote() { + local vmid="$1" + local cmd="$2" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] $cmd" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc $(printf '%q' "$cmd")" + fi +} + +push_file() { + local vmid="$1" + local src="$2" + local dest="$3" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] copy $src -> 
$dest" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- mkdir -p $(dirname "$dest")" + ssh $SSH_OPTS "root@$HOST" "cat > /tmp/$(basename "$dest")" < "$src" + ssh $SSH_OPTS "root@$HOST" "pct push $vmid /tmp/$(basename "$dest") $dest >/dev/null && rm -f /tmp/$(basename "$dest")" + fi +} + +setup_base_runtime() { + local vmid="$1" + run_remote "$vmid" "export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y openjdk-21-jre-headless redis-server curl ca-certificates" + run_remote "$vmid" "systemctl enable redis-server --now" +} + +require_artifact() { + local label="$1" + local path="$2" + if [[ -z "$path" ]]; then + echo "Missing ${label}: set the corresponding RTGS_*_JAR env var." >&2 + exit 1 + fi + if [[ ! -f "$path" ]]; then + echo "Missing ${label} artifact: $path" >&2 + exit 1 + fi +} + +deploy_service() { + local vmid="$1" + local service_name="$2" + local jar_path="$3" + local env_path="$4" + local env_content="$5" + local workdir="/opt/dbis-rtgs/${service_name}" + local unitfile + + setup_base_runtime "$vmid" + push_file "$vmid" "$jar_path" "${workdir}/${service_name}.jar" + + local env_tmp + env_tmp="$(mktemp)" + cat > "$env_tmp" <<<"$env_content" + push_file "$vmid" "$env_tmp" "$env_path" + rm -f "$env_tmp" + + unitfile="$(mktemp)" + cat > "$unitfile" </dev/null 2>&1; then + CHAIN138_SETTLEMENT_MERCHANT_ADDRESS="$(cast wallet address "$CHAIN138_SETTLEMENT_PRIVATE_KEY" 2>/dev/null || true)" +fi + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true +fi + +TARGETS="${TARGETS:-scsm,funds,xau}" + +require_file() { + local path="$1" + if [[ ! 
-f "$path" ]]; then + echo "Missing required artifact: $path" >&2 + exit 1 + fi +} + +require_file "$SCSM_JAR" +require_file "$FUNDS_JAR" +require_file "$XAU_JAR" + +run_remote() { + local vmid="$1" + local cmd="$2" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] $cmd" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc $(printf '%q' "$cmd")" + fi +} + +target_enabled() { + local want="$1" + [[ ",$TARGETS," == *",$want,"* ]] +} + +wait_for_health() { + local vmid="$1" + local url="$2" + local out_file="$3" + local attempts="${4:-20}" + local sleep_seconds="${5:-2}" + local cmd="for i in \$(seq 1 $attempts); do if curl -sf \"$url\" > \"$out_file\"; then cat \"$out_file\"; exit 0; fi; sleep $sleep_seconds; done; exit 7" + run_remote "$vmid" "$cmd" +} + +push_file() { + local vmid="$1" + local src="$2" + local dest="$3" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] copy $src -> $dest" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- mkdir -p $(dirname "$dest")" + ssh $SSH_OPTS "root@$HOST" "cat > /tmp/$(basename "$dest")" < "$src" + ssh $SSH_OPTS "root@$HOST" "pct push $vmid /tmp/$(basename "$dest") $dest >/dev/null && rm -f /tmp/$(basename "$dest")" + fi +} + +setup_base_runtime() { + local vmid="$1" + run_remote "$vmid" "export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y openjdk-21-jre-headless redis-server curl ca-certificates" + run_remote "$vmid" "systemctl enable redis-server --now" +} + +deploy_scsm() { + local vmid=5802 + setup_base_runtime "$vmid" + push_file "$vmid" "$SCSM_JAR" "/opt/dbis-rtgs/scsm/scsm-app.jar" + local envfile unit + envfile="$(mktemp)" + cat > "$envfile" < "$unit" <<'EOF' +[Unit] +Description=DBIS RTGS SCSM sidecar +After=network-online.target redis-server.service +Wants=network-online.target + +[Service] +User=root +WorkingDirectory=/opt/dbis-rtgs/scsm +EnvironmentFile=/etc/dbis-rtgs/scsm.env +ExecStart=/usr/bin/java -jar /opt/dbis-rtgs/scsm/scsm-app.jar +Restart=always +RestartSec=5 + 
+[Install] +WantedBy=multi-user.target +EOF + push_file "$vmid" "$unit" "/etc/systemd/system/dbis-rtgs-scsm.service" + rm -f "$unit" + run_remote "$vmid" "mkdir -p /var/lib/dbis-rtgs/scsm /opt/dbis-rtgs/scsm /etc/dbis-rtgs && systemctl daemon-reload && systemctl enable dbis-rtgs-scsm && systemctl restart dbis-rtgs-scsm" + wait_for_health "$vmid" "http://127.0.0.1:8080/actuator/health" "/tmp/scsm-health.json" +} + +deploy_funds() { + local vmid=5803 + setup_base_runtime "$vmid" + push_file "$vmid" "$FUNDS_JAR" "/opt/dbis-rtgs/funds/funds-app.jar" + local envfile unit + envfile="$(mktemp)" + cat > "$envfile" < "$unit" <<'EOF' +[Unit] +Description=DBIS RTGS server-funds sidecar +After=network-online.target redis-server.service +Wants=network-online.target + +[Service] +User=root +WorkingDirectory=/opt/dbis-rtgs/funds +EnvironmentFile=/etc/dbis-rtgs/funds.env +ExecStart=/usr/bin/java -jar /opt/dbis-rtgs/funds/funds-app.jar +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + push_file "$vmid" "$unit" "/etc/systemd/system/dbis-rtgs-funds.service" + rm -f "$unit" + run_remote "$vmid" "mkdir -p /var/lib/dbis-rtgs/funds /opt/dbis-rtgs/funds /etc/dbis-rtgs && systemctl daemon-reload && systemctl enable dbis-rtgs-funds && systemctl restart dbis-rtgs-funds" + wait_for_health "$vmid" "http://127.0.0.1:8080/actuator/health" "/tmp/funds-health.json" +} + +deploy_xau() { + local vmid=5804 + setup_base_runtime "$vmid" + push_file "$vmid" "$XAU_JAR" "/opt/dbis-rtgs/xau/off-ledger-2-on-ledger-sidecar.jar" + local envfile unit + envfile="$(mktemp)" + cat > "$envfile" < "$unit" <<'EOF' +[Unit] +Description=DBIS RTGS XAU conversion sidecar +After=network-online.target +Wants=network-online.target + +[Service] +User=root +WorkingDirectory=/opt/dbis-rtgs/xau +EnvironmentFile=/etc/dbis-rtgs/xau.env +ExecStart=/usr/bin/java -jar /opt/dbis-rtgs/xau/off-ledger-2-on-ledger-sidecar.jar +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + 
push_file "$vmid" "$unit" "/etc/systemd/system/dbis-rtgs-xau.service" + rm -f "$unit" + run_remote "$vmid" "mkdir -p /opt/dbis-rtgs/xau /etc/dbis-rtgs && systemctl daemon-reload && systemctl enable dbis-rtgs-xau && systemctl restart dbis-rtgs-xau" + wait_for_health "$vmid" "http://127.0.0.1:8080/actuator/health" "/tmp/xau-health.json" +} + +echo "=== Deploy DBIS RTGS first-slice sidecars ===" +echo "Host: $HOST" +echo + +if target_enabled scsm; then + deploy_scsm +fi +if target_enabled funds; then + deploy_funds +fi +if target_enabled xau; then + deploy_xau +fi + +echo +echo "Done." diff --git a/scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh b/scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh new file mode 100644 index 0000000..117665f --- /dev/null +++ b/scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Deploy later-phase DBIS RTGS sidecars when artifacts are available. +# Usage: +# ./scripts/deployment/deploy-dbis-rtgs-later-phase-sidecars.sh [--dry-run] + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +SECT_VMID="${RTGS_SECURITIES_VMID:-5808}" +CARD_VMID="${RTGS_CARDNET_VMID:-5809}" +MT103_VMID="${RTGS_MT103_VMID:-5810}" + +SECT_JAR="${RTGS_SECURITIES_JAR:-/home/intlc/projects/HYBX_Sidecars/securities-sidecar/securities-app/target/securities-app-1.0.0-SNAPSHOT.jar}" +CARD_JAR="${RTGS_CARDNET_JAR:-/home/intlc/projects/HYBX_Sidecars/card-networks-sidecar/cardnet-app/target/cardnet-app-1.0.0-SNAPSHOT.jar}" +MT103_BIN="${RTGS_MT103_BIN:-/home/intlc/projects/HYBX_Sidecars/mt103-hardcopy-sidecar/server}" + +OMNL_BASE_URL="${OMNL_FINERACT_BASE_URL:-http://192.168.11.85:8080/fineract-provider/api/v1}" +OMNL_TENANT="${OMNL_FINERACT_TENANT:-omnl}" +OMNL_USER="${OMNL_FINERACT_USER:-}" +OMNL_PASSWORD="${OMNL_FINERACT_PASSWORD:-}" 
+MT103_DATABASE_URL="${MT103_DATABASE_URL:-postgres://localhost/mt103_sidecar?sslmode=disable}" + +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true +fi + +run_remote() { + local vmid="$1" + local cmd="$2" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] $cmd" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc $(printf '%q' "$cmd")" + fi +} + +push_file() { + local vmid="$1" + local src="$2" + local dest="$3" + if $DRY_RUN; then + echo "[DRY-RUN][CT $vmid] copy $src -> $dest" + else + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- mkdir -p $(dirname "$dest")" + ssh $SSH_OPTS "root@$HOST" "cat > /tmp/$(basename "$dest")" < "$src" + ssh $SSH_OPTS "root@$HOST" "pct push $vmid /tmp/$(basename "$dest") $dest >/dev/null && rm -f /tmp/$(basename "$dest")" + fi +} + +require_file() { + local path="$1" + if [[ ! -f "$path" ]]; then + echo "Missing required artifact: $path" >&2 + exit 1 + fi +} + +setup_java_runtime() { + local vmid="$1" + run_remote "$vmid" "export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y openjdk-21-jre-headless redis-server curl ca-certificates" + run_remote "$vmid" "systemctl enable redis-server --now" +} + +setup_go_runtime() { + local vmid="$1" + run_remote "$vmid" "export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y curl ca-certificates" +} + +deploy_java_sidecar() { + local vmid="$1" + local svc="$2" + local jar="$3" + local env_content="$4" + require_file "$jar" + setup_java_runtime "$vmid" + push_file "$vmid" "$jar" "/opt/dbis-rtgs/${svc}/${svc}.jar" + + local envfile unit + envfile="$(mktemp)" + cat > "$envfile" <<<"$env_content" + push_file "$vmid" "$envfile" "/etc/dbis-rtgs/${svc}.env" + rm -f "$envfile" + + unit="$(mktemp)" + cat > "$unit" < "$envfile" < "$unit" <<'EOF' +[Unit] +Description=DBIS RTGS mt103-hardcopy-sidecar +After=network-online.target +Wants=network-online.target + +[Service] +User=root +WorkingDirectory=/opt/dbis-rtgs/mt103 
+EnvironmentFile=/etc/dbis-rtgs/mt103.env +ExecStart=/opt/dbis-rtgs/mt103/server +Restart=always +RestartSec=5 + +[Install] +WantedBy=multi-user.target +EOF + push_file "$MT103_VMID" "$unit" "/etc/systemd/system/dbis-rtgs-mt103.service" + rm -f "$unit" + run_remote "$MT103_VMID" "mkdir -p /opt/dbis-rtgs/mt103 /etc/dbis-rtgs /var/lib/dbis-rtgs/mt103/storage && systemctl daemon-reload && systemctl enable dbis-rtgs-mt103 && systemctl restart dbis-rtgs-mt103" +} + +deploy_java_sidecar "$SECT_VMID" "securities" "$SECT_JAR" "$(cat <&2 + exit 1 +fi + +REG_JSON="$(curl -sS -H "Authorization: token ${GITEA_TOKEN}" \ + "${GITEA_URL}/api/v1/admin/runners/registration-token")" +REG_TOKEN="$(printf '%s' "$REG_JSON" | sed -n 's/.*"token"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p')" +if [[ -z "$REG_TOKEN" || "$REG_TOKEN" == "null" ]]; then + echo "ERROR: Could not get admin registration token. Response:" >&2 + printf '%s\n' "$REG_JSON" >&2 + echo "Ensure GITEA_TOKEN is an admin token with access to GET /api/v1/admin/runners/registration-token" >&2 + exit 1 +fi + +PROXMOX_HOST="$(get_host_for_vmid "$VMID")" +echo "Using Proxmox host ${PROXMOX_HOST} for VMID ${VMID}." + +if [[ "${RUNNER_FORCE_REREGISTER:-0}" == "1" ]]; then + ssh -o BatchMode=yes -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" \ + "pct exec ${VMID} -- bash -lc 'rm -f /opt/act_runner/.runner; systemctl stop act-runner 2>/dev/null || true'" +fi + +# Pass registration token into the container without embedding raw secret in ssh argv (still reversible from b64). 
+TB64="$(printf '%s' "$REG_TOKEN" | base64 | tr -d '\n')" +ssh -o BatchMode=yes -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" \ + "pct exec ${VMID} -- bash -c 'export GITEA_RUNNER_REGISTRATION_TOKEN=\$(printf %s \"${TB64}\" | base64 -d); export RUNNER_LABELS=\"${RUNNER_LABELS}\"; bash -s'" \ + < "${SCRIPT_DIR}/setup-act-runner.sh" + +ssh -o BatchMode=yes -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" \ + "pct exec ${VMID} -- bash -s" < "${SCRIPT_DIR}/install-act-runner-systemd.sh" + +echo "Done. Check Gitea Admin → Actions → Runners for an online runner with labels including: ${RUNNER_LABELS}" diff --git a/scripts/dev-vm/install-act-runner-systemd.sh b/scripts/dev-vm/install-act-runner-systemd.sh new file mode 100755 index 0000000..e3b6815 --- /dev/null +++ b/scripts/dev-vm/install-act-runner-systemd.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Install systemd unit for Gitea act_runner on the Gitea host (e.g. dev-vm 5700). +# Run inside the container, or: ssh root@ "pct exec 5700 -- bash -s" < scripts/dev-vm/install-act-runner-systemd.sh +# +# Optional env: +# WORK_DIR default /opt/act_runner +# GITEA_ACTION_URL default http://127.0.0.1:3000 (same host as Gitea) + +set -euo pipefail + +WORK_DIR="${WORK_DIR:-/opt/act_runner}" +GITEA_ACTION_URL="${GITEA_ACTION_URL:-http://127.0.0.1:3000}" + +if [ ! -x "${WORK_DIR}/act_runner" ]; then + echo "Missing ${WORK_DIR}/act_runner — run setup-act-runner.sh with GITEA_RUNNER_REGISTRATION_TOKEN first." + exit 1 +fi + +if [ ! -f "${WORK_DIR}/.runner" ]; then + echo "Missing ${WORK_DIR}/.runner — register first: GITEA_RUNNER_REGISTRATION_TOKEN=... 
bash setup-act-runner.sh" + exit 1 +fi + +cat > /etc/systemd/system/act-runner.service << EOF +[Unit] +Description=Gitea act_runner +After=network.target + +[Service] +Type=simple +User=root +WorkingDirectory=${WORK_DIR} +ExecStart=${WORK_DIR}/act_runner daemon +Restart=on-failure +RestartSec=10 +Environment=GITEA_ACTION_URL=${GITEA_ACTION_URL} + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable act-runner +systemctl restart act-runner +systemctl --no-pager status act-runner diff --git a/scripts/dev-vm/setup-act-runner.sh b/scripts/dev-vm/setup-act-runner.sh index b68ee31..34a0b20 100644 --- a/scripts/dev-vm/setup-act-runner.sh +++ b/scripts/dev-vm/setup-act-runner.sh @@ -6,9 +6,12 @@ set -euo pipefail ACT_RUNNER_VERSION="${ACT_RUNNER_VERSION:-0.2.13}" -INSTANCE="${INSTANCE:-http://192.168.11.59:3000}" +# Gitea root URL as seen from this host (same LXC as Gitea → 127.0.0.1) +INSTANCE="${INSTANCE:-http://127.0.0.1:3000}" WORK_DIR="${WORK_DIR:-/opt/act_runner}" TOKEN="${GITEA_RUNNER_REGISTRATION_TOKEN:-}" +# Workflows commonly use runs-on: ubuntu-latest; labels must match. +RUNNER_LABELS="${RUNNER_LABELS:-ubuntu-latest}" if [ -z "$TOKEN" ]; then echo "Set GITEA_RUNNER_REGISTRATION_TOKEN" @@ -29,6 +32,6 @@ fi chmod +x ./act_runner if [ ! -f .runner ]; then - ./act_runner register --no-interactive --instance "$INSTANCE" --token "$TOKEN" + ./act_runner register --no-interactive --instance "$INSTANCE" --token "$TOKEN" --labels "$RUNNER_LABELS" fi echo "Ready. Run: ./act_runner daemon" diff --git a/scripts/docs/generate-dbis-node-role-matrix-md.sh b/scripts/docs/generate-dbis-node-role-matrix-md.sh new file mode 100755 index 0000000..17d8a7e --- /dev/null +++ b/scripts/docs/generate-dbis-node-role-matrix-md.sh @@ -0,0 +1,149 @@ +#!/usr/bin/env bash +# Regenerate docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md body tables from +# config/proxmox-operational-template.json (run from repo root). 
+set -euo pipefail +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +JSON="$ROOT/config/proxmox-operational-template.json" +OUT="$ROOT/docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md" + +if ! command -v jq &>/dev/null; then + echo "jq required" >&2 + exit 1 +fi + +TMP="$(mktemp)" +trap 'rm -f "$TMP"' EXIT + +jq -r ' +def vstatus: + if .category == "besu_validator" then "QBFT signer" + elif .category == "besu_sentry" then "Sentry (no signer)" + elif (.category | test("^rpc")) then "RPC only" + else "N/A" + end; +def ntype: + if .category == "besu_validator" then "Besu validator" + elif .category == "besu_sentry" then "Besu sentry" + elif .category == "rpc_core" or .category == "rpc_public" or .category == "rpc_private" or .category == "rpc_named" or .category == "rpc_thirdweb" or .category == "rpc_alltra_hybx" then "Besu RPC (\(.category))" + elif .category == "dlt" and (.hostname | test("fabric")) then "Fabric" + elif .category == "dlt" and (.hostname | test("indy")) then "Indy" + elif .category == "firefly" then "FireFly" + elif .category == "explorer" then "Blockscout" + elif .category == "npmplus" then "NPMplus ingress" + elif .category == "infra" then "Infra LXC" + elif .category == "monitoring" and (.hostname | test("cacti")) then "Cacti" + elif .category == "monitoring" then "Monitoring" + elif .category == "oracle" then "Oracle publisher" + elif .category == "ccip" then "CCIP monitor" + elif .category == "tunnel" then "Cloudflare tunnel" + elif .category == "ml" then "ML node" + elif .category == "vault" then "HashiCorp Vault" + elif .category == "order" then "The Order service" + elif .category == "sankofa_phoenix" then "Sankofa / Phoenix" + elif .category == "mim4u" then "MIM4U" + elif .category == "dbis" then "DBIS stack" + elif .category == "mifos" then "Mifos" + elif .category == "dapp" then "DApp" + elif .category == "dev" then "Dev" + elif .category == "ai_infra" then "AI infra" + elif .category == "defi" then "DeFi" + elif .category == 
"general" then "General CT" + elif .category == "legacy_proxy" then "Legacy NPM" + else .category + end; +def stier: + if .category == "besu_validator" or .category == "besu_sentry" then "validator-tier" + elif (.category | test("^rpc")) then "DMZ / RPC exposure" + elif .category == "npmplus" or .category == "tunnel" then "edge ingress" + elif .category == "dlt" or .category == "firefly" then "identity / workflow DLT" + elif .category == "vault" or .category == "infra" then "management / secrets" + elif .category == "order" or .category == "sankofa_phoenix" or .category == "dbis" then "application" + else "standard internal" + end; +([.services[] | select(.ipv4 != null) | .ipv4] | group_by(.) | map(select(length > 1) | .[0])) as $dup_ips +| .services[] +| (.ipv4) as $ip +| [(.vmid // "—"), .hostname, ($ip // "—"), (if ($ip != null and ($dup_ips | index($ip))) then "shared / non-concurrent mapping — verify live owner" else "unique in template" end), ntype, (.runtime_state // "unspecified"), "TBD", "TBD", (.preferred_node // "—"), vstatus, stier] +| @tsv +' "$JSON" | sort -t$'\t' -k1,1n > "$TMP" + +UPDATED="$(date -u +%Y-%m-%d)" +{ + cat < "$OUT" + +echo "Wrote $OUT" diff --git a/scripts/lib/load-project-env.sh b/scripts/lib/load-project-env.sh index 7a80c53..e92ca4b 100644 --- a/scripts/lib/load-project-env.sh +++ b/scripts/lib/load-project-env.sh @@ -75,7 +75,7 @@ get_host_for_vmid() { case "$vmid" in 10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10233|10235) echo "${PROXMOX_HOST_R630_01}";; 2101) echo "${PROXMOX_HOST_R630_01}";; - 5000|7810|2201|2303|2401|6200|6201|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";; + 5000|5700|7810|2201|2303|2401|6200|6201|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";; 2301|2400|1504|2503|2504|2505) echo "${PROXMOX_HOST_ML110}";; 
5400|5401|5402|5403|5410|5411|5412|5413|5414|5415|5416|5417|5418|5419|5420|5421|5422|5423|5424|5425|5440|5441|5442|5443|5444|5445|5446|5447|5448|5449|5450|5451|5452|5453|5454|5455|5470|5471|5472|5473|5474|5475|5476) echo "${PROXMOX_HOST_R630_02}";; *) echo "${PROXMOX_HOST_R630_01:-${PROXMOX_R630_02}}";; diff --git a/scripts/storage-monitor.sh b/scripts/storage-monitor.sh index 3368761..14d3677 100755 --- a/scripts/storage-monitor.sh +++ b/scripts/storage-monitor.sh @@ -48,8 +48,8 @@ NODES[r630-02]="${PROXMOX_HOST_R630_02:-192.168.11.12}:password" NODES[r630-03]="${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}}}}:L@kers2010" NODES[r630-04]="${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}}:L@kers2010" -# Alert tracking -declare -a ALERTS +# Alert tracking (must stay in main shell — no pipe-|while subshell) +ALERTS=() # SSH helper function ssh_node() { @@ -166,22 +166,22 @@ monitor_node() { return 1 fi - # Process each storage line (skip header) - echo "$storage_status" | tail -n +2 | while IFS= read -r line; do + # Process each storage line (skip header) — process substitution keeps ALERTS in this shell + while IFS= read -r line; do if [ -n "$line" ]; then check_storage_usage "$hostname" "$line" fi - done + done < <(echo "$storage_status" | tail -n +2) # Check volume groups local vgs_info=$(ssh_node "$hostname" 'vgs --units g --noheadings -o vg_name,vg_size,vg_free 2>/dev/null' || echo "") if [ -n "$vgs_info" ]; then - echo "$vgs_info" | while IFS= read -r line; do + while IFS= read -r line; do if [ -n "$line" ]; then check_vg_free_space "$hostname" "$line" fi - done + done < <(echo "$vgs_info") fi # Log storage status @@ -199,7 +199,7 @@ monitor_node() { # Send alerts (can be extended to email, Slack, etc.) 
send_alerts() { - if [ ${#ALERTS[@]} -eq 0 ]; then + if [[ ${#ALERTS[@]} -eq 0 ]]; then log_success "No storage alerts" return 0 fi @@ -244,7 +244,8 @@ generate_summary() { echo "=== Proxmox Storage Summary $(date) ===" echo "" echo "Nodes Monitored:" - for hostname in "${!NODES[@]}"; do + for hostname in ml110 r630-01 r630-02 r630-03 r630-04; do + [[ -n "${NODES[$hostname]:-}" ]] || continue if check_node "$hostname"; then echo " ✅ $hostname" else @@ -280,9 +281,10 @@ main() { echo "Date: $(date)" echo "" - # Monitor all nodes - for hostname in "${!NODES[@]}"; do - monitor_node "$hostname" + # Monitor all nodes (fixed order for readable logs; optional nodes may be unreachable) + for hostname in ml110 r630-01 r630-02 r630-03 r630-04; do + [[ -n "${NODES[$hostname]:-}" ]] || continue + monitor_node "$hostname" || true done # Send alerts @@ -297,7 +299,8 @@ main() { status) # Show current status echo "=== Current Storage Status ===" - for hostname in "${!NODES[@]}"; do + for hostname in ml110 r630-01 r630-02 r630-03 r630-04; do + [[ -n "${NODES[$hostname]:-}" ]] || continue if check_node "$hostname"; then echo "" echo "--- $hostname ---" diff --git a/scripts/upgrade-besu-all-nodes.sh b/scripts/upgrade-besu-all-nodes.sh index 4eba9ba..767fd30 100755 --- a/scripts/upgrade-besu-all-nodes.sh +++ b/scripts/upgrade-besu-all-nodes.sh @@ -1,52 +1,31 @@ #!/usr/bin/env bash -# Upgrade all Besu nodes to the latest (or specified) version. -# Requires: SSH to Proxmox host, curl/wget, enough disk in containers. +# Upgrade all running Besu containers to the requested version. +# Installs Java 21 where needed, preserves the previous /opt/besu-* directory for rollback, +# and restarts the detected Besu systemd unit in each container. 
+# # Usage: -# ./scripts/upgrade-besu-all-nodes.sh # upgrade to latest (25.12.0) -# ./scripts/upgrade-besu-all-nodes.sh --dry-run # show what would be done -# BESU_VERSION=25.11.0 ./scripts/upgrade-besu-all-nodes.sh -# Optional: pre-download to avoid long run (script uses $LOCAL_CACHE/besu-${BESU_VERSION}.tar.gz): -# curl -sSL -o /tmp/besu-25.12.0.tar.gz https://github.com/hyperledger/besu/releases/download/25.12.0/besu-25.12.0.tar.gz +# bash scripts/upgrade-besu-all-nodes.sh +# bash scripts/upgrade-besu-all-nodes.sh --dry-run +# BESU_VERSION=25.12.0 bash scripts/upgrade-besu-all-nodes.sh set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true -if [ -f "$PROJECT_ROOT/config/ip-addresses.conf" ]; then - # shellcheck source=../config/ip-addresses.conf - source "$PROJECT_ROOT/config/ip-addresses.conf" -fi - -PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_ML110:-192.168.11.10}}" -# Latest stable as of 2025-12; EIP-7702 requires >= 24.1.0 BESU_VERSION="${BESU_VERSION:-25.12.0}" BESU_TAR="besu-${BESU_VERSION}.tar.gz" -BESU_DIR="besu-${BESU_VERSION}" +BESU_DIR="/opt/besu-${BESU_VERSION}" DOWNLOAD_URL="${BESU_DOWNLOAD_URL:-https://github.com/hyperledger/besu/releases/download/${BESU_VERSION}/${BESU_TAR}}" +JAVA21_FALLBACK_URL="${JAVA21_FALLBACK_URL:-https://api.adoptium.net/v3/binary/latest/21/ga/linux/x64/jre/hotspot/normal/eclipse}" +RPC_HTTP_MAX_ACTIVE_CONNECTIONS="${RPC_HTTP_MAX_ACTIVE_CONNECTIONS:-256}" +RPC_WS_MAX_ACTIVE_CONNECTIONS="${RPC_WS_MAX_ACTIVE_CONNECTIONS:-256}" LOCAL_CACHE="${LOCAL_CACHE:-/tmp}" - DRY_RUN=false -for arg in "$@"; do - [ "$arg" = "--dry-run" ] && DRY_RUN=true -done +[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true -# Same node list and services as restart-all-besu-services.sh -declare -A NODE_SERVICES=( - ["1000"]="besu-validator" - ["1001"]="besu-validator" - ["1002"]="besu-validator" - 
["1003"]="besu-validator" - ["1004"]="besu-validator" - ["1500"]="besu-sentry" - ["1501"]="besu-sentry" - ["1502"]="besu-sentry" - ["1503"]="besu-sentry" - ["2101"]="besu-rpc" - ["2400"]="besu-rpc" - ["2401"]="besu-rpc" - ["2402"]="besu-rpc" -) +SSH_OPTS=(-o ConnectTimeout=20 -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o StrictHostKeyChecking=accept-new) RED='\033[0;31m' GREEN='\033[0;32m' @@ -54,120 +33,245 @@ YELLOW='\033[1;33m' BLUE='\033[0;34m' NC='\033[0m' -log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } -log_ok() { echo -e "${GREEN}[OK]${NC} $1"; } -log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } -log_err() { echo -e "${RED}[ERROR]${NC} $1"; } +log_info() { echo -e "${BLUE}[INFO]${NC} $1"; } +log_ok() { echo -e "${GREEN}[OK]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_err() { echo -e "${RED}[ERROR]${NC} $1"; } -is_running() { - local vmid=$1 - ssh -o ConnectTimeout=3 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" \ - "pct status $vmid 2>/dev/null" | grep -q running +declare -A HOST_BY_VMID +for v in 1000 1001 1002 1500 1501 1502 2101; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done +for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done +for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done + +BESU_VMIDS=( + 1000 1001 1002 1003 1004 + 1500 1501 1502 1503 1504 1505 1506 1507 1508 + 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 + 2400 2401 2402 2403 +) + +host_ssh() { + local host="$1" + shift + ssh "${SSH_OPTS[@]}" "root@${host}" "$@" } -# Ensure tarball exists (download to host or use cache) ensure_tarball() { - local path="$LOCAL_CACHE/$BESU_TAR" - if [ -f "$path" ]; then - log_ok "Using existing $path" - echo "$path" - return - fi - log_info "Downloading $DOWNLOAD_URL ..." 
- if $DRY_RUN; then - echo "" - return - fi - (cd "$LOCAL_CACHE" && curl -sSfL -o "$BESU_TAR" "$DOWNLOAD_URL") || { - log_err "Download failed" - return 1 - } - log_ok "Downloaded $path" - echo "$path" + local path="${LOCAL_CACHE}/${BESU_TAR}" + mkdir -p "$LOCAL_CACHE" + if [[ -f "$path" ]]; then + log_ok "Using existing $path" >&2 + printf '%s\n' "$path" + return 0 + fi + if $DRY_RUN; then + printf '%s\n' "$path" + return 0 + fi + log_info "Downloading ${DOWNLOAD_URL}" >&2 + curl -fsSL -o "$path" "$DOWNLOAD_URL" + log_ok "Downloaded $path" >&2 + printf '%s\n' "$path" +} + +detect_service() { + local host="$1" + local vmid="$2" + host_ssh "$host" "pct exec ${vmid} -- bash -lc 'systemctl list-units --type=service --no-legend 2>/dev/null | awk \"{print \\\$1}\" | grep -iE \"^besu-(validator|sentry|rpc|rpc-core)\\.service$|^besu\\.service$\" | head -1'" 2>/dev/null || true +} + +is_running() { + local host="$1" + local vmid="$2" + host_ssh "$host" "pct status ${vmid} 2>/dev/null | awk '{print \$2}'" 2>/dev/null | grep -q '^running$' +} + +prepare_host_tarball() { + local host="$1" + local local_path="$2" + local host_tmp="/tmp/${BESU_TAR}" + if $DRY_RUN; then + log_info " [dry-run] would copy ${BESU_TAR} to ${host}:${host_tmp}" + return 0 + fi + scp "${SSH_OPTS[@]}" "$local_path" "root@${host}:${host_tmp}" >/dev/null } upgrade_node() { - local vmid=$1 - local service="${NODE_SERVICES[$vmid]:-besu-rpc}" - local tarball_path="$2" + local host="$1" + local vmid="$2" + local service="$3" - if ! is_running "$vmid"; then - log_warn "VMID $vmid not running — skip" - return 0 + if ! 
is_running "$host" "$vmid"; then + log_warn "VMID ${vmid} @ ${host}: not running, skipping" + return 0 + fi + + if [[ -z "$service" ]]; then + log_warn "VMID ${vmid} @ ${host}: no Besu service detected, skipping" + return 0 + fi + + log_info "VMID ${vmid} @ ${host}: upgrading ${service} to Besu ${BESU_VERSION}" + + if $DRY_RUN; then + log_info " [dry-run] would install Java 21, extract ${BESU_TAR}, switch /opt/besu, restart ${service}" + return 0 + fi + + host_ssh "$host" "pct push ${vmid} /tmp/${BESU_TAR} /tmp/${BESU_TAR}" >/dev/null + + host_ssh "$host" "pct exec ${vmid} -- bash -lc ' + set -euo pipefail + if [[ ! -e /opt/besu ]]; then + fallback=\$(find /opt -maxdepth 1 -type d -name \"besu-*\" | sort -V | tail -1) + if [[ -n \"\${fallback:-}\" ]]; then + ln -sfn \"\$fallback\" /opt/besu + chown -h besu:besu /opt/besu 2>/dev/null || true + fi + elif [[ ! -L /opt/besu ]]; then + current_semver=\$(/opt/besu/bin/besu --version 2>/dev/null | grep -Eo \"[0-9]+\\.[0-9]+\\.[0-9]+\" | head -1) + current_version=\"besu-\${current_semver:-}\" + [[ -z \"\${current_version:-}\" ]] && current_version=besu-backup-pre-${BESU_VERSION} + if [[ ! -d \"/opt/\${current_version}\" ]]; then + mv /opt/besu \"/opt/\${current_version}\" + else + rm -rf /opt/besu + fi + ln -sfn \"/opt/\${current_version}\" /opt/besu + chown -h besu:besu /opt/besu 2>/dev/null || true fi - - log_info "VMID $vmid: upgrade to Besu $BESU_VERSION ($service) ..." 
- - if $DRY_RUN; then - log_info " [dry-run] would push $BESU_TAR and extract, switch /opt/besu, restart $service" - return 0 + java_major=\$(java -version 2>&1 | sed -n \"1s/.*version \\\"\\([0-9][0-9]*\\).*/\\1/p\") + if [[ -z \"\${java_major:-}\" || \"\$java_major\" -lt 21 ]]; then + export DEBIAN_FRONTEND=noninteractive + apt-get update -qq + apt-get install -y -qq openjdk-21-jre-headless || true + java_major=\$(java -version 2>&1 | sed -n \"1s/.*version \\\"\\([0-9][0-9]*\\).*/\\1/p\") + if [[ -z \"\${java_major:-}\" || \"\$java_major\" -lt 21 ]]; then + command -v curl >/dev/null 2>&1 || apt-get install -y -qq curl ca-certificates + tmp_jre=/tmp/java21-jre.tar.gz + curl -fsSL -o \"\$tmp_jre\" '${JAVA21_FALLBACK_URL}' + tar -tzf \"\$tmp_jre\" > /tmp/java21-jre.list + extracted_dir=\$(head -1 /tmp/java21-jre.list | cut -d/ -f1) + rm -f /tmp/java21-jre.list + tar -xzf \"\$tmp_jre\" -C /opt + rm -f \"\$tmp_jre\" + ln -sfn \"/opt/\${extracted_dir}\" /opt/java-21 + update-alternatives --install /usr/bin/java java /opt/java-21/bin/java 2100 + fi fi - - # Copy tarball into container - if ! ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" \ - "pct push $vmid $tarball_path /tmp/$BESU_TAR" 2>/dev/null; then - log_err " Failed to push tarball to $vmid" - return 1 + config_file=\$(systemctl cat ${service} | sed -n \"s/.*--config-file=\\\\([^ ]*\\\\).*/\\\\1/p\" | tail -1) + if [[ -n \"\${config_file:-}\" && -f \"\$config_file\" ]]; then + find /etc/besu -maxdepth 1 -type f -name \"*.toml\" -print0 2>/dev/null | while IFS= read -r -d \"\" toml; do + sed -i \ + -e \"/^[[:space:]]*miner-enabled[[:space:]]*=.*/d\" \ + -e \"/^[[:space:]]*privacy-enabled[[:space:]]*=.*/d\" \ + \"\$toml\" + if grep -q \"^rpc-http-enabled=true\" \"\$toml\" && ! 
grep -q \"^rpc-http-max-active-connections=\" \"\$toml\"; then + tmp=\$(mktemp) + awk \"1; /^rpc-http-port=/{print \\\"rpc-http-max-active-connections=${RPC_HTTP_MAX_ACTIVE_CONNECTIONS}\\\"}\" \"\$toml\" > \"\$tmp\" + cat \"\$tmp\" > \"\$toml\" + rm -f \"\$tmp\" + fi + if grep -q \"^rpc-ws-enabled=true\" \"\$toml\" && ! grep -q \"^rpc-ws-max-active-connections=\" \"\$toml\"; then + tmp=\$(mktemp) + awk \"1; /^rpc-ws-port=/{print \\\"rpc-ws-max-active-connections=${RPC_WS_MAX_ACTIVE_CONNECTIONS}\\\"}\" \"\$toml\" > \"\$tmp\" + cat \"\$tmp\" > \"\$toml\" + rm -f \"\$tmp\" + fi + done + if ! grep -q \"^data-storage-format=\" \"\$config_file\"; then + tmp=\$(mktemp) + awk \"1; /^sync-mode=/{print \\\"data-storage-format=\\\\\\\"FOREST\\\\\\\"\\\"}\" \"\$config_file\" > \"\$tmp\" + cat \"\$tmp\" > \"\$config_file\" + rm -f \"\$tmp\" + fi fi - - # Extract, switch symlink, fix ownership, restart (each step via pct exec to avoid quoting issues) - ssh -o ConnectTimeout=60 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" \ - "pct exec $vmid -- bash -c 'cd /opt && tar -xzf /tmp/$BESU_TAR && rm -f /tmp/$BESU_TAR'" || { - log_err " VMID $vmid: extract failed" - return 1 - } - ssh -o ConnectTimeout=10 root@"$PROXMOX_HOST" \ - "pct exec $vmid -- bash -c 'cd /opt && rm -f besu && ln -sf $BESU_DIR besu && chown -R besu:besu $BESU_DIR besu 2>/dev/null || true'" || true - ssh -o ConnectTimeout=15 root@"$PROXMOX_HOST" \ - "pct exec $vmid -- systemctl restart ${service}.service" || { - log_err " VMID $vmid: restart failed" - return 1 - } - sleep 3 - local active - active=$(ssh -o ConnectTimeout=5 root@"$PROXMOX_HOST" "pct exec $vmid -- systemctl is-active ${service}.service 2>/dev/null" || echo "unknown") - if [ "$active" = "active" ]; then - log_ok " VMID $vmid upgraded and $service active" - return 0 + cd /opt + if [[ ! 
-d ${BESU_DIR} ]]; then + tar -xzf /tmp/${BESU_TAR} -C /opt fi - log_err " VMID $vmid: service status after restart: $active" - return 1 + rm -f /tmp/${BESU_TAR} + ln -sfn ${BESU_DIR} /opt/besu + chown -h besu:besu /opt/besu 2>/dev/null || true + chown -R besu:besu ${BESU_DIR} /opt/besu-* 2>/dev/null || true + systemctl restart ${service} + '" || return 1 + + local active version + active="" + for _ in $(seq 1 24); do + active="$(host_ssh "$host" "pct exec ${vmid} -- systemctl is-active ${service}" 2>/dev/null || true)" + [[ "$active" == "active" ]] && break + sleep 5 + done + version="$(host_ssh "$host" "pct exec ${vmid} -- bash -lc '/opt/besu/bin/besu --version 2>/dev/null | grep -m1 \"besu/\" || true'" 2>/dev/null || true)" + if [[ "$active" == "active" ]]; then + log_ok " VMID ${vmid}: ${service} active (${version:-version unavailable})" + return 0 + fi + + log_err " VMID ${vmid}: ${service} state=${active:-unknown}" + host_ssh "$host" "pct exec ${vmid} -- journalctl -u ${service} -n 30 --no-pager" 2>/dev/null || true + return 1 } -# --- main --- -log_info "Upgrade Besu on all nodes to $BESU_VERSION (host: $PROXMOX_HOST)" -[ "$DRY_RUN" = true ] && log_warn "DRY RUN — no changes will be made" -echo "" +log_info "Upgrade Besu fleet to ${BESU_VERSION}" +$DRY_RUN && log_warn "DRY RUN: no changes will be made" +echo -# Check SSH -if ! ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" "echo OK" &>/dev/null; then - log_err "Cannot SSH to $PROXMOX_HOST" - exit 1 -fi +TARBALL_PATH="$(ensure_tarball)" -tarball_path="" -if ! 
$DRY_RUN; then - tarball_path=$(ensure_tarball) || exit 1 - [ -z "$tarball_path" ] && exit 1 -fi +declare -A VMIDS_ON_HOST +for vmid in "${BESU_VMIDS[@]}"; do + host="${HOST_BY_VMID[$vmid]:-}" + [[ -n "$host" ]] || continue + VMIDS_ON_HOST[$host]+=" ${vmid}" +done PASS=0 FAIL=0 -VMIDS_SORTED=$(echo "${!NODE_SERVICES[@]}" | tr ' ' '\n' | sort -n) -for vmid in $VMIDS_SORTED; do - if upgrade_node "$vmid" "$tarball_path"; then - ((PASS++)) || true - else - ((FAIL++)) || true +SKIP=0 + +for host in "${!VMIDS_ON_HOST[@]}"; do + log_info "Host ${host}" + if ! host_ssh "$host" "echo OK" >/dev/null 2>&1; then + log_err " Cannot SSH to ${host}" + ((FAIL++)) || true + continue + fi + + prepare_host_tarball "$host" "$TARBALL_PATH" + + for vmid in ${VMIDS_ON_HOST[$host]}; do + service="$(detect_service "$host" "$vmid")" + if ! is_running "$host" "$vmid"; then + log_warn "VMID ${vmid} @ ${host}: not running, skipping" + ((SKIP++)) || true + continue fi - echo "" + if [[ -z "$service" ]]; then + log_warn "VMID ${vmid} @ ${host}: no Besu unit found, skipping" + ((SKIP++)) || true + continue + fi + if upgrade_node "$host" "$vmid" "$service"; then + ((PASS++)) || true + else + ((FAIL++)) || true + fi + echo + done + + if ! 
$DRY_RUN; then + host_ssh "$host" "rm -f /tmp/${BESU_TAR}" >/dev/null 2>&1 || true + fi done -echo "────────────────────────────────────────────────────────────" -log_info "Upgrade summary: $PASS succeeded, $FAIL failed" -echo "────────────────────────────────────────────────────────────" +echo "------------------------------------------------------------" +log_info "Upgrade summary: passed=${PASS} skipped=${SKIP} failed=${FAIL}" +echo "------------------------------------------------------------" -if [ "$FAIL" -gt 0 ]; then - exit 1 +if [[ "$FAIL" -gt 0 ]]; then + exit 1 fi -exit 0 diff --git a/scripts/verify/check-chain138-rpc-health.sh b/scripts/verify/check-chain138-rpc-health.sh index 846cb41..1f00b8b 100755 --- a/scripts/verify/check-chain138-rpc-health.sh +++ b/scripts/verify/check-chain138-rpc-health.sh @@ -122,28 +122,12 @@ check_supported_method() { return 1 } -check_expected_missing_method() { - local method="$1" - local params="${2:-[]}" - local response code message - response="$(rpc_request "$method" "$params" || printf '%s' '{"error":"curl"}')" - code="$(printf '%s' "$response" | jq -r '.error.code // empty' 2>/dev/null || true)" - message="$(printf '%s' "$response" | jq -r '.error.message // empty' 2>/dev/null || true)" - if [[ "$code" == "-32601" || "$message" == "Method not found" ]]; then - printf ' %-32s %s\n' "$method" "EXPECTED_MISSING" - return 0 - fi - printf ' %-32s %s\n' "$method" "UNEXPECTED" - ((fail++)) || true - return 1 -} - check_supported_method "eth_chainId" check_supported_method "eth_gasPrice" +check_supported_method "eth_maxPriorityFeePerGas" check_supported_method "eth_feeHistory" "[\"0x1\", \"latest\", []]" check_supported_method "trace_block" "[\"0x1\"]" check_supported_method "trace_replayBlockTransactions" "[\"0x1\", [\"trace\"]]" -check_expected_missing_method "eth_maxPriorityFeePerGas" if [[ "$fail" -eq 0 ]]; then echo "OK: node health and public RPC capability checks passed" diff --git 
a/scripts/verify/check-dbis-rtgs-control-plane.sh b/scripts/verify/check-dbis-rtgs-control-plane.sh new file mode 100644 index 0000000..8f7dac2 --- /dev/null +++ b/scripts/verify/check-dbis-rtgs-control-plane.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Verify the DBIS RTGS control-plane services once deployed. + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +check_ct() { + local vmid="$1" + local hostname="$2" + local service="$3" + + echo "=== CT $vmid ($hostname) ===" + ssh $SSH_OPTS "root@$HOST" "pct status $vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active redis-server'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active $service'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -sf http://127.0.0.1:8080/actuator/health'" + echo +} + +echo "=== DBIS RTGS control-plane runtime check ===" +echo "Host: $HOST" +echo + +check_ct "${RTGS_ORCH_VMID:-5805}" "${RTGS_ORCH_HOSTNAME:-rtgs-orchestrator-1}" dbis-rtgs-orchestrator +check_ct "${RTGS_FX_VMID:-5806}" "${RTGS_FX_HOSTNAME:-rtgs-fx-1}" dbis-rtgs-fx-engine +check_ct "${RTGS_LIQ_VMID:-5807}" "${RTGS_LIQ_HOSTNAME:-rtgs-liquidity-1}" dbis-rtgs-liquidity-engine + +echo "=== OMNL reachability from control plane ===" +for vmid in "${RTGS_ORCH_VMID:-5805}" "${RTGS_FX_VMID:-5806}" "${RTGS_LIQ_VMID:-5807}"; do + printf 'CT %s -> ' "$vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -s -o /tmp/fineract.out -w \"%{http_code}\" http://192.168.11.85:8080/fineract-provider/api/v1/offices'" + echo +done diff --git a/scripts/verify/check-dbis-rtgs-first-slice.sh b/scripts/verify/check-dbis-rtgs-first-slice.sh new file mode 100755 index 0000000..6e3f7ce --- /dev/null +++ b/scripts/verify/check-dbis-rtgs-first-slice.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Verify the deployed DBIS RTGS first-slice sidecars on 
Proxmox VE. + +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +check_ct() { + local vmid="$1" + local hostname="$2" + local service="$3" + + echo "=== CT $vmid ($hostname) ===" + ssh $SSH_OPTS "root@$HOST" "pct status $vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active redis-server'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active $service'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -sf http://127.0.0.1:8080/actuator/health'" + echo +} + +echo "=== DBIS RTGS first-slice runtime check ===" +echo "Host: $HOST" +echo + +check_ct 5802 rtgs-scsm-1 dbis-rtgs-scsm +check_ct 5803 rtgs-funds-1 dbis-rtgs-funds +check_ct 5804 rtgs-xau-1 dbis-rtgs-xau + +echo "=== Fineract reachability from sidecars ===" +for vmid in 5802 5803 5804; do + printf 'CT %s -> ' "$vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -s -o /tmp/fineract.out -w \"%{http_code}\" http://192.168.11.85:8080/fineract-provider/api/v1/offices'" + echo +done + +echo +echo "Interpretation:" +echo "- 200 means unauthenticated route unexpectedly open or credentials baked into proxy" +echo "- 400/401 means HTTP reachability exists, but authenticated tenant flow is not yet frozen" diff --git a/scripts/verify/check-dbis-rtgs-later-phase-sidecars.sh b/scripts/verify/check-dbis-rtgs-later-phase-sidecars.sh new file mode 100644 index 0000000..d82968c --- /dev/null +++ b/scripts/verify/check-dbis-rtgs-later-phase-sidecars.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Verify the later-phase DBIS RTGS sidecars once deployed. 
+ +HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}" +SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new" + +check_java_ct() { + local vmid="$1" + local hostname="$2" + local service="$3" + echo "=== CT $vmid ($hostname) ===" + ssh $SSH_OPTS "root@$HOST" "pct status $vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active redis-server'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active $service'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -sf http://127.0.0.1:8080/actuator/health'" + echo +} + +check_go_ct() { + local vmid="$1" + local hostname="$2" + local service="$3" + echo "=== CT $vmid ($hostname) ===" + ssh $SSH_OPTS "root@$HOST" "pct status $vmid" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'systemctl is-active $service'" + ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- bash -lc 'curl -sf http://127.0.0.1:8080/health'" + echo +} + +echo "=== DBIS RTGS later-phase sidecar runtime check ===" +echo "Host: $HOST" +echo + +check_java_ct "${RTGS_SECURITIES_VMID:-5808}" "${RTGS_SECURITIES_HOSTNAME:-rtgs-securities-1}" dbis-rtgs-securities +check_java_ct "${RTGS_CARDNET_VMID:-5809}" "${RTGS_CARDNET_HOSTNAME:-rtgs-cardnet-1}" dbis-rtgs-cardnet +check_go_ct "${RTGS_MT103_VMID:-5810}" "${RTGS_MT103_HOSTNAME:-rtgs-mt103-1}" dbis-rtgs-mt103 diff --git a/scripts/verify/print-caliper-chain138-stub.sh b/scripts/verify/print-caliper-chain138-stub.sh new file mode 100755 index 0000000..89a5324 --- /dev/null +++ b/scripts/verify/print-caliper-chain138-stub.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Print Caliper integration hints for Chain 138 (no network I/O). +# See docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md + +set -euo pipefail +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +echo "Caliper is not bundled in this repo." 
+echo "Read: $ROOT/docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md" +echo "" +echo "Suggested SUT URL for benchmarks (lab): \${RPC_URL_138:-http://192.168.11.211:8545}" +echo "Chain ID: 138 (verify with eth_chainId)." diff --git a/scripts/verify/print-gitea-actions-urls.sh b/scripts/verify/print-gitea-actions-urls.sh new file mode 100755 index 0000000..0315b1d --- /dev/null +++ b/scripts/verify/print-gitea-actions-urls.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Print Gitea Actions UI URLs (no token). Use after pushing complete-credential / cc-* repos. +# Gitea REST "actions runs" APIs vary by version; the web UI is the reliable check. +set -euo pipefail +GITEA_URL="${GITEA_URL:-https://gitea.d-bis.org}" +ORG="${GITEA_ORG:-DBIS}" +REPOS=( + complete-credential + cc-shared-authz + cc-audit-ledger + cc-eidas-connector +) +echo "Open in browser (Actions tab):" +for r in "${REPOS[@]}"; do + echo " ${GITEA_URL}/${ORG}/${r}/actions" +done diff --git a/scripts/verify/run-dbis-phase3-e2e-simulation.sh b/scripts/verify/run-dbis-phase3-e2e-simulation.sh new file mode 100755 index 0000000..15aa314 --- /dev/null +++ b/scripts/verify/run-dbis-phase3-e2e-simulation.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# DBIS Phase 3 — liveness / availability wrapper: Besu RPC liveness + optional FireFly HTTP + optional full RPC health. +# This does NOT execute Indy issuance, Aries verification, Fabric chaincode, or cross-chain business workflow steps. +# +# Usage: bash scripts/verify/run-dbis-phase3-e2e-simulation.sh +# Env: RPC_URL_138 (default http://192.168.11.211:8545) +# FIREFLY_URL (default http://192.168.11.35:5000) +# RUN_CHAIN138_RPC_HEALTH=1 to run check-chain138-rpc-health.sh (slower) + +set -uo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +# shellcheck source=/dev/null +source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true + +RPC_URL="${RPC_URL_138:-http://${IP_BESU_RPC_CORE_1:-192.168.11.211}:8545}" +FIREFLY_URL="${FIREFLY_URL:-http://192.168.11.35:5000}" + +fail=0 +echo "=== DBIS Phase 3 liveness wrapper (partial) ===" +echo "RPC: $RPC_URL" +echo "" + +if command -v curl &>/dev/null; then + echo "--- Besu eth_chainId / eth_blockNumber ---" + if ! out=$(curl -sS --connect-timeout 5 -X POST -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' "$RPC_URL"); then + echo "[FAIL] curl chainId" + fail=1 + else + echo "$out" + fi + if ! out=$(curl -sS --connect-timeout 5 -X POST -H 'Content-Type: application/json' \ + -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' "$RPC_URL"); then + echo "[FAIL] curl blockNumber" + fail=1 + else + echo "$out" + fi +else + echo "[SKIP] curl not installed" + fail=1 +fi + +echo "" +echo "--- FireFly HTTP (optional) ---" +if command -v curl &>/dev/null; then + code=$(curl -sS -o /dev/null -w '%{http_code}' --connect-timeout 4 "$FIREFLY_URL/api/v1/status" || true) + if [[ "$code" =~ ^(200|401|403)$ ]]; then + echo "[OK] $FIREFLY_URL/api/v1/status HTTP $code" + else + echo "[WARN] $FIREFLY_URL/api/v1/status HTTP ${code:-000} (FireFly may be down or path differs)" + fi +else + echo "[SKIP] curl not installed" +fi + +if [[ "${RUN_CHAIN138_RPC_HEALTH:-}" == "1" ]]; then + echo "" + echo "--- check-chain138-rpc-health.sh ---" + bash "$PROJECT_ROOT/scripts/verify/check-chain138-rpc-health.sh" || fail=1 +fi + +echo "" +echo "--- Manual follow-ups (Section 18) ---" +echo "This script proves only liveness / availability for the automated checks above." 
+echo "Indy 6400 / Fabric 6000 / CCIP relay on r630-01: see docs/03-deployment/DBIS_PHASE3_E2E_PRODUCTION_SIMULATION_RUNBOOK.md" +echo "Caliper: docs/03-deployment/CALIPER_CHAIN138_PERF_HOOK.md" +echo "" + +exit "$fail" diff --git a/scripts/verify/run-phase1-discovery.sh b/scripts/verify/run-phase1-discovery.sh new file mode 100755 index 0000000..b3bc150 --- /dev/null +++ b/scripts/verify/run-phase1-discovery.sh @@ -0,0 +1,218 @@ +#!/usr/bin/env bash +# Phase 1 — Reality mapping (read-only): compose Proxmox/Besu audits and optional +# Hyperledger CT probes into a timestamped report under reports/phase1-discovery/. +# +# Usage (repo root, LAN + SSH to Proxmox recommended): +# bash scripts/verify/run-phase1-discovery.sh +# HYPERLEDGER_PROBE=1 bash scripts/verify/run-phase1-discovery.sh # SSH pct exec smoke checks on r630-02 +# +# Env: PROXMOX_HOSTS, SSH_USER, SSH_OPTS (same as audit-proxmox-operational-template.sh) +# HYPERLEDGER_PROBE=1 to run optional Fabric/Indy/FireFly container checks (requires SSH to r630-02) + +set -uo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +# shellcheck source=/dev/null +source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true + +REPORT_DIR="${REPORT_DIR:-$PROJECT_ROOT/reports/phase1-discovery}" +STAMP="$(date -u +%Y%m%d_%H%M%S)" +MD="$REPORT_DIR/phase1-discovery-${STAMP}.md" +LOG="$REPORT_DIR/phase1-discovery-${STAMP}.log" +mkdir -p "$REPORT_DIR" + +SSH_USER="${SSH_USER:-root}" +SSH_OPTS="${SSH_OPTS:--o BatchMode=yes -o ConnectTimeout=6 -o StrictHostKeyChecking=accept-new}" +R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}" + +append_cmd() { + local title="$1" + local severity="${2:-info}" + shift 2 || true + local rc=0 + local tmp + tmp="$(mktemp)" + "$@" >"$tmp" 2>&1 + rc=$? 
+ { + echo "" + echo "## $title" + echo "" + echo '```text' + cat "$tmp" + if (( rc != 0 )); then + echo "[exit $rc]" + fi + echo '```' + } | tee -a "$MD" >>"$LOG" + rm -f "$tmp" + if (( rc != 0 )) && [[ "$severity" == "critical" ]]; then + PHASE1_CRITICAL_FAILURES+=("$title (exit $rc)") + fi +} + +PHASE1_CRITICAL_FAILURES=() + +{ + echo "# Phase 1 discovery report" + echo "" + echo "**Generated (UTC):** $(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "" + echo "**Runbook:** [docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md](../../docs/03-deployment/PHASE1_DISCOVERY_RUNBOOK.md)" + echo "" + echo "**Doctrine:** [dbis_chain_138_technical_master_plan.md](../../dbis_chain_138_technical_master_plan.md) (Sections 3, 19–20)" + echo "" + echo "## Dependency graph (logical)" + echo "" + echo "Same diagram as the runbook; edges reflect documented traffic flow, not live packet capture." + echo "" + cat <<'MERMAID' +```mermaid +flowchart TB + subgraph edge [EdgeIngress] + CF[Cloudflare_DNS] + NPM[NPMplus_LXC] + end + subgraph besu [Chain138_Besu] + RPCpub[RPC_public_2201] + RPCcore[RPC_core_2101] + Val[Validators_1000_1004] + Sen[Sentries_1500_1508] + end + subgraph observe [Observability] + BS[Blockscout_5000] + end + subgraph relay [CrossChain] + CCIP[CCIP_relay_r63001_host] + end + subgraph dlt [Hyperledger_optional] + FF[FireFly_6200_6201] + Fab[Fabric_6000_plus] + Indy[Indy_6400_plus] + end + CF --> NPM + NPM --> RPCpub + NPM --> RPCcore + NPM --> BS + RPCpub --> Sen + RPCcore --> Sen + Sen --> Val + CCIP --> RPCpub + FF --> Fab + FF --> Indy +``` +MERMAID +} >"$MD" +touch "$LOG" + +append_cmd "Proxmox template vs live VMID audit" critical bash "$PROJECT_ROOT/scripts/verify/audit-proxmox-operational-template.sh" + +PROXMOX_HOSTS="${PROXMOX_HOSTS:-${PROXMOX_HOST_ML110:-192.168.11.10} ${PROXMOX_HOST_R630_01:-192.168.11.11} $R630_02}" +append_cmd "Proxmox cluster status (pvecm) per host" critical bash -c " +fail=0 +for h in $PROXMOX_HOSTS; do + echo '=== '"\$h"' ===' + ssh $SSH_OPTS 
${SSH_USER}@\"\$h\" 'pvecm status 2>&1' || fail=1 + echo '' +done +exit \$fail +" + +append_cmd "Proxmox storage (pvesm status) per host" critical bash -c " +fail=0 +for h in $PROXMOX_HOSTS; do + echo '=== '"\$h"' ===' + ssh $SSH_OPTS ${SSH_USER}@\"\$h\" 'pvesm status 2>&1 | head -80' || fail=1 + echo '' +done +exit \$fail +" + +append_cmd "Live pct/qm lists per host" critical bash -c " +fail=0 +for h in $PROXMOX_HOSTS; do + echo '=== '"\$h"' ===' + ssh $SSH_OPTS ${SSH_USER}@\"\$h\" 'echo PCT:; pct list 2>&1; echo VM:; qm list 2>&1' || fail=1 + echo '' +done +exit \$fail +" + +if command -v curl &>/dev/null; then + append_cmd "Chain 138 RPC quick probe (core, LAN)" critical bash -c " + curl -sS --connect-timeout 4 -X POST -H 'Content-Type: application/json' \ + --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' \ + \"http://${IP_BESU_RPC_CORE_1:-192.168.11.211}:8545\" || echo 'curl failed' + " +fi + +append_cmd "Besu RPC health script (may fail off-LAN)" critical bash -c " + bash \"$PROJECT_ROOT/scripts/verify/check-chain138-rpc-health.sh\" +" + +append_cmd "Besu enodes / IPs verify (may fail off-LAN)" critical bash -c " + bash \"$PROJECT_ROOT/scripts/verify/verify-besu-enodes-and-ips.sh\" +" + +if [[ "${HYPERLEDGER_PROBE:-}" == "1" ]]; then + append_cmd "Hyperledger CT smoke (r630-02; pct exec)" critical bash -c " + ssh $SSH_OPTS ${SSH_USER}@$R630_02 ' + for id in 6200 6201 6000 6001 6002 6400 6401 6402; do + echo \"=== VMID \$id status ===\" + pct status \$id 2>&1 || true + if pct status \$id 2>/dev/null | grep -q running; then + pct exec \$id -- bash -lc \"command -v docker >/dev/null && docker ps --format 'table {{.Names}}\t{{.Status}}' 2>/dev/null | head -10 || true; command -v systemctl >/dev/null && systemctl list-units --type=service --state=running --no-pager 2>/dev/null | head -20 || true; ss -ltnp 2>/dev/null | head -20 || true\" 2>&1 || echo \"[exec failed]\" + fi + echo \"\" + done + ' + " +else + { + echo "" + echo "## 
Hyperledger CT smoke (skipped)" + echo "" + echo "Set \`HYPERLEDGER_PROBE=1\` to SSH to r630-02 and run \`pct status/exec\` on 6200, 6201, 6000, 6001, 6002, 6400, 6401, 6402." + echo "" + } >>"$MD" +fi + +{ + echo "" + echo "## Configuration snapshot pointers (no secrets in repo)" + echo "" + echo "- \`config/proxmox-operational-template.json\`" + echo "- \`config/ip-addresses.conf\`" + echo "- \`docs/04-configuration/ALL_VMIDS_ENDPOINTS.md\`" + echo "" + echo "## Next steps" + echo "" + echo "1. Reconcile **Entity owner** / **Region** in [DBIS_NODE_ROLE_MATRIX.md](../../docs/02-architecture/DBIS_NODE_ROLE_MATRIX.md)." + echo "2. If ML110 row shows Proxmox + workloads, update [PHYSICAL_HARDWARE_INVENTORY.md](../../docs/02-architecture/PHYSICAL_HARDWARE_INVENTORY.md) vs [NETWORK_CONFIGURATION_MASTER.md](../../docs/11-references/NETWORK_CONFIGURATION_MASTER.md)." + echo "" + if ((${#PHASE1_CRITICAL_FAILURES[@]} > 0)); then + echo "## Critical failure summary" + echo "" + for failure in "${PHASE1_CRITICAL_FAILURES[@]}"; do + echo "- $failure" + done + echo "" + echo "This report is complete as evidence capture, but the discovery run is **not** a pass. Re-run from LAN with working SSH/RPC access until the critical failures clear." + else + echo "## Critical failure summary" + echo "" + echo "- none" + echo "" + echo "All critical discovery checks completed successfully for this run." 
+ fi + echo "" +} >>"$MD" + +echo "Wrote $MD" +echo "Full log mirror: $LOG" +ls -la "$MD" "$LOG" + +if ((${#PHASE1_CRITICAL_FAILURES[@]} > 0)); then + exit 1 +fi diff --git a/scripts/verify/verify-end-to-end-routing.sh b/scripts/verify/verify-end-to-end-routing.sh index d526165..10cabb1 100755 --- a/scripts/verify/verify-end-to-end-routing.sh +++ b/scripts/verify/verify-end-to-end-routing.sh @@ -83,6 +83,14 @@ declare -A DOMAIN_TYPES_ALL=( ["the-order.sankofa.nexus"]="web" # OSJ portal (secure auth); app: ~/projects/the_order ["www.the-order.sankofa.nexus"]="web" # 301 → https://the-order.sankofa.nexus ["studio.sankofa.nexus"]="web" + # Client SSO / IdP / operator dash (FQDN_EXPECTED_CONTENT + EXPECTED_WEB_CONTENT Deployment Status) + ["keycloak.sankofa.nexus"]="web" + ["admin.sankofa.nexus"]="web" + ["portal.sankofa.nexus"]="web" + ["dash.sankofa.nexus"]="web" + # d-bis.org docs on explorer nginx where configured; generic Blockscout hostname (VMID 5000 when proxied) + ["docs.d-bis.org"]="web" + ["blockscout.defi-oracle.io"]="web" ["rpc.public-0138.defi-oracle.io"]="rpc-http" ["rpc.defi-oracle.io"]="rpc-http" ["wss.defi-oracle.io"]="rpc-ws" @@ -166,7 +174,7 @@ else fi # Domains that are optional when any test fails (off-LAN, 502, unreachable); fail → skip so run passes. 
-_PUB_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org dbis-admin.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus www.the-order.sankofa.nexus studio.sankofa.nexus mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org" +_PUB_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org dbis-admin.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus www.the-order.sankofa.nexus studio.sankofa.nexus keycloak.sankofa.nexus admin.sankofa.nexus portal.sankofa.nexus dash.sankofa.nexus docs.d-bis.org blockscout.defi-oracle.io mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org" _PRIV_OPTIONAL_WHEN_FAIL="rpc-http-prv.d-bis.org rpc-ws-prv.d-bis.org rpc-fireblocks.d-bis.org ws.rpc-fireblocks.d-bis.org" if [[ -z "${E2E_OPTIONAL_WHEN_FAIL:-}" ]]; then if [[ "$PROFILE" == "private" ]]; then @@ -410,15 +418,16 @@ test_domain() { result=$(echo "$result" | jq --arg time "$time_total" '.tests.https = {"status": "fail", "response_time_seconds": ($time | tonumber)}') fi # Optional: Blockscout API check for explorer.d-bis.org (does not affect E2E pass/fail) - if [ "$domain" = "explorer.d-bis.org" ] && [ "${SKIP_BLOCKSCOUT_API:-0}" != "1" ]; then + if { [ "$domain" = "explorer.d-bis.org" ] || [ "$domain" = "blockscout.defi-oracle.io" ]; } && [ "${SKIP_BLOCKSCOUT_API:-0}" != "1" ]; then log_info "Test 3b: Blockscout API (optional)" - api_body_file="$OUTPUT_DIR/explorer_d-bis_org_blockscout_api.txt" + api_safe="${domain//./_}" + 
api_body_file="$OUTPUT_DIR/${api_safe}_blockscout_api.txt" api_code=$(curl -s -o "$api_body_file" -w "%{http_code}" -k --connect-timeout 10 "https://$domain/api/v2/stats" 2>/dev/null || echo "000") if [ "$api_code" = "200" ] && [ -s "$api_body_file" ] && (grep -qE '"total_blocks"|"total_transactions"' "$api_body_file" 2>/dev/null); then - log_success "Blockscout API: /api/v2/stats returned 200 with stats" + log_success "Blockscout API: $domain /api/v2/stats returned 200 with stats" result=$(echo "$result" | jq '.tests.blockscout_api = {"status": "pass", "http_code": 200}') else - log_warn "Blockscout API: HTTP $api_code or invalid response (optional; run from LAN if backend unreachable)" + log_warn "Blockscout API: $domain HTTP $api_code or invalid response (optional; run from LAN if backend unreachable)" result=$(echo "$result" | jq --arg code "$api_code" '.tests.blockscout_api = {"status": "skip", "http_code": $code}') fi fi diff --git a/smom-dbis-138 b/smom-dbis-138 index 1771db2..07d9ce4 160000 --- a/smom-dbis-138 +++ b/smom-dbis-138 @@ -1 +1 @@ -Subproject commit 1771db2190343b223888e14f8155217a10ea3f4a +Subproject commit 07d9ce4876acf7f9851b4fe3dda32d50f0e95e3e