- Introduced Aggregator.sol for Chainlink-compatible oracle functionality, including round-based updates and access control. - Added OracleWithCCIP.sol to extend Aggregator with CCIP cross-chain messaging capabilities. - Created .gitmodules to include OpenZeppelin contracts as a submodule. - Developed a comprehensive deployment guide in NEXT_STEPS_COMPLETE_GUIDE.md for Phase 2 and smart contract deployment. - Implemented Vite configuration for the orchestration portal, supporting both Vue and React frameworks. - Added server-side logic for the Multi-Cloud Orchestration Portal, including API endpoints for environment management and monitoring. - Created scripts for resource import and usage validation across non-US regions. - Added tests for CCIP error handling and integration to ensure robust functionality. - Included various new files and directories for the orchestration portal and deployment scripts.
838 lines
34 KiB
Python
Executable File
838 lines
34 KiB
Python
Executable File
#!/usr/bin/env python3
|
||
"""
|
||
Interactive CLI for configuring Besu network
|
||
Configures genesis.json and all configuration files with necessary details
|
||
"""
|
||
|
||
import json
|
||
import os
|
||
import sys
|
||
import shutil
|
||
from pathlib import Path
|
||
from typing import Dict, Any, List, Optional, Tuple
|
||
import subprocess
|
||
import re
|
||
|
||
# Import validation module
|
||
sys.path.insert(0, str(Path(__file__).parent))
|
||
try:
    from configure_network_validation import ConfigurationValidator, ValidationError
except ImportError:
    # Fallback if validation module not available: a no-op validator so the
    # CLI still runs, at the cost of skipping all configuration checks.
    class ConfigurationValidator:
        # Minimal stand-in mirroring the real validator's constructor signature.
        def __init__(self, config, project_root):
            self.config = config
            self.project_root = project_root

        def validate_all(self):
            # Mirror the real validator's return shape: (ok, errors, warnings).
            return True, [], []

    class ValidationError(Exception):
        # Placeholder so `except ValidationError` clauses remain valid.
        pass
|
||
|
||
# Color codes for terminal output
|
||
class Colors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'     # bright magenta — section banners
    OKBLUE = '\033[94m'     # blue
    OKCYAN = '\033[96m'     # cyan — informational messages
    OKGREEN = '\033[92m'    # green — success messages
    WARNING = '\033[93m'    # yellow — warnings
    FAIL = '\033[91m'       # red — errors
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
|
||
|
||
def print_header(text: str):
    """Render *text* as a bold, centered banner between '=' rules."""
    rule = f"{Colors.HEADER}{Colors.BOLD}{'='*60}{Colors.ENDC}"
    print(f"\n{rule}")
    print(f"{Colors.HEADER}{Colors.BOLD}{text:^60}{Colors.ENDC}")
    print(f"{rule}\n")
|
||
|
||
def print_success(text: str):
    """Show a green, check-marked status line."""
    print(Colors.OKGREEN + "✓ " + text + Colors.ENDC)
|
||
|
||
def print_error(text: str):
    """Show a red, cross-marked error line."""
    print(Colors.FAIL + "✗ " + text + Colors.ENDC)
|
||
|
||
def print_warning(text: str):
    """Show a yellow warning line."""
    print(Colors.WARNING + "⚠ " + text + Colors.ENDC)
|
||
|
||
def print_info(text: str):
    """Show a cyan informational line."""
    print(Colors.OKCYAN + "ℹ " + text + Colors.ENDC)
|
||
|
||
def input_with_default(prompt: str, default: str = "", validate_func=None) -> str:
    """Prompt until a non-empty (and optionally validated) value is entered.

    An empty response falls back to *default*; when *default* is also empty
    the field is treated as required and the prompt repeats. A failing
    *validate_func* silently re-prompts (validators print their own errors).
    """
    suffix = f" [{default}]: " if default else ": "
    while True:
        answer = input(prompt + suffix).strip() or default
        if not answer:
            print_error("This field is required")
        elif validate_func and not validate_func(answer):
            continue
        else:
            return answer
|
||
|
||
def input_int(prompt: str, default: int = 0, min_val: int = None, max_val: int = None) -> int:
    """Prompt for an integer, optionally bounded by [min_val, max_val].

    NOTE(review): a *default* of 0 is shown as "no default" because of the
    truthiness test below — confirm no caller relies on a zero default.
    """
    while True:
        raw = input_with_default(prompt, str(default) if default else "")
        try:
            number = int(raw)
        except ValueError:
            print_error("Please enter a valid integer")
            continue
        if min_val is not None and number < min_val:
            print_error(f"Value must be >= {min_val}")
        elif max_val is not None and number > max_val:
            print_error(f"Value must be <= {max_val}")
        else:
            return number
|
||
|
||
def input_hex(prompt: str, default: str = "") -> str:
    """Prompt for a hexadecimal value; return it normalized with one 0x prefix."""
    while True:
        raw = input_with_default(prompt, default)
        digits = raw[2:] if raw.startswith("0x") else raw
        try:
            int(digits, 16)  # validation only; bare "0x" fails here too
        except ValueError:
            print_error("Please enter a valid hexadecimal value")
        else:
            return f"0x{digits}"
|
||
|
||
def input_yes_no(prompt: str, default: bool = True) -> bool:
    """Ask a yes/no question; an empty reply returns *default*."""
    hint = "Y/n" if default else "y/N"
    answers = {'y': True, 'yes': True, 'n': False, 'no': False}
    while True:
        reply = input(f"{prompt} [{hint}]: ").strip().lower()
        if not reply:
            return default
        if reply in answers:
            return answers[reply]
        print_error("Please enter 'y' or 'n'")
|
||
|
||
def validate_ip(ip: str) -> bool:
    """Return True when *ip* is a dotted-quad IPv4 address.

    Each of the four octets must be a plain decimal number in 0..255.
    Unlike a bare int() conversion, this rejects signs, whitespace and
    empty octets (so "+1. 2.3.4" no longer validates).
    """
    parts = ip.split('.')
    if len(parts) != 4:
        return False
    # str.isdecimal() guarantees int() succeeds and the value is non-negative.
    return all(part.isdecimal() and int(part) <= 255 for part in parts)
|
||
|
||
def validate_cidr(cidr: str) -> bool:
    """Return True for valid IPv4 CIDR notation such as 10.0.0.0/16."""
    try:
        address, _, prefix = cidr.partition('/')
        if not validate_ip(address):
            return False
        return 0 <= int(prefix) <= 32
    except (ValueError, AttributeError):
        # int('') / int('16/24') for missing or repeated slashes, or a
        # non-string argument — all treated as invalid input.
        return False
|
||
|
||
def validate_port(port: str) -> bool:
    """Return True when *port* parses as a TCP/UDP port number (1-65535)."""
    try:
        return 0 < int(port) < 65536
    except ValueError:
        return False
|
||
|
||
def validate_chain_id(chain_id: str) -> bool:
    """Return True when *chain_id* is an integer in 1..2**31-1."""
    try:
        return 1 <= int(chain_id) <= 2147483647
    except ValueError:
        return False
|
||
|
||
class NetworkConfigurator:
    """Interactive collector/generator of Besu network configuration.

    Walks the operator through genesis, network, Besu-node and deployment
    questions, then renders genesis.json, per-role Besu TOML files,
    terraform.tfvars, Helm values and a markdown summary under
    *project_root*. Existing files are backed up first.
    """

    def __init__(self, project_root: Path):
        self.project_root = project_root
        # Accumulated answers, keyed by section ('genesis', 'network', ...).
        self.config: Dict[str, Any] = {}
        self.backup_dir = project_root / ".config-backup"

    def backup_existing_files(self):
        """Backup existing configuration files into .config-backup.

        Any previous backup is discarded so the backup always reflects the
        state immediately before this run.
        """
        if self.backup_dir.exists():
            shutil.rmtree(self.backup_dir)
        self.backup_dir.mkdir(parents=True, exist_ok=True)

        files_to_backup = [
            "config/genesis.json",
            "config/validators/besu-config.toml",
            "config/sentries/besu-config.toml",
            "config/rpc/besu-config.toml",
            "terraform/terraform.tfvars",
            "helm/besu-network/values.yaml",
        ]

        for file_path in files_to_backup:
            source = self.project_root / file_path
            if source.exists():
                dest = self.backup_dir / file_path
                dest.parent.mkdir(parents=True, exist_ok=True)
                shutil.copy2(source, dest)
                print_success(f"Backed up {file_path}")

    def collect_genesis_config(self):
        """Collect genesis block configuration (chain params, validators, alloc)."""
        print_header("Genesis Block Configuration")

        self.config['genesis'] = {
            'chainId': input_int("Chain ID", 138, 1, 2147483647),
            'blockPeriodSeconds': input_int("Block period (seconds)", 2, 1, 60),
            'epochLength': input_int("Epoch length (blocks)", 30000, 1000, 1000000),
            'requestTimeoutSeconds': input_int("Request timeout (seconds)", 10, 1, 60),
            'gasLimit': input_hex("Gas limit", "0x1c9c380"),
            'difficulty': input_hex("Difficulty", "0x1"),
            'timestamp': input_hex("Timestamp", "0x0"),
        }

        # IBFT2 validators
        print_info("\nIBFT2 Validator Configuration")
        validator_count = input_int("Number of validators", 4, 1, 100)
        self.config['validators'] = []

        for i in range(validator_count):
            print_info(f"\nValidator {i+1}:")
            validator_address = input_hex(f"  Validator address (hex)", "0x0")
            self.config['validators'].append(validator_address)

        # Pre-allocated accounts
        print_info("\nPre-allocated Accounts")
        alloc_count = input_int("Number of pre-allocated accounts", 4, 0, 100)
        self.config['alloc'] = {}

        for i in range(alloc_count):
            address = input_hex(f"  Account {i+1} address", f"0x{'0'*39}{i+1}")

            # Decision tree: validate address format.
            # NOTE(review): the re-prompt below is not re-validated; a second
            # malformed entry is accepted as-is.
            if not re.match(r'^0x[a-fA-F0-9]{40}$', address):
                print_error(f"Invalid address format: {address}")
                address = input_hex(f"  Account {i+1} address (must be 40 hex characters)", f"0x{'0'*39}{i+1}")

            balance = input_hex(f"  Account {i+1} balance (wei)", "0x1")

            # Decision tree: validate balance (warn on zero, re-prompt on bad hex).
            try:
                balance_int = int(balance, 16)
                if balance_int == 0:
                    print_warning(f"Account {i+1} has zero balance. Consider adding balance or removing account.")
            except ValueError:
                print_error(f"Invalid balance format: {balance}")
                balance = input_hex(f"  Account {i+1} balance (wei, valid hex)", "0x1")

            self.config['alloc'][address] = {'balance': balance}

        print_success("Genesis configuration collected")

    def collect_network_config(self):
        """Collect Azure network topology: cluster, subnets, node counts, VM sizes, ports."""
        print_header("Network Configuration")

        self.config['network'] = {
            'clusterName': input_with_default("Cluster name", "defi-oracle-aks"),
            'resourceGroup': input_with_default("Azure resource group", "defi-oracle-mainnet-rg"),
            'location': input_with_default("Azure region", "eastus"),
            'vnetAddressSpace': input_with_default("VNet address space (CIDR)", "10.0.0.0/16", validate_cidr),
        }

        # Subnets
        print_info("\nSubnet Configuration")
        self.config['network']['subnets'] = {
            'validators': input_with_default("Validators subnet (CIDR)", "10.0.1.0/24", validate_cidr),
            'sentries': input_with_default("Sentries subnet (CIDR)", "10.0.2.0/24", validate_cidr),
            'rpc': input_with_default("RPC subnet (CIDR)", "10.0.3.0/24", validate_cidr),
            'aks': input_with_default("AKS subnet (CIDR)", "10.0.4.0/24", validate_cidr),
        }

        # Node counts
        print_info("\nNode Configuration")
        self.config['nodes'] = {
            'validatorCount': input_int("Number of validators", 4, 1, 100),
            'sentryCount': input_int("Number of sentries", 3, 1, 100),
            'rpcCount': input_int("Number of RPC nodes", 3, 1, 100),
        }

        # VM sizes
        print_info("\nVM Size Configuration")
        self.config['vmSizes'] = {
            'validator': input_with_default("Validator VM size", "Standard_D4s_v3"),
            'sentry': input_with_default("Sentry VM size", "Standard_D4s_v3"),
            'rpc': input_with_default("RPC VM size", "Standard_D8s_v3"),
        }

        # Ports
        print_info("\nPort Configuration")
        self.config['ports'] = {
            'p2p': input_int("P2P port", 30303, 1, 65535),
            'rpcHttp': input_int("RPC HTTP port", 8545, 1, 65535),
            'rpcWs': input_int("RPC WebSocket port", 8546, 1, 65535),
            'metrics': input_int("Metrics port", 9545, 1, 65535),
        }

        print_success("Network configuration collected")

    def collect_besu_config(self):
        """Collect per-role Besu node configuration (sync, RPC, CORS, JVM)."""
        print_header("Besu Node Configuration")

        # Validator configuration
        print_info("Validator Node Configuration")

        # Decision tree: validators should not have RPC enabled.
        validator_rpc_enabled = input_yes_no("Enable RPC on validators? (NOT RECOMMENDED for security)", False)
        if validator_rpc_enabled:
            print_warning("Security: Validators with RPC enabled are exposed to attacks. Not recommended for production!")
            if not input_yes_no("Continue with RPC enabled on validators?", False):
                validator_rpc_enabled = False

        self.config['besu'] = {
            'validators': {
                'syncMode': input_with_default("Sync mode", "FULL", lambda x: x in ['FULL', 'FAST', 'SNAP']),
                'rpcHttpEnabled': validator_rpc_enabled,
                # NOTE(review): binds loopback when RPC is enabled but 0.0.0.0
                # when disabled — looks inverted, but the host is unused when
                # RPC is off; confirm intent before changing.
                'rpcHttpHost': "127.0.0.1" if validator_rpc_enabled else "0.0.0.0",
                'rpcHttpPort': 8545,
                'p2pPort': self.config['ports']['p2p'],
                'p2pEnabled': True,
                'metricsEnabled': True,
                'metricsPort': self.config['ports']['metrics'],
            },
            'sentries': {
                'syncMode': input_with_default("Sentry sync mode", "FULL", lambda x: x in ['FULL', 'FAST', 'SNAP']),
                'rpcHttpEnabled': True,
                'rpcHttpHost': "127.0.0.1",
                'rpcHttpPort': 8545,
                'p2pPort': self.config['ports']['p2p'],
                'p2pEnabled': True,
                'metricsEnabled': True,
                'metricsPort': self.config['ports']['metrics'],
            },
            'rpc': {
                'syncMode': input_with_default("RPC sync mode", "SNAP", lambda x: x in ['FULL', 'FAST', 'SNAP']),
                'rpcHttpEnabled': True,
                'rpcHttpHost': "0.0.0.0",
                'rpcHttpPort': self.config['ports']['rpcHttp'],
                'rpcWsEnabled': input_yes_no("Enable WebSocket on RPC nodes?", True),
                'rpcWsPort': self.config['ports']['rpcWs'],
                'rpcWsHost': "0.0.0.0",
                'p2pEnabled': False,  # RPC nodes should not have P2P
                'metricsEnabled': True,
                'metricsPort': self.config['ports']['metrics'],
            }
        }

        # Decision tree: RPC nodes should not have P2P (overrides default above).
        rpc_p2p_enabled = input_yes_no("Enable P2P on RPC nodes? (NOT RECOMMENDED for security)", False)
        if rpc_p2p_enabled:
            print_warning("Security: RPC nodes with P2P enabled are exposed to network attacks. Not recommended!")
            if not input_yes_no("Continue with P2P enabled on RPC nodes?", False):
                rpc_p2p_enabled = False

        self.config['besu']['rpc']['p2pEnabled'] = rpc_p2p_enabled

        # CORS and security
        print_info("\nSecurity Configuration")
        enable_cors = input_yes_no("Enable CORS", False)
        if enable_cors:
            cors_origins_input = input_with_default("CORS origins (comma-separated)", "https://yourdomain.com")
            cors_origins = [origin.strip() for origin in cors_origins_input.split(',')]

            # Decision tree: warn about wildcard CORS.
            if '*' in cors_origins:
                print_warning("CORS wildcard '*' allows all origins. Security risk in production!")
                if not input_yes_no("Continue with wildcard CORS?", False):
                    cors_origins_input = input_with_default("CORS origins (comma-separated, no wildcards)", "https://yourdomain.com")
                    cors_origins = [origin.strip() for origin in cors_origins_input.split(',')]
                    cors_origins = [o for o in cors_origins if o != '*']

            # Validate CORS origins format (offer to prefix https:// when missing).
            for origin in cors_origins:
                if origin and not (origin.startswith('http://') or origin.startswith('https://')):
                    print_warning(f"CORS origin '{origin}' should include protocol (http:// or https://)")
                    if input_yes_no(f"Add https:// to '{origin}'?", True):
                        cors_origins[cors_origins.index(origin)] = f"https://{origin}"

            self.config['besu']['rpc']['corsOrigins'] = cors_origins
        else:
            self.config['besu']['rpc']['corsOrigins'] = []

        # Host allowlist
        enable_host_allowlist = input_yes_no("Enable host allowlist", False)
        if enable_host_allowlist:
            host_allowlist_input = input_with_default("Host allowlist (comma-separated)", "localhost,127.0.0.1")
            host_allowlist = [host.strip() for host in host_allowlist_input.split(',')]

            # Decision tree: warn about wildcard host allowlist.
            if '0.0.0.0' in host_allowlist or '*' in host_allowlist:
                print_warning("Host allowlist '0.0.0.0' or '*' allows all hosts. Security risk in production!")
                if not input_yes_no("Continue with wildcard host allowlist?", False):
                    host_allowlist_input = input_with_default("Host allowlist (comma-separated, no wildcards)", "localhost,127.0.0.1")
                    host_allowlist = [host.strip() for host in host_allowlist_input.split(',')]
                    host_allowlist = [h for h in host_allowlist if h not in ['0.0.0.0', '*']]

            self.config['besu']['rpc']['hostAllowlist'] = host_allowlist
        else:
            # Decision tree: warn if RPC enabled without restrictions.
            if self.config['besu']['rpc']['rpcHttpEnabled']:
                print_warning("RPC enabled without CORS or host restrictions. Security risk!")
                if input_yes_no("Add host allowlist for security?", True):
                    host_allowlist_input = input_with_default("Host allowlist (comma-separated)", "localhost,127.0.0.1")
                    host_allowlist = [host.strip() for host in host_allowlist_input.split(',')]
                    self.config['besu']['rpc']['hostAllowlist'] = host_allowlist
                else:
                    self.config['besu']['rpc']['hostAllowlist'] = []
            else:
                self.config['besu']['rpc']['hostAllowlist'] = []

        # JVM options
        print_info("\nJVM Configuration")
        self.config['besu']['jvmOptions'] = {
            'validator': input_with_default("Validator JVM options", "-Xmx4g -Xms4g"),
            'sentry': input_with_default("Sentry JVM options", "-Xmx4g -Xms4g"),
            'rpc': input_with_default("RPC JVM options", "-Xmx8g -Xms8g"),
        }

        print_success("Besu configuration collected")

    def collect_deployment_config(self):
        """Collect deployment configuration (AKS/VM, regions, Key Vault, monitoring, Blockscout)."""
        print_header("Deployment Configuration")

        deployment_type = input_with_default("Deployment type (aks/vm/both)", "both",
                                             lambda x: x in ['aks', 'vm', 'both'])

        # Decision tree: explain the trade-offs of the chosen deployment type.
        if deployment_type == 'vm':
            print_info("VM deployment: Simpler setup, lower cost, but manual scaling required.")
        elif deployment_type == 'aks':
            print_info("AKS deployment: Kubernetes orchestration, auto-scaling, but higher cost and complexity.")
        else:
            print_info("Both AKS and VM: Maximum flexibility, but higher cost and complexity.")

        self.config['deployment'] = {
            'type': deployment_type,
            'aksEnabled': deployment_type in ['aks', 'both'],
            'vmEnabled': deployment_type in ['vm', 'both'],
        }

        if self.config['deployment']['vmEnabled']:
            print_info("\nVM Deployment Configuration")

            use_vmss = input_yes_no("Use VM Scale Sets (recommended for auto-scaling)?", False)
            if use_vmss:
                print_info("VM Scale Sets: Automatic scaling, load balancing, but less control over individual VMs.")
            else:
                print_info("Individual VMs: Full control, manual scaling, simpler management.")

            regions_input = input_with_default("Azure regions (comma-separated)", "eastus,westus")
            regions = [r.strip() for r in regions_input.split(',')]

            # Decision tree: warn about too many regions (cost).
            if len(regions) > 5:
                print_warning(f"Deploying to {len(regions)} regions may be costly. Consider reducing number of regions.")
                if not input_yes_no("Continue with multiple regions?", True):
                    regions_input = input_with_default("Azure regions (comma-separated, fewer regions)", "eastus")
                    regions = [r.strip() for r in regions_input.split(',')]

            ssh_key_path = input_with_default("SSH public key path", "~/.ssh/id_rsa.pub")
            expanded_ssh_key = Path(ssh_key_path).expanduser()

            # Decision tree: validate SSH key exists, offer to generate one.
            if not expanded_ssh_key.exists():
                print_error(f"SSH public key not found: {ssh_key_path}")
                if input_yes_no("Generate SSH key?", True):
                    # Generate an unencrypted 4096-bit RSA keypair in place.
                    key_dir = expanded_ssh_key.parent
                    key_dir.mkdir(parents=True, exist_ok=True)
                    key_name = expanded_ssh_key.stem
                    subprocess.run(['ssh-keygen', '-t', 'rsa', '-b', '4096', '-f', str(key_dir / key_name), '-N', ''], check=False)
                    print_success(f"Generated SSH key: {expanded_ssh_key}")
                else:
                    # NOTE(review): the re-entered path is not re-checked for existence.
                    ssh_key_path = input_with_default("SSH public key path", "~/.ssh/id_rsa.pub")

            self.config['deployment']['vm'] = {
                'useVmss': use_vmss,
                'regions': regions,
                'sshPublicKey': ssh_key_path,
            }

            # Decision tree: warn about large VM deployment.
            total_nodes = self.config['nodes']['validatorCount'] + self.config['nodes']['sentryCount'] + self.config['nodes']['rpcCount']
            if total_nodes > 50:
                print_warning(f"Large VM deployment: {total_nodes} nodes. Consider using VM Scale Sets for cost optimization.")
                if not use_vmss and input_yes_no("Switch to VM Scale Sets?", True):
                    self.config['deployment']['vm']['useVmss'] = True

        # Key Vault
        print_info("\nKey Vault Configuration")
        self.config['keyVault'] = {
            'name': input_with_default("Key Vault name", "defi-oracle-kv"),
            'enableSoftDelete': input_yes_no("Enable soft delete", True),
            'enablePurgeProtection': input_yes_no("Enable purge protection", True),
        }

        # Monitoring
        print_info("\nMonitoring Configuration")
        self.config['monitoring'] = {
            'enabled': input_yes_no("Enable monitoring", True),
            'prometheusEnabled': input_yes_no("Enable Prometheus", True),
            'grafanaEnabled': input_yes_no("Enable Grafana", True),
            'lokiEnabled': input_yes_no("Enable Loki", True),
        }

        # Blockscout
        print_info("\nBlockscout Configuration")
        blockscout_enabled = input_yes_no("Enable Blockscout", True)

        # Decision tree: Blockscout requires HTTP RPC on the RPC nodes.
        if blockscout_enabled:
            rpc_enabled = self.config['besu']['rpc']['rpcHttpEnabled']
            if not rpc_enabled:
                print_error("Blockscout requires RPC to be enabled. Enable RPC HTTP for RPC nodes first.")
                if input_yes_no("Enable RPC HTTP on RPC nodes?", True):
                    self.config['besu']['rpc']['rpcHttpEnabled'] = True
                    blockscout_enabled = True
                else:
                    print_warning("Blockscout disabled because RPC is not enabled.")
                    blockscout_enabled = False

        self.config['blockscout'] = {
            'enabled': blockscout_enabled,
            'image': input_with_default("Blockscout image", "blockscout/blockscout:v5.1.5") if blockscout_enabled else "blockscout/blockscout:v5.1.5",
        }

        print_success("Deployment configuration collected")

    def generate_genesis_json(self):
        """Generate config/genesis.json from the collected genesis answers."""
        genesis = {
            "config": {
                "chainId": self.config['genesis']['chainId'],
                "berlinBlock": 0,
                "londonBlock": 0,
                "istanbulBlock": 0,
                "clique": None,
                "ibft2": {
                    "blockperiodseconds": self.config['genesis']['blockPeriodSeconds'],
                    "epochlength": self.config['genesis']['epochLength'],
                    "requesttimeoutseconds": self.config['genesis']['requestTimeoutSeconds']
                },
                "ethash": {}
            },
            "nonce": "0x0",
            "timestamp": self.config['genesis']['timestamp'],
            "gasLimit": self.config['genesis']['gasLimit'],
            "difficulty": self.config['genesis']['difficulty'],
            "mixHash": "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365",
            "coinbase": "0x0000000000000000000000000000000000000000",
            "alloc": self.config['alloc'],
            "extraData": self._generate_extra_data(),
            "number": "0x0",
            "gasUsed": "0x0",
            "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
        }

        genesis_file = self.project_root / "config" / "genesis.json"
        genesis_file.parent.mkdir(parents=True, exist_ok=True)

        with open(genesis_file, 'w') as f:
            json.dump(genesis, f, indent=2)

        print_success(f"Generated {genesis_file}")

    def _generate_extra_data(self) -> str:
        """Return a placeholder extraData value for IBFT2.

        Real IBFT2 extraData must be produced by Besu's
        `operator generate-blockchain-config` once validator keys exist.
        """
        print_warning("extraData will be set to 0x - run generate-genesis-proper.sh after generating validator keys")
        return "0x"

    def generate_besu_config(self, node_type: str):
        """Generate config/<node_type>/besu-config.toml for one node role.

        *node_type* is one of 'validators', 'sentries' or 'rpc' — the same
        keys used in self.config['besu'] and the same directory names used by
        backup_existing_files().
        """
        config = self.config['besu'][node_type]

        toml_content = f"""# Besu Configuration for {node_type} nodes
# Generated by configure-network.py

# Data directory
data-path="/data"

# Network configuration
network-id={self.config['genesis']['chainId']}
"""

        if node_type != 'rpc':
            toml_content += f"p2p-port={config.get('p2pPort', 30303)}\n"
            toml_content += f"p2p-enabled={str(config.get('p2pEnabled', True)).lower()}\n"
        else:
            # RPC nodes get no p2p-port line; P2P is expected to stay disabled.
            toml_content += f"p2p-enabled={str(config.get('p2pEnabled', False)).lower()}\n"

        toml_content += f"""
# Sync mode
sync-mode="{config['syncMode']}"

# RPC configuration
rpc-http-enabled={str(config['rpcHttpEnabled']).lower()}
"""

        if config['rpcHttpEnabled']:
            toml_content += f'rpc-http-host="{config.get("rpcHttpHost", "0.0.0.0")}"\n'
            toml_content += f"rpc-http-port={config.get('rpcHttpPort', 8545)}\n"

        if config.get('rpcWsEnabled'):
            toml_content += f"rpc-ws-enabled={str(config.get('rpcWsEnabled', False)).lower()}\n"
            toml_content += f'rpc-ws-host="{config.get("rpcWsHost", "0.0.0.0")}"\n'
            toml_content += f"rpc-ws-port={config.get('rpcWsPort', 8546)}\n"

        # CORS and host allowlist (only for RPC nodes)
        if node_type == 'rpc':
            if config.get('corsOrigins'):
                cors_origins = ', '.join([f'"{origin}"' for origin in config['corsOrigins']])
                toml_content += f"rpc-http-cors-origins=[{cors_origins}]\n"

            if config.get('hostAllowlist'):
                host_allowlist = ', '.join([f'"{host}"' for host in config['hostAllowlist']])
                toml_content += f"rpc-http-host-allowlist=[{host_allowlist}]\n"

            if config.get('rpcWsEnabled') and config.get('corsOrigins'):
                ws_origins = ', '.join([f'"{origin}"' for origin in config['corsOrigins']])
                toml_content += f"rpc-ws-origins=[{ws_origins}]\n"

        # Permissions (for validators and sentries)
        if node_type in ['validators', 'sentries']:
            toml_content += """
# Permissions
permissions-nodes-config-file-enabled=true
permissions-nodes-config-file="/config/permissions-nodes.toml"
"""

        toml_content += f"""
# Metrics configuration
metrics-enabled={str(config.get('metricsEnabled', True)).lower()}
metrics-port={config.get('metricsPort', 9545)}
metrics-host="0.0.0.0"

# Genesis file
genesis-file="/config/genesis.json"

# Logging
logging="INFO"
log-destination="BOTH"
"""

        # FIX: node_type is already the directory name ('validators', 'sentries',
        # 'rpc'); the previous f"{node_type}s" wrote to "validatorss"/"sentriess"/
        # "rpcs", which did not match the paths backed up above.
        config_file = self.project_root / "config" / node_type / "besu-config.toml"
        config_file.parent.mkdir(parents=True, exist_ok=True)

        with open(config_file, 'w') as f:
            f.write(toml_content)

        print_success(f"Generated {config_file}")

    def generate_terraform_vars(self):
        """Generate terraform/terraform.tfvars from the collected answers."""
        tfvars = f"""# Terraform variables for Besu network
# Generated by configure-network.py

# Resource group
resource_group_name = "{self.config['network']['resourceGroup']}"
location = "{self.config['network']['location']}"

# Cluster configuration
cluster_name = "{self.config['network']['clusterName']}"
kubernetes_version = "1.28"

# Node counts
node_count = {{
  system = 3
  validators = {self.config['nodes']['validatorCount']}
  sentries = {self.config['nodes']['sentryCount']}
  rpc = {self.config['nodes']['rpcCount']}
}}

# VM sizes
vm_size = {{
  system = "Standard_D2s_v3"
  validators = "{self.config['vmSizes']['validator']}"
  sentries = "{self.config['vmSizes']['sentry']}"
  rpc = "{self.config['vmSizes']['rpc']}"
}}

# VM deployment
vm_deployment_enabled = {str(self.config['deployment']['vmEnabled']).lower()}
"""

        if self.config['deployment']['vmEnabled']:
            # FIX: emit the regions as a JSON array (double-quoted strings) so
            # the line is valid HCL; interpolating the Python list repr produced
            # single-quoted strings that Terraform rejects.
            # NOTE(review): ssh_public_key uses shell-style "$(cat ...)", which
            # Terraform does not expand — confirm a wrapper script substitutes it.
            tfvars += f"""
# VM configuration
vm_regions = {json.dumps(self.config['deployment']['vm']['regions'])}
validator_vm_count = {self.config['nodes']['validatorCount']}
sentry_vm_count = {self.config['nodes']['sentryCount']}
rpc_vm_count = {self.config['nodes']['rpcCount']}
use_vmss = {str(self.config['deployment']['vm']['useVmss']).lower()}
ssh_public_key = "$(cat {self.config['deployment']['vm']['sshPublicKey']})"
"""

        tfvars_file = self.project_root / "terraform" / "terraform.tfvars"
        tfvars_file.parent.mkdir(parents=True, exist_ok=True)

        with open(tfvars_file, 'w') as f:
            f.write(tfvars)

        print_success(f"Generated {tfvars_file}")

    def generate_helm_values(self):
        """Generate helm/besu-network/values.yaml from the collected answers."""
        values = f"""# Helm values for Besu network
# Generated by configure-network.py

global:
  namespace: besu-network
  chainId: {self.config['genesis']['chainId']}
  image:
    repository: hyperledger/besu
    tag: "23.10.0"
    pullPolicy: IfNotPresent

validators:
  replicaCount: {self.config['nodes']['validatorCount']}
  resources:
    requests:
      cpu: "2"
      memory: "4Gi"
    limits:
      cpu: "4"
      memory: "8Gi"
  jvmOptions: "{self.config['besu']['jvmOptions']['validator']}"

sentries:
  replicaCount: {self.config['nodes']['sentryCount']}
  resources:
    requests:
      cpu: "2"
      memory: "4Gi"
    limits:
      cpu: "4"
      memory: "8Gi"
  jvmOptions: "{self.config['besu']['jvmOptions']['sentry']}"

rpc:
  replicaCount: {self.config['nodes']['rpcCount']}
  resources:
    requests:
      cpu: "4"
      memory: "8Gi"
    limits:
      cpu: "8"
      memory: "16Gi"
  jvmOptions: "{self.config['besu']['jvmOptions']['rpc']}"
"""

        values_file = self.project_root / "helm" / "besu-network" / "values.yaml"
        values_file.parent.mkdir(parents=True, exist_ok=True)

        with open(values_file, 'w') as f:
            f.write(values)

        print_success(f"Generated {values_file}")

    def generate_config_summary(self):
        """Write a human-readable CONFIG_SUMMARY.md at the project root."""
        summary_file = self.project_root / "CONFIG_SUMMARY.md"

        with open(summary_file, 'w') as f:
            f.write("# Configuration Summary\n\n")
            f.write("This file was generated by `configure-network.py`\n\n")
            f.write("## Genesis Configuration\n\n")
            f.write(f"- Chain ID: {self.config['genesis']['chainId']}\n")
            f.write(f"- Block Period: {self.config['genesis']['blockPeriodSeconds']} seconds\n")
            f.write(f"- Epoch Length: {self.config['genesis']['epochLength']} blocks\n")
            f.write(f"- Gas Limit: {self.config['genesis']['gasLimit']}\n")
            f.write(f"- Validators: {len(self.config['validators'])}\n\n")

            f.write("## Network Configuration\n\n")
            f.write(f"- Cluster Name: {self.config['network']['clusterName']}\n")
            f.write(f"- Resource Group: {self.config['network']['resourceGroup']}\n")
            f.write(f"- Location: {self.config['network']['location']}\n")
            f.write(f"- VNet Address Space: {self.config['network']['vnetAddressSpace']}\n\n")

            f.write("## Node Configuration\n\n")
            f.write(f"- Validators: {self.config['nodes']['validatorCount']}\n")
            f.write(f"- Sentries: {self.config['nodes']['sentryCount']}\n")
            f.write(f"- RPC Nodes: {self.config['nodes']['rpcCount']}\n\n")

            f.write("## Deployment Configuration\n\n")
            f.write(f"- Deployment Type: {self.config['deployment']['type']}\n")
            f.write(f"- AKS Enabled: {self.config['deployment']['aksEnabled']}\n")
            f.write(f"- VM Enabled: {self.config['deployment']['vmEnabled']}\n\n")

        print_success(f"Generated {summary_file}")

    def run(self):
        """Run the full interactive flow: confirm, back up, collect, generate."""
        try:
            print_header("Besu Network Configuration Tool")
            print_info("This tool will help you configure all necessary files for your Besu network.")
            print_warning("Existing configuration files will be backed up.")

            if not input_yes_no("Continue?", True):
                print_info("Configuration cancelled")
                return

            # Backup existing files
            self.backup_existing_files()

            # Collect configuration (order matters: later sections read
            # self.config keys filled in by earlier ones).
            self.collect_genesis_config()
            self.collect_network_config()
            self.collect_besu_config()
            self.collect_deployment_config()

            # Generate files
            print_header("Generating Configuration Files")
            self.generate_genesis_json()
            self.generate_besu_config('validators')
            self.generate_besu_config('sentries')
            self.generate_besu_config('rpc')
            self.generate_terraform_vars()
            self.generate_helm_values()
            self.generate_config_summary()

            print_header("Configuration Complete")
            print_success("All configuration files have been generated successfully!")
            print_info(f"Configuration summary: {self.project_root / 'CONFIG_SUMMARY.md'}")
            print_info(f"Backup directory: {self.backup_dir}")
            print_warning("Please review the generated files before deploying.")

        except KeyboardInterrupt:
            print_error("\nConfiguration cancelled by user")
            sys.exit(1)
        except Exception as e:
            print_error(f"Error: {e}")
            import traceback
            traceback.print_exc()
            sys.exit(1)
|
||
|
||
def main():
    """Script entry point: configure the project rooted one level above scripts/."""
    root = Path(__file__).parent.parent
    NetworkConfigurator(root).run()
|
||
|
||
if __name__ == "__main__":
    # Run interactively only when executed as a script, not on import.
    main()
|
||
|