#!/usr/bin/env bash
# Continue Chain-138 Infrastructure Deployment
#
# Orchestrates: Terraform plan, AKS credential setup, Kubernetes base
# resources, and the Besu network Helm releases. Run from anywhere; the
# script cds to the repo root relative to its own location.
set -e
cd "$(dirname "$0")/../.."

# Logging helpers.
# BUGFIX: the original script called log_info/log_warn/log_error/log_success
# but never defined them (only a "# Color codes" stub existed), so under
# `set -e` it aborted at the first log_info call.
log_info()    { printf '[INFO] %s\n' "$*"; }
log_warn()    { printf '[WARN] %s\n' "$*" >&2; }
log_error()   { printf '[ERROR] %s\n' "$*" >&2; }
log_success() { printf '%s\n' "$*"; }

echo "==================================================================="
echo " CONTINUING CHAIN-138 INFRASTRUCTURE DEPLOYMENT"
echo "==================================================================="

# Load environment variables (best-effort: a missing or broken .env must
# not abort the deployment — hence the deliberate `|| true`).
if [ -f .env ]; then
  source .env 2>/dev/null || true
fi

# Step 1: Fix and apply Terraform
log_info "Step 1: Applying Terraform (if needed)..."
cd terraform

# Read the live cluster version; fall back to a pinned default when az is
# unavailable or the cluster does not exist yet.
CURRENT_VERSION=$(az aks show --resource-group az-p-we-rg-comp-001 --name az-p-we-aks-main --query kubernetesVersion -o tsv 2>/dev/null || echo "1.33")

# Pin terraform.tfvars to the live version so the plan doesn't propose an
# unintended cluster upgrade/downgrade.
sed -i "s/kubernetes_version = \".*\"/kubernetes_version = \"$CURRENT_VERSION\"/" terraform.tfvars

terraform validate

# BUGFIX: the original ran `terraform plan -out=tfplan` TWICE (once to show
# output, once to grep it) — slow, and the two plans could differ if state
# changed in between. Plan once and reuse the captured output.
PLAN_OUTPUT=$(terraform plan -out=tfplan 2>&1)
printf '%s\n' "$PLAN_OUTPUT" | tail -20

# First summary line, e.g. "Plan: 2 to add, 0 to change, 0 to destroy."
PLAN_CHANGES=$(printf '%s\n' "$PLAN_OUTPUT" | grep -E "to add|to change|to destroy" | head -1)
if echo "$PLAN_CHANGES" | grep -qE "[1-9]"; then
  log_warn "⚠️ Terraform plan shows changes"
  echo " Review above, then run: terraform apply tfplan"
else
  log_success "✅ No Terraform changes needed"
fi
cd ..

# Step 2: Get kubeconfig
log_info "Step 2: Configuring Kubernetes access..."
RG_NAME="az-p-we-rg-comp-001"
CLUSTER_NAME="az-p-we-aks-main"
az aks get-credentials --resource-group "$RG_NAME" --name "$CLUSTER_NAME" --overwrite-existing

# (original had redundant `&> /dev/null 2>&1`; one redirection suffices)
if kubectl cluster-info > /dev/null 2>&1; then
  log_success "✅ Kubernetes cluster accessible"
  kubectl get nodes 2>&1 | head -5
else
  log_error "❌ Failed to access cluster"
  exit 1
fi

# Step 3: Deploy Kubernetes resources
log_info "Step 3: Deploying Kubernetes resources..."
# Idempotent namespace creation: dry-run render piped into apply.
kubectl create namespace besu-network --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -k k8s/base 2>&1 | tail -10

# Step 4: Deploy Besu network
log_info "Step 4: Deploying Besu network..."
if [ -f helm/besu-network/values-validators.yaml ]; then

  # deploy_helm_release RELEASE VALUES_FILE LABEL
  #   Installs RELEASE from ./helm/besu-network using VALUES_FILE unless a
  #   release matching that name already exists in the besu-network
  #   namespace. Replaces three copy-pasted install stanzas from the
  #   original; messages and behavior are unchanged.
  deploy_helm_release() {
    local release=$1 values=$2 label=$3
    if helm list -n besu-network | grep -q "$release"; then
      log_success "✅ $label already deployed"
    else
      helm install "$release" ./helm/besu-network \
        -f "$values" \
        -n besu-network 2>&1 | tail -10
    fi
  }

  deploy_helm_release besu-validators helm/besu-network/values-validators.yaml "Validators"
  deploy_helm_release besu-sentries   helm/besu-network/values-sentries.yaml   "Sentries"
  deploy_helm_release besu-rpc        helm/besu-network/values-rpc.yaml        "RPC nodes"
else
  log_warn "⚠️ Helm values files not found"
fi

# Step 5: Deploy monitoring
log_info "Step 5: Deploying monitoring..."
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -
if [ -f monitoring/k8s/prometheus.yaml ]; then
  kubectl apply -f monitoring/k8s/prometheus.yaml -n monitoring 2>&1 | tail -5
fi

# Step 6: Check status
log_info "Step 6: Checking deployment status..."
echo "Pods in besu-network:"
kubectl get pods -n besu-network 2>&1 | head -10
echo "Pods in monitoring:"
kubectl get pods -n monitoring 2>&1 | head -10

# Step 7: Get RPC endpoint
log_info "Step 7: Getting RPC endpoint..."
# First service labeled app=besu-rpc; empty string when none exists.
RPC_SVC=$(kubectl get svc -n besu-network -l app=besu-rpc -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [ -n "$RPC_SVC" ]; then
  # LoadBalancer IP assignment is asynchronous; it may still be empty
  # shortly after install — that case is reported, not treated as fatal.
  RPC_IP=$(kubectl get svc -n besu-network "$RPC_SVC" -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "")
  if [ -n "$RPC_IP" ]; then
    log_success "✅ RPC endpoint: http://$RPC_IP:8545"
    echo " Update .env with: CHAIN138_RPC_URL=http://$RPC_IP:8545"
  else
    log_warn "⚠️ RPC service exists but IP not assigned yet"
  fi
else
  log_warn "⚠️ RPC service not found"
fi

log_success "✅ Infrastructure deployment steps complete!"
# Closing hint for the operator (printf avoids echo's flag/backslash quirks).
printf '%s\n' "Next: Verify deployment and deploy contracts"