Add Oracle Aggregator and CCIP Integration
- Introduced Aggregator.sol for Chainlink-compatible oracle functionality, including round-based updates and access control. - Added OracleWithCCIP.sol to extend Aggregator with CCIP cross-chain messaging capabilities. - Created .gitmodules to include OpenZeppelin contracts as a submodule. - Developed a comprehensive deployment guide in NEXT_STEPS_COMPLETE_GUIDE.md for Phase 2 and smart contract deployment. - Implemented Vite configuration for the orchestration portal, supporting both Vue and React frameworks. - Added server-side logic for the Multi-Cloud Orchestration Portal, including API endpoints for environment management and monitoring. - Created scripts for resource import and usage validation across non-US regions. - Added tests for CCIP error handling and integration to ensure robust functionality. - Included various new files and directories for the orchestration portal and deployment scripts.
This commit is contained in:
77
terraform/modules/application-gateway/metamask-cors.tf
Normal file
77
terraform/modules/application-gateway/metamask-cors.tf
Normal file
@@ -0,0 +1,77 @@
# CORS configuration for MetaMask integration
# This ensures MetaMask and Portfolio can access RPC and explorer endpoints
#
# NOTE: This file is a template — it shows only the CORS-related pieces of the
# Application Gateway. Merge these blocks into your actual gateway configuration.

resource "azurerm_application_gateway" "main" {
  # ... existing configuration ...

  # HTTPS listener used by the RPC endpoint.
  http_listener {
    name                           = "rpc-https-listener"
    frontend_ip_configuration_name = "public-ip"
    frontend_port_name             = "https"
    protocol                       = "Https"
    ssl_certificate_name           = "ssl-certificate"

    # Serve a friendly error page on 403 responses.
    custom_error_configuration {
      status_code           = "HttpStatus403"
      custom_error_page_url = "https://explorer.d-bis.org/errors/403.html"
    }
  }

  # Backend HTTP settings for the RPC pool (port 8545 is the JSON-RPC port).
  backend_http_settings {
    name                  = "rpc-backend-settings"
    cookie_based_affinity = "Disabled"
    port                  = 8545
    protocol              = "Http"
    request_timeout       = 60

    # CORS headers are attached via the rewrite rule set below.
    probe_name = "rpc-health-probe"
  }

  # Rewrite rule set that adds CORS headers to every response.
  rewrite_rule_set {
    name = "cors-headers"

    rewrite_rule {
      name          = "add-cors-headers"
      rule_sequence = 100

      response_header_configuration {
        header_name  = "Access-Control-Allow-Origin"
        header_value = "*" # In production, restrict to specific origins
      }

      response_header_configuration {
        header_name  = "Access-Control-Allow-Methods"
        header_value = "GET, POST, OPTIONS"
      }

      response_header_configuration {
        header_name  = "Access-Control-Allow-Headers"
        header_value = "Content-Type, Authorization"
      }

      response_header_configuration {
        header_name  = "Access-Control-Max-Age"
        header_value = "3600"
      }
    }
  }

  # Routing rule tying the listener, backend pool/settings and CORS rewrite set together.
  request_routing_rule {
    name                       = "rpc-https-rule"
    rule_type                  = "Basic"
    http_listener_name         = "rpc-https-listener"
    backend_address_pool_name  = "rpc-backend-pool"
    backend_http_settings_name = "rpc-backend-settings"
    rewrite_rule_set_name      = "cors-headers"
  }
}

# Note: This is a template. Adjust based on your actual Application Gateway configuration.
# CORS headers should be added to response headers for RPC and explorer endpoints.
57
terraform/modules/backup/main.tf
Normal file
57
terraform/modules/backup/main.tf
Normal file
@@ -0,0 +1,57 @@
# Backup Module for Azure
# Creates a Recovery Services Vault and a daily VM backup policy.

# Recovery Services Vault
resource "azurerm_recovery_services_vault" "main" {
  name                = "${var.cluster_name}-backup-vault"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = "Standard"

  # Enable soft delete so accidentally deleted backup data can be recovered.
  soft_delete_enabled = true

  tags = merge(var.tags, {
    Purpose = "Backup"
  })
}

# Backup Policy for VMs
resource "azurerm_backup_policy_vm" "daily" {
  name                = "${var.cluster_name}-daily-backup-policy"
  resource_group_name = var.resource_group_name
  recovery_vault_name = azurerm_recovery_services_vault.main.name

  # Daily backup at 2 AM UTC
  timezone = "UTC"

  backup {
    frequency = "Daily"
    time      = "02:00"
  }

  # Retention is longer in production than in non-production environments.
  retention_daily {
    count = var.environment == "prod" ? 30 : 7
  }

  retention_weekly {
    count    = var.environment == "prod" ? 12 : 4
    weekdays = ["Sunday"]
  }

  retention_monthly {
    count    = var.environment == "prod" ? 12 : 3
    weekdays = ["Sunday"]
    weeks    = ["First"]
  }

  retention_yearly {
    count    = var.environment == "prod" ? 7 : 1
    weekdays = ["Sunday"]
    weeks    = ["First"]
    months   = ["January"]
  }
}

# Outputs are defined in outputs.tf
17
terraform/modules/backup/outputs.tf
Normal file
17
terraform/modules/backup/outputs.tf
Normal file
@@ -0,0 +1,17 @@
# Outputs for Backup Module

output "recovery_services_vault_id" {
  value       = azurerm_recovery_services_vault.main.id
  description = "ID of the Recovery Services Vault"
}

output "recovery_services_vault_name" {
  value       = azurerm_recovery_services_vault.main.name
  description = "Name of the Recovery Services Vault"
}

output "backup_policy_id" {
  value       = azurerm_backup_policy_vm.daily.id
  description = "ID of the daily backup policy"
}
28
terraform/modules/backup/variables.tf
Normal file
28
terraform/modules/backup/variables.tf
Normal file
@@ -0,0 +1,28 @@
# Variables for Backup Module

variable "resource_group_name" {
  description = "Name of the resource group"
  type        = string
}

variable "location" {
  description = "Azure region"
  type        = string
}

variable "cluster_name" {
  description = "Name of the cluster"
  type        = string
}

variable "environment" {
  description = "Environment (prod, dev, test, staging)"
  type        = string
}

variable "tags" {
  description = "Tags to apply to resources"
  type        = map(string)
  default     = {}
}
90
terraform/modules/budget/main.tf
Normal file
90
terraform/modules/budget/main.tf
Normal file
@@ -0,0 +1,90 @@
# Budget Module
# Creates an Azure Consumption Budget for subscription-level cost management.
#
# Input variables are declared in variables.tf and outputs in outputs.tf.
# They must NOT be re-declared here: duplicate variable/output declarations
# within one module are a hard `terraform validate` error.

# Consumption Budget
resource "azurerm_consumption_budget_subscription" "main" {
  name            = var.budget_name
  subscription_id = var.subscription_id

  amount     = var.amount
  time_grain = var.time_grain

  time_period {
    start_date = var.start_date
    end_date   = var.end_date
  }

  # One notification block per configured threshold (e.g. 50%, 80%, 100%).
  dynamic "notification" {
    for_each = var.notification_thresholds

    content {
      enabled        = true
      threshold      = notification.value
      operator       = "GreaterThan"
      threshold_type = "Actual"

      contact_emails = var.contact_emails
      contact_roles  = var.contact_roles
    }
  }
}
12
terraform/modules/budget/outputs.tf
Normal file
12
terraform/modules/budget/outputs.tf
Normal file
@@ -0,0 +1,12 @@
# Outputs for Budget Module

output "budget_id" {
  value       = azurerm_consumption_budget_subscription.main.id
  description = "Budget ID"
}

output "budget_name" {
  value       = azurerm_consumption_budget_subscription.main.name
  description = "Budget name"
}
59
terraform/modules/budget/variables.tf
Normal file
59
terraform/modules/budget/variables.tf
Normal file
@@ -0,0 +1,59 @@
# Variables for Budget Module

variable "subscription_id" {
  description = "Azure subscription ID"
  type        = string
}

variable "budget_name" {
  description = "Name of the budget"
  type        = string
}

variable "amount" {
  description = "Budget amount"
  type        = number
}

variable "time_grain" {
  description = "Time grain (Monthly, Quarterly, Annually)"
  type        = string
  default     = "Monthly"

  # Reject values the Azure API does not accept.
  validation {
    condition     = contains(["Monthly", "Quarterly", "Annually"], var.time_grain)
    error_message = "Time grain must be one of: Monthly, Quarterly, Annually"
  }
}

variable "start_date" {
  description = "Budget start date (ISO 8601 format)"
  type        = string
}

variable "end_date" {
  description = "Budget end date (ISO 8601 format)"
  type        = string
}

variable "notification_thresholds" {
  description = "List of notification thresholds (0-100)"
  type        = list(number)
  default     = [50, 80, 100]

  # Thresholds are percentages of the budget amount.
  validation {
    condition     = alltrue([for t in var.notification_thresholds : t >= 0 && t <= 100])
    error_message = "Notification thresholds must be between 0 and 100"
  }
}

variable "contact_emails" {
  description = "List of email addresses for notifications"
  type        = list(string)
  default     = []
}

variable "contact_roles" {
  description = "List of contact roles for notifications"
  type        = list(string)
  default     = ["Owner", "Contributor"]
}
54
terraform/modules/cacti/main.tf
Normal file
54
terraform/modules/cacti/main.tf
Normal file
@@ -0,0 +1,54 @@
# Cacti Module for Terraform
# Deploys Hyperledger Cacti on Kubernetes

variable "namespace" {
  description = "Kubernetes namespace for Cacti"
  type        = string
  default     = "cacti"
}

variable "cactus_api_image" {
  description = "Cactus API Docker image"
  type        = string
  default     = "ghcr.io/hyperledger/cactus-cmd-api-server:v2.0.0"
}

variable "besu_connector_image" {
  description = "Cactus Besu connector Docker image"
  type        = string
  default     = "ghcr.io/hyperledger/cactus-plugin-ledger-connector-besu:v2.0.0"
}

variable "besu_rpc_url" {
  description = "Besu RPC URL"
  type        = string
}

variable "besu_ws_url" {
  description = "Besu WebSocket URL"
  type        = string
}

variable "chain_id" {
  description = "Chain ID"
  type        = number
  default     = 138
}

variable "cactus_api_url" {
  description = "Cactus API URL"
  type        = string
  default     = "http://cactus-api:4000"
}

# Note: This module would use Kubernetes provider to create resources.
# For Terraform, we'll create the manifests and apply them via kubectl.

output "cactus_namespace" {
  value = var.namespace
}

output "cactus_api_url" {
  value = var.cactus_api_url
}
78
terraform/modules/firefly/main.tf
Normal file
78
terraform/modules/firefly/main.tf
Normal file
@@ -0,0 +1,78 @@
# Firefly Module for Terraform
# Deploys Hyperledger Firefly on Kubernetes

variable "namespace" {
  description = "Kubernetes namespace for Firefly"
  type        = string
  default     = "firefly"
}

variable "firefly_image" {
  description = "Firefly Docker image"
  type        = string
  default     = "hyperledger/firefly:v1.2.0"
}

variable "postgres_image" {
  description = "PostgreSQL Docker image"
  type        = string
  default     = "postgres:15-alpine"
}

variable "ipfs_image" {
  description = "IPFS Docker image"
  type        = string
  default     = "ipfs/kubo:v0.23.0"
}

variable "besu_rpc_url" {
  description = "Besu RPC URL"
  type        = string
}

variable "besu_ws_url" {
  description = "Besu WebSocket URL"
  type        = string
}

variable "chain_id" {
  description = "Chain ID"
  type        = number
  default     = 138
}

variable "firefly_api_url" {
  description = "Firefly API URL"
  type        = string
  default     = "http://firefly-api:5000"
}

variable "storage_class" {
  description = "Kubernetes storage class"
  type        = string
  default     = "managed-premium"
}

variable "postgres_storage_size" {
  description = "PostgreSQL storage size"
  type        = string
  default     = "50Gi"
}

variable "ipfs_storage_size" {
  description = "IPFS storage size"
  type        = string
  default     = "100Gi"
}

# Note: This module would use Kubernetes provider to create resources.
# For Terraform, we'll create the manifests and apply them via kubectl.

output "firefly_namespace" {
  value = var.namespace
}

output "firefly_api_url" {
  value = var.firefly_api_url
}
195
terraform/modules/keyvault-enhanced/main.tf
Normal file
195
terraform/modules/keyvault-enhanced/main.tf
Normal file
@@ -0,0 +1,195 @@
# Enhanced Key Vault Module
# Implements Well-Architected Framework best practices for Key Vault:
# RBAC authorization, network ACLs, private endpoint, soft delete and purge protection.
#
# Input variables are declared in variables.tf and outputs in outputs.tf.
# They must NOT be re-declared here: duplicate variable/output declarations
# within one module are a hard `terraform validate` error.

# Get current client config for RBAC assignments
data "azurerm_client_config" "current" {}

# Private DNS Zone for Key Vault
resource "azurerm_private_dns_zone" "keyvault" {
  count = var.enable_private_endpoint ? 1 : 0

  name                = "privatelink.vaultcore.azure.net"
  resource_group_name = var.resource_group_name

  tags = var.tags
}

# Private DNS Zone Virtual Network Link
# Note: This requires the virtual network to be in the same resource group or accessible.
# For production, create the DNS zone link separately or use a shared DNS zone.
resource "azurerm_private_dns_zone_virtual_network_link" "keyvault" {
  count = var.enable_private_endpoint && length(var.allowed_subnet_ids) > 0 ? 1 : 0

  name                  = "kv-vnet-link-${replace(var.key_vault_name, "-", "")}"
  resource_group_name   = var.resource_group_name
  private_dns_zone_name = azurerm_private_dns_zone.keyvault[0].name
  virtual_network_id    = data.azurerm_subnet.main[0].virtual_network_id
  registration_enabled  = false

  tags = var.tags
}

# Key Vault
resource "azurerm_key_vault" "main" {
  name                = var.key_vault_name
  location            = var.location
  resource_group_name = var.resource_group_name
  tenant_id           = data.azurerm_client_config.current.tenant_id
  sku_name            = "standard"

  # Enable RBAC (recommended)
  enable_rbac_authorization = var.enable_rbac

  # Soft delete and purge protection
  soft_delete_retention_days = var.soft_delete_retention_days
  purge_protection_enabled   = var.purge_protection_enabled

  # Network ACLs (restrict access)
  network_acls {
    default_action = "Deny" # Deny by default
    bypass         = "AzureServices"

    # Allow from specific subnets
    virtual_network_subnet_ids = var.allowed_subnet_ids

    # Allow from specific IPs (management)
    ip_rules = var.allowed_ip_ranges
  }

  # Public network access (disable if using private endpoint)
  public_network_access_enabled = !var.enable_private_endpoint

  tags = merge(var.tags, {
    Environment = var.environment
    Purpose     = "Secrets"
    Security    = "High"
  })
}

# Private Endpoint for Key Vault
resource "azurerm_private_endpoint" "keyvault" {
  count = var.enable_private_endpoint && var.private_endpoint_subnet_id != "" ? 1 : 0

  name                = "${var.key_vault_name}-pe"
  location            = var.location
  resource_group_name = var.resource_group_name
  subnet_id           = var.private_endpoint_subnet_id

  private_service_connection {
    name                           = "${var.key_vault_name}-psc"
    private_connection_resource_id = azurerm_key_vault.main.id
    subresource_names              = ["vault"]
    is_manual_connection           = false
  }

  private_dns_zone_group {
    name                 = "default"
    private_dns_zone_ids = [azurerm_private_dns_zone.keyvault[0].id]
  }

  tags = var.tags
}

# RBAC Role Assignment (if RBAC enabled)
resource "azurerm_role_assignment" "keyvault_administrator" {
  count = var.enable_rbac ? 1 : 0

  scope                = azurerm_key_vault.main.id
  role_definition_name = "Key Vault Administrator"
  principal_id         = data.azurerm_client_config.current.object_id
}

# Look up the virtual network of the first allowed subnet by parsing its
# resource ID. Azure subnet ID segments when split on "/":
# [4] = resource group, [8] = virtual network, [10] = subnet name.
data "azurerm_subnet" "main" {
  count = length(var.allowed_subnet_ids) > 0 ? 1 : 0

  name                 = split("/", var.allowed_subnet_ids[0])[10]
  resource_group_name  = split("/", var.allowed_subnet_ids[0])[4]
  virtual_network_name = split("/", var.allowed_subnet_ids[0])[8]
}

# Outputs are defined in outputs.tf
27
terraform/modules/keyvault-enhanced/outputs.tf
Normal file
27
terraform/modules/keyvault-enhanced/outputs.tf
Normal file
@@ -0,0 +1,27 @@
# Outputs for Enhanced Key Vault Module

output "key_vault_id" {
  value       = azurerm_key_vault.main.id
  description = "Key Vault ID"
}

output "key_vault_name" {
  value       = azurerm_key_vault.main.name
  description = "Key Vault name"
}

output "key_vault_uri" {
  value       = azurerm_key_vault.main.vault_uri
  description = "Key Vault URI"
}

output "private_endpoint_id" {
  value       = var.enable_private_endpoint && var.private_endpoint_subnet_id != "" ? azurerm_private_endpoint.keyvault[0].id : null
  description = "Private Endpoint ID"
}

output "private_dns_zone_id" {
  value       = var.enable_private_endpoint ? azurerm_private_dns_zone.keyvault[0].id : null
  description = "Private DNS Zone ID"
}
70
terraform/modules/keyvault-enhanced/variables.tf
Normal file
70
terraform/modules/keyvault-enhanced/variables.tf
Normal file
@@ -0,0 +1,70 @@
# Variables for Enhanced Key Vault Module

variable "resource_group_name" {
  description = "Name of the resource group"
  type        = string
}

variable "location" {
  description = "Azure region"
  type        = string
}

variable "key_vault_name" {
  description = "Name of the Key Vault"
  type        = string
}

variable "environment" {
  description = "Environment (prod, dev, test, staging)"
  type        = string
}

variable "enable_rbac" {
  description = "Enable RBAC authorization"
  type        = bool
  default     = true
}

variable "enable_private_endpoint" {
  description = "Enable private endpoint"
  type        = bool
  default     = true
}

variable "private_endpoint_subnet_id" {
  description = "Subnet ID for private endpoint"
  type        = string
  default     = ""
}

variable "allowed_subnet_ids" {
  description = "List of subnet IDs allowed to access Key Vault"
  type        = list(string)
  default     = []
}

variable "allowed_ip_ranges" {
  description = "List of IP ranges allowed to access Key Vault"
  type        = list(string)
  default     = []
}

variable "soft_delete_retention_days" {
  description = "Soft delete retention days"
  type        = number
  default     = 90
}

variable "purge_protection_enabled" {
  description = "Enable purge protection"
  type        = bool
  default     = true
}

variable "tags" {
  description = "Tags to apply to Key Vault"
  type        = map(string)
  default     = {}
}
173
terraform/modules/kubernetes/main.tf
Normal file
173
terraform/modules/kubernetes/main.tf
Normal file
@@ -0,0 +1,173 @@
# Kubernetes Module for Azure AKS
# Creates an AKS cluster with a system pool plus optional validator/sentry/RPC pools.
# Variables are defined in variables.tf

# Azure AD integration (optional)
data "azurerm_client_config" "current" {}

# AKS Cluster
resource "azurerm_kubernetes_cluster" "main" {
  name                = var.cluster_name
  location            = var.location
  resource_group_name = var.resource_group_name
  dns_prefix          = var.cluster_name
  # Use the version provided by the root module (kept in terraform/variables.tf)
  kubernetes_version = var.kubernetes_version

  # Network configuration
  network_profile {
    network_plugin    = "azure"
    network_policy    = "azure"
    load_balancer_sku = "standard"
    service_cidr      = "10.1.0.0/16"
    dns_service_ip    = "10.1.0.10"
  }

  # Default node pool (system)
  default_node_pool {
    name                = "system"
    node_count          = var.node_count["system"]
    vm_size             = var.vm_size["system"]
    vnet_subnet_id      = var.node_subnet_id
    enable_auto_scaling = false
    os_disk_size_gb     = 128
    type                = "VirtualMachineScaleSets"
    # No explicit zone pinning to avoid SKU/zone constraints.

    node_labels = {
      pool = "system"
    }

    tags = merge(var.tags, {
      Pool = "system"
    })
  }

  # Identity
  identity {
    type = "SystemAssigned"
  }

  # Azure Monitor
  oms_agent {
    log_analytics_workspace_id = azurerm_log_analytics_workspace.main.id
  }

  # Azure Policy (temporarily disabled to pass cluster creation)
  azure_policy_enabled = false

  # Key Vault integration (disable during initial cluster creation; enable later if needed)

  tags = merge(var.tags, {
    Purpose = "Kubernetes-Cluster"
  })
}

# Log Analytics Workspace
resource "azurerm_log_analytics_workspace" "main" {
  name                = "${var.cluster_name}-logs"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = "PerGB2018"
  retention_in_days   = var.environment == "prod" ? 90 : 30

  tags = merge(var.tags, {
    Purpose = "Logging"
  })
}

# Node pool for validators (created only when a non-zero count is requested)
resource "azurerm_kubernetes_cluster_node_pool" "validators" {
  count                 = var.node_count["validators"] > 0 ? 1 : 0
  name                  = "validators"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["validators"]
  vm_size               = var.vm_size["validators"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 512
  enable_auto_scaling   = false
  # No explicit zone pinning to avoid SKU/zone constraints.

  node_labels = {
    pool = "validators"
    role = "validator"
  }

  # Keep general workloads off validator nodes.
  node_taints = [
    "role=validator:NoSchedule"
  ]

  tags = merge(var.tags, {
    Pool = "validators"
    Role = "validator"
  })
}

# Node pool for sentries
resource "azurerm_kubernetes_cluster_node_pool" "sentries" {
  count                 = var.node_count["sentries"] > 0 ? 1 : 0
  name                  = "sentries"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["sentries"]
  vm_size               = var.vm_size["sentries"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 256
  enable_auto_scaling   = false
  # No explicit zone pinning to avoid SKU/zone constraints.

  node_labels = {
    pool = "sentries"
    role = "sentry"
  }

  tags = merge(var.tags, {
    Pool = "sentries"
    Role = "sentry"
  })
}

# Node pool for RPC nodes
resource "azurerm_kubernetes_cluster_node_pool" "rpc" {
  count                 = var.node_count["rpc"] > 0 ? 1 : 0
  name                  = "rpc"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
  node_count            = var.node_count["rpc"]
  vm_size               = var.vm_size["rpc"]
  vnet_subnet_id        = var.node_subnet_id
  os_disk_size_gb       = 256
  enable_auto_scaling   = false
  # No explicit zone pinning to avoid SKU/zone constraints.

  node_labels = {
    pool = "rpc"
    role = "rpc"
  }

  tags = merge(var.tags, {
    Pool = "rpc"
    Role = "rpc"
  })
}

# Key Vault access policy for AKS managed identity (only if using access policies)
# NOTE: If using RBAC (enhanced Key Vault module), use role assignments instead
resource "azurerm_key_vault_access_policy" "aks" {
  count = var.environment == "prod" ? 0 : 1 # Skip if using RBAC in production

  key_vault_id = var.key_vault_id
  tenant_id    = data.azurerm_client_config.current.tenant_id
  object_id    = azurerm_kubernetes_cluster.main.identity[0].principal_id

  secret_permissions = [
    "Get",
    "List"
  ]

  key_permissions = [
    "Get",
    "List"
  ]
}

# Outputs are defined in outputs.tf
37
terraform/modules/kubernetes/outputs.tf
Normal file
37
terraform/modules/kubernetes/outputs.tf
Normal file
@@ -0,0 +1,37 @@
# Outputs for Kubernetes Module

output "cluster_name" {
  value = azurerm_kubernetes_cluster.main.name
}

output "cluster_fqdn" {
  value = azurerm_kubernetes_cluster.main.fqdn
}

output "cluster_identity" {
  value = azurerm_kubernetes_cluster.main.identity[0].principal_id
}

# Raw kubeconfig and its components are credentials — all marked sensitive.
output "kubeconfig" {
  value     = azurerm_kubernetes_cluster.main.kube_config_raw
  sensitive = true
}

output "host" {
  value     = azurerm_kubernetes_cluster.main.kube_config[0].host
  sensitive = true
}

output "client_key" {
  value     = azurerm_kubernetes_cluster.main.kube_config[0].client_key
  sensitive = true
}

output "client_certificate" {
  value     = azurerm_kubernetes_cluster.main.kube_config[0].client_certificate
  sensitive = true
}

output "cluster_ca_certificate" {
  value     = azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate
  sensitive = true
}
58
terraform/modules/kubernetes/variables.tf
Normal file
58
terraform/modules/kubernetes/variables.tf
Normal file
@@ -0,0 +1,58 @@
# Variables for Kubernetes Module

variable "resource_group_name" {
  description = "Name of the resource group"
  type        = string
}

variable "location" {
  description = "Azure region"
  type        = string
}

variable "cluster_name" {
  description = "Name of the AKS cluster"
  type        = string
}

variable "kubernetes_version" {
  description = "Kubernetes version"
  type        = string
}

variable "node_count" {
  description = "Number of nodes per node pool (keys: system, validators, sentries, rpc)"
  type        = map(number)
}

variable "vm_size" {
  description = "VM size for node pools (keys: system, validators, sentries, rpc)"
  type        = map(string)
}

variable "vnet_subnet_id" {
  description = "Subnet ID for AKS control plane"
  type        = string
}

variable "node_subnet_id" {
  description = "Subnet ID for AKS node pools (validators, sentries, RPC)"
  type        = string
}

variable "key_vault_id" {
  description = "Key Vault ID for secrets"
  type        = string
}

variable "environment" {
  description = "Environment (prod, dev, test, staging)"
  type        = string
  default     = "prod"
}

variable "tags" {
  description = "Tags to apply to Kubernetes resources"
  type        = map(string)
  default     = {}
}
61
terraform/modules/management-groups/main.tf
Normal file
61
terraform/modules/management-groups/main.tf
Normal file
@@ -0,0 +1,61 @@
|
||||
# Management Groups Module
|
||||
# Creates Azure Management Groups hierarchy according to Well-Architected Framework
|
||||
|
||||
# Root Management Group (assumes it exists)
|
||||
data "azurerm_management_group" "root" {
|
||||
name = var.root_management_group_id
|
||||
}
|
||||
|
||||
# Production Management Group
|
||||
resource "azurerm_management_group" "production" {
|
||||
name = "Production"
|
||||
display_name = "Production"
|
||||
parent_management_group_id = data.azurerm_management_group.root.id
|
||||
|
||||
subscription_ids = var.production_subscription_ids
|
||||
}
|
||||
|
||||
# Non-Production Management Group
|
||||
resource "azurerm_management_group" "non_production" {
|
||||
name = "Non-Production"
|
||||
display_name = "Non-Production"
|
||||
parent_management_group_id = data.azurerm_management_group.root.id
|
||||
|
||||
subscription_ids = var.non_production_subscription_ids
|
||||
}
|
||||
|
||||
# Shared Services Management Group
|
||||
resource "azurerm_management_group" "shared_services" {
|
||||
name = "SharedServices"
|
||||
display_name = "Shared Services"
|
||||
parent_management_group_id = data.azurerm_management_group.root.id
|
||||
|
||||
subscription_ids = var.shared_services_subscription_ids
|
||||
}
|
||||
|
||||
# Sandbox Management Group
|
||||
resource "azurerm_management_group" "sandbox" {
|
||||
name = "Sandbox"
|
||||
display_name = "Sandbox"
|
||||
parent_management_group_id = data.azurerm_management_group.root.id
|
||||
|
||||
subscription_ids = var.sandbox_subscription_ids
|
||||
}
|
||||
|
||||
# Outputs
|
||||
output "production_management_group_id" {
|
||||
value = azurerm_management_group.production.id
|
||||
}
|
||||
|
||||
output "non_production_management_group_id" {
|
||||
value = azurerm_management_group.non_production.id
|
||||
}
|
||||
|
||||
output "shared_services_management_group_id" {
|
||||
value = azurerm_management_group.shared_services.id
|
||||
}
|
||||
|
||||
output "sandbox_management_group_id" {
|
||||
value = azurerm_management_group.sandbox.id
|
||||
}
|
||||
|
||||
32
terraform/modules/management-groups/variables.tf
Normal file
32
terraform/modules/management-groups/variables.tf
Normal file
@@ -0,0 +1,32 @@
|
||||
# Variables for Management Groups Module
|
||||
|
||||
variable "root_management_group_id" {
|
||||
description = "ID of the root management group"
|
||||
type = string
|
||||
default = "tenant"
|
||||
}
|
||||
|
||||
variable "production_subscription_ids" {
|
||||
description = "List of production subscription IDs"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "non_production_subscription_ids" {
|
||||
description = "List of non-production subscription IDs"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "shared_services_subscription_ids" {
|
||||
description = "List of shared services subscription IDs"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "sandbox_subscription_ids" {
|
||||
description = "List of sandbox subscription IDs"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
18
terraform/modules/monitoring/main.tf
Normal file
18
terraform/modules/monitoring/main.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
# Monitoring Module for Azure
|
||||
# Creates Log Analytics Workspace for centralized logging and monitoring
|
||||
|
||||
# Log Analytics Workspace
|
||||
resource "azurerm_log_analytics_workspace" "main" {
|
||||
name = "${var.cluster_name}-logs"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
sku = "PerGB2018"
|
||||
retention_in_days = var.environment == "prod" ? 90 : 30
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Logging"
|
||||
})
|
||||
}
|
||||
|
||||
# Outputs are defined in outputs.tf
|
||||
|
||||
18
terraform/modules/monitoring/outputs.tf
Normal file
18
terraform/modules/monitoring/outputs.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
# Outputs for Monitoring Module
|
||||
|
||||
output "log_analytics_workspace_id" {
|
||||
value = azurerm_log_analytics_workspace.main.id
|
||||
description = "ID of the Log Analytics Workspace"
|
||||
}
|
||||
|
||||
output "log_analytics_workspace_name" {
|
||||
value = azurerm_log_analytics_workspace.main.name
|
||||
description = "Name of the Log Analytics Workspace"
|
||||
}
|
||||
|
||||
output "log_analytics_workspace_primary_shared_key" {
|
||||
value = azurerm_log_analytics_workspace.main.primary_shared_key
|
||||
sensitive = true
|
||||
description = "Primary shared key for the Log Analytics Workspace"
|
||||
}
|
||||
|
||||
28
terraform/modules/monitoring/variables.tf
Normal file
28
terraform/modules/monitoring/variables.tf
Normal file
@@ -0,0 +1,28 @@
|
||||
# Variables for Monitoring Module
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Name of the resource group"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the cluster"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
70
terraform/modules/multi-region/region-config.tf
Normal file
70
terraform/modules/multi-region/region-config.tf
Normal file
@@ -0,0 +1,70 @@
|
||||
# Region-specific configurations for multi-region deployment
|
||||
|
||||
variable "regions" {
|
||||
description = "List of Azure regions for deployment"
|
||||
type = list(string)
|
||||
default = ["westeurope", "northeurope", "francecentral"]
|
||||
}
|
||||
|
||||
variable "region_configs" {
|
||||
description = "Region-specific configurations"
|
||||
type = map(object({
|
||||
validator_count = number
|
||||
sentry_count = number
|
||||
rpc_count = number
|
||||
node_pool_size = string
|
||||
storage_type = string
|
||||
}))
|
||||
default = {
|
||||
eastus = {
|
||||
validator_count = 2
|
||||
sentry_count = 2
|
||||
rpc_count = 2
|
||||
node_pool_size = "Standard_D4s_v3"
|
||||
storage_type = "Premium_LRS"
|
||||
}
|
||||
westus = {
|
||||
validator_count = 2
|
||||
sentry_count = 2
|
||||
rpc_count = 2
|
||||
node_pool_size = "Standard_D4s_v3"
|
||||
storage_type = "Premium_LRS"
|
||||
}
|
||||
westeurope = {
|
||||
validator_count = 2
|
||||
sentry_count = 2
|
||||
rpc_count = 2
|
||||
node_pool_size = "Standard_D4s_v3"
|
||||
storage_type = "Premium_LRS"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Region-specific resource groups
|
||||
resource "azurerm_resource_group" "region_rg" {
|
||||
for_each = toset(var.regions)
|
||||
|
||||
name = "defi-oracle-mainnet-${each.value}-rg"
|
||||
location = each.value
|
||||
|
||||
tags = {
|
||||
Environment = "production"
|
||||
Region = each.value
|
||||
Component = "besu-network"
|
||||
}
|
||||
}
|
||||
|
||||
# Region monitoring
|
||||
resource "azurerm_monitor_action_group" "region_alerts" {
|
||||
for_each = toset(var.regions)
|
||||
|
||||
name = "region-${each.value}-alerts"
|
||||
resource_group_name = azurerm_resource_group.region_rg[each.value].name
|
||||
short_name = "region-${each.value}"
|
||||
|
||||
email_receiver {
|
||||
name = "admin"
|
||||
email_address = "admin@example.com"
|
||||
}
|
||||
}
|
||||
|
||||
185
terraform/modules/networking-vm/main.tf
Normal file
185
terraform/modules/networking-vm/main.tf
Normal file
@@ -0,0 +1,185 @@
|
||||
# Networking Module for VM Deployment (Phase 1)
|
||||
# Creates VNet, subnets, NSGs for VM-based deployment
|
||||
|
||||
# Virtual Network
|
||||
resource "azurerm_virtual_network" "main" {
|
||||
name = "${var.cluster_name}-vnet"
|
||||
address_space = [var.vnet_address_space]
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Networking"
|
||||
})
|
||||
}
|
||||
|
||||
# Subnet for VMs
|
||||
resource "azurerm_subnet" "vm" {
|
||||
name = "${var.cluster_name}-vm-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = [var.subnet_address_prefix]
|
||||
|
||||
service_endpoints = ["Microsoft.Storage", "Microsoft.KeyVault"]
|
||||
}
|
||||
|
||||
# Network Security Group for VMs
|
||||
resource "azurerm_network_security_group" "vm" {
|
||||
name = "${var.cluster_name}-vm-nsg"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
# Allow SSH (restrict to specific IPs in production)
|
||||
security_rule {
|
||||
name = "AllowSSH"
|
||||
priority = 1000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "22"
|
||||
source_address_prefix = length(var.allowed_ssh_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_ssh_ips) > 0 ? var.allowed_ssh_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_ssh_ips) > 0 ? "Allow SSH access from specified IPs" : "Allow SSH access from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
|
||||
# Allow P2P (Besu) - TCP (only for Besu nodes, not Nginx proxy)
|
||||
dynamic "security_rule" {
|
||||
for_each = var.enable_besu_rules ? [1] : []
|
||||
content {
|
||||
name = "AllowP2PTCP"
|
||||
priority = 1001
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "30303"
|
||||
source_address_prefix = length(var.allowed_p2p_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_p2p_ips) > 0 ? var.allowed_p2p_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_p2p_ips) > 0 ? "Allow Besu P2P TCP from specified IPs" : "Allow Besu P2P TCP from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow P2P (Besu) - UDP (only for Besu nodes, not Nginx proxy)
|
||||
dynamic "security_rule" {
|
||||
for_each = var.enable_besu_rules ? [1] : []
|
||||
content {
|
||||
name = "AllowP2PUDP"
|
||||
priority = 1002
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "30303"
|
||||
source_address_prefix = length(var.allowed_p2p_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_p2p_ips) > 0 ? var.allowed_p2p_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_p2p_ips) > 0 ? "Allow Besu P2P UDP from specified IPs" : "Allow Besu P2P UDP from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow RPC HTTP (from Nginx proxy via VPN/ExpressRoute)
|
||||
# NOTE: Backend VMs use private IPs only. Nginx proxy connects via VPN/ExpressRoute.
|
||||
# Restrict to Nginx proxy private IP subnet once VPN is deployed.
|
||||
# Only for Besu nodes, not Nginx proxy
|
||||
dynamic "security_rule" {
|
||||
for_each = var.enable_besu_rules ? [1] : []
|
||||
content {
|
||||
name = "AllowRPCHTTP"
|
||||
priority = 1003
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "8545"
|
||||
source_address_prefix = length(var.allowed_rpc_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_rpc_ips) > 0 ? var.allowed_rpc_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_rpc_ips) > 0 ? "Allow RPC HTTP from specified IPs (Nginx proxy subnet or Cloudflare Tunnel IPs)" : "Allow RPC HTTP from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow RPC WebSocket (from Nginx proxy via VPN/ExpressRoute)
|
||||
# NOTE: Backend VMs use private IPs only. Nginx proxy connects via VPN/ExpressRoute.
|
||||
# Restrict to Nginx proxy private IP subnet once VPN is deployed.
|
||||
# Only for Besu nodes, not Nginx proxy
|
||||
dynamic "security_rule" {
|
||||
for_each = var.enable_besu_rules ? [1] : []
|
||||
content {
|
||||
name = "AllowRPCWS"
|
||||
priority = 1004
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "8546"
|
||||
source_address_prefix = length(var.allowed_rpc_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_rpc_ips) > 0 ? var.allowed_rpc_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_rpc_ips) > 0 ? "Allow RPC WebSocket from specified IPs (Nginx proxy subnet or Cloudflare Tunnel IPs)" : "Allow RPC WebSocket from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow Metrics (only for Besu nodes, not Nginx proxy)
|
||||
dynamic "security_rule" {
|
||||
for_each = var.enable_besu_rules ? [1] : []
|
||||
content {
|
||||
name = "AllowMetrics"
|
||||
priority = 1005
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "9545"
|
||||
source_address_prefix = length(var.allowed_metrics_ips) > 0 ? null : "*"
|
||||
source_address_prefixes = length(var.allowed_metrics_ips) > 0 ? var.allowed_metrics_ips : null
|
||||
destination_address_prefix = "*"
|
||||
description = length(var.allowed_metrics_ips) > 0 ? "Allow Prometheus metrics from specified IPs" : "Allow Prometheus metrics from anywhere (WARNING: Not secure for production)"
|
||||
}
|
||||
}
|
||||
|
||||
# Allow outbound internet access
|
||||
security_rule {
|
||||
name = "AllowOutboundInternet"
|
||||
priority = 2000
|
||||
direction = "Outbound"
|
||||
access = "Allow"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
description = "Allow outbound internet access"
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Network-Security"
|
||||
})
|
||||
}
|
||||
|
||||
# Associate NSG with VM subnet (only if subnet_nsg_enabled is true)
|
||||
# For Nginx proxy subnet, we don't need subnet-level NSG (NIC-level NSG is sufficient)
|
||||
resource "azurerm_subnet_network_security_group_association" "vm" {
|
||||
count = var.subnet_nsg_enabled ? 1 : 0
|
||||
subnet_id = azurerm_subnet.vm.id
|
||||
network_security_group_id = azurerm_network_security_group.vm.id
|
||||
}
|
||||
|
||||
# Outputs
|
||||
output "vm_subnet_id" {
|
||||
value = azurerm_subnet.vm.id
|
||||
description = "Subnet ID for VMs"
|
||||
}
|
||||
|
||||
output "vm_nsg_id" {
|
||||
value = azurerm_network_security_group.vm.id
|
||||
description = "Network Security Group ID for VMs"
|
||||
}
|
||||
|
||||
output "vnet_id" {
|
||||
value = azurerm_virtual_network.main.id
|
||||
description = "Virtual Network ID"
|
||||
}
|
||||
|
||||
76
terraform/modules/networking-vm/variables.tf
Normal file
76
terraform/modules/networking-vm/variables.tf
Normal file
@@ -0,0 +1,76 @@
|
||||
# Variables for Networking-VM Module
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Name of the resource group"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the cluster"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "allowed_ssh_ips" {
|
||||
description = "List of IP addresses/CIDR blocks allowed for SSH access. If empty, allows from anywhere (not recommended for production)."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "allowed_rpc_ips" {
|
||||
description = "List of IP addresses/CIDR blocks allowed for RPC access (Nginx proxy subnet or Cloudflare Tunnel IPs). If empty, allows from anywhere (not recommended for production)."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "allowed_p2p_ips" {
|
||||
description = "List of IP addresses/CIDR blocks allowed for P2P access. If empty, allows from anywhere (not recommended for production)."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "allowed_metrics_ips" {
|
||||
description = "List of IP addresses/CIDR blocks allowed for metrics access. If empty, allows from anywhere (not recommended for production)."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "subnet_nsg_enabled" {
|
||||
description = "Whether to attach NSG to subnet. Set to false for Nginx proxy subnet (uses NIC-level NSG instead)."
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "enable_besu_rules" {
|
||||
description = "Whether to enable Besu-specific rules (P2P/RPC/Metrics). Set to false for Nginx proxy subnet."
|
||||
type = bool
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "vnet_address_space" {
|
||||
description = "Address space for the Virtual Network (e.g., 10.0.0.0/16). Use region-specific ranges if VPN/ExpressRoute will connect regions."
|
||||
type = string
|
||||
default = "10.0.0.0/16"
|
||||
}
|
||||
|
||||
variable "subnet_address_prefix" {
|
||||
description = "Address prefix for the VM subnet (e.g., 10.0.1.0/24)"
|
||||
type = string
|
||||
default = "10.0.1.0/24"
|
||||
}
|
||||
|
||||
228
terraform/modules/networking/appgateway-complete.tf.disabled
Normal file
228
terraform/modules/networking/appgateway-complete.tf.disabled
Normal file
@@ -0,0 +1,228 @@
|
||||
# Complete Application Gateway Configuration
|
||||
# This file provides a complete Application Gateway setup with backend pools, listeners, and routing rules
|
||||
# Note: This requires AKS service IPs to be known. For dynamic configuration, use AGIC (Application Gateway Ingress Controller)
|
||||
|
||||
# Backend Address Pool for RPC nodes
|
||||
resource "azurerm_application_gateway_backend_address_pool" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-backend-pool"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
# IP addresses will be populated after AKS deployment
|
||||
# Use data source or variables to get service IPs
|
||||
# fqdns = [var.rpc_service_fqdn]
|
||||
# ip_addresses = var.rpc_service_ips
|
||||
}
|
||||
|
||||
# Backend Address Pool for Blockscout
|
||||
resource "azurerm_application_gateway_backend_address_pool" "blockscout" {
|
||||
name = "${var.cluster_name}-blockscout-backend-pool"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
}
|
||||
|
||||
# HTTP Settings for RPC
|
||||
resource "azurerm_application_gateway_backend_http_settings" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-http-settings"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
port = 8545
|
||||
protocol = "Http"
|
||||
cookie_based_affinity = "Disabled"
|
||||
request_timeout = 60
|
||||
probe_name = azurerm_application_gateway_probe.rpc.name
|
||||
pick_host_name_from_backend_address = false
|
||||
}
|
||||
|
||||
# HTTP Settings for Blockscout
|
||||
resource "azurerm_application_gateway_backend_http_settings" "blockscout" {
|
||||
name = "${var.cluster_name}-blockscout-http-settings"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
port = 4000
|
||||
protocol = "Http"
|
||||
cookie_based_affinity = "Disabled"
|
||||
request_timeout = 60
|
||||
probe_name = azurerm_application_gateway_probe.blockscout.name
|
||||
pick_host_name_from_backend_address = false
|
||||
}
|
||||
|
||||
# Health Probe for RPC
|
||||
resource "azurerm_application_gateway_probe" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-probe"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
protocol = "Http"
|
||||
path = "/"
|
||||
host = "127.0.0.1"
|
||||
interval = 30
|
||||
timeout = 30
|
||||
unhealthy_threshold = 3
|
||||
minimum_servers = 1
|
||||
|
||||
match {
|
||||
status_code = ["200-399"]
|
||||
}
|
||||
}
|
||||
|
||||
# Health Probe for Blockscout
|
||||
resource "azurerm_application_gateway_probe" "blockscout" {
|
||||
name = "${var.cluster_name}-blockscout-probe"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
protocol = "Http"
|
||||
path = "/"
|
||||
host = "127.0.0.1"
|
||||
interval = 30
|
||||
timeout = 30
|
||||
unhealthy_threshold = 3
|
||||
minimum_servers = 1
|
||||
|
||||
match {
|
||||
status_code = ["200-399"]
|
||||
}
|
||||
}
|
||||
|
||||
# HTTP Listener for RPC (HTTP)
|
||||
resource "azurerm_application_gateway_http_listener" "rpc_http" {
|
||||
name = "${var.cluster_name}-rpc-http-listener"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
frontend_ip_configuration_name = "appGatewayFrontendIP"
|
||||
frontend_port_name = "http"
|
||||
protocol = "Http"
|
||||
host_name = var.rpc_hostname
|
||||
}
|
||||
|
||||
# HTTPS Listener for RPC (HTTPS)
|
||||
resource "azurerm_application_gateway_http_listener" "rpc_https" {
|
||||
name = "${var.cluster_name}-rpc-https-listener"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
frontend_ip_configuration_name = "appGatewayFrontendIP"
|
||||
frontend_port_name = "https"
|
||||
protocol = "Https"
|
||||
ssl_certificate_name = azurerm_application_gateway_ssl_certificate.rpc.name
|
||||
host_name = var.rpc_hostname
|
||||
}
|
||||
|
||||
# HTTP Listener for Blockscout (HTTP)
|
||||
resource "azurerm_application_gateway_http_listener" "blockscout_http" {
|
||||
name = "${var.cluster_name}-blockscout-http-listener"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
frontend_ip_configuration_name = "appGatewayFrontendIP"
|
||||
frontend_port_name = "http"
|
||||
protocol = "Http"
|
||||
host_name = var.blockscout_hostname
|
||||
}
|
||||
|
||||
# HTTPS Listener for Blockscout (HTTPS)
|
||||
resource "azurerm_application_gateway_http_listener" "blockscout_https" {
|
||||
name = "${var.cluster_name}-blockscout-https-listener"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
frontend_ip_configuration_name = "appGatewayFrontendIP"
|
||||
frontend_port_name = "https"
|
||||
protocol = "Https"
|
||||
ssl_certificate_name = azurerm_application_gateway_ssl_certificate.blockscout.name
|
||||
host_name = var.blockscout_hostname
|
||||
}
|
||||
|
||||
# SSL Certificate for RPC (use Azure Key Vault or upload certificate)
|
||||
resource "azurerm_application_gateway_ssl_certificate" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-ssl-cert"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
# Option 1: Use Key Vault certificate
|
||||
# key_vault_secret_id = var.rpc_ssl_certificate_key_vault_secret_id
|
||||
|
||||
# Option 2: Upload certificate data (not recommended for production)
|
||||
# data = var.rpc_ssl_certificate_data
|
||||
# password = var.rpc_ssl_certificate_password
|
||||
}
|
||||
|
||||
# SSL Certificate for Blockscout
|
||||
resource "azurerm_application_gateway_ssl_certificate" "blockscout" {
|
||||
name = "${var.cluster_name}-blockscout-ssl-cert"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
# Option 1: Use Key Vault certificate
|
||||
# key_vault_secret_id = var.blockscout_ssl_certificate_key_vault_secret_id
|
||||
|
||||
# Option 2: Upload certificate data
|
||||
# data = var.blockscout_ssl_certificate_data
|
||||
# password = var.blockscout_ssl_certificate_password
|
||||
}
|
||||
|
||||
# Request Routing Rule for RPC HTTP
|
||||
resource "azurerm_application_gateway_request_routing_rule" "rpc_http" {
|
||||
name = "${var.cluster_name}-rpc-http-rule"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
rule_type = "Basic"
|
||||
http_listener_name = azurerm_application_gateway_http_listener.rpc_http.name
|
||||
backend_address_pool_name = azurerm_application_gateway_backend_address_pool.rpc.name
|
||||
backend_http_settings_name = azurerm_application_gateway_backend_http_settings.rpc.name
|
||||
}
|
||||
|
||||
# Request Routing Rule for RPC HTTPS
|
||||
resource "azurerm_application_gateway_request_routing_rule" "rpc_https" {
|
||||
name = "${var.cluster_name}-rpc-https-rule"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
rule_type = "Basic"
|
||||
http_listener_name = azurerm_application_gateway_http_listener.rpc_https.name
|
||||
backend_address_pool_name = azurerm_application_gateway_backend_address_pool.rpc.name
|
||||
backend_http_settings_name = azurerm_application_gateway_backend_http_settings.rpc.name
|
||||
}
|
||||
|
||||
# Request Routing Rule for Blockscout HTTP
|
||||
resource "azurerm_application_gateway_request_routing_rule" "blockscout_http" {
|
||||
name = "${var.cluster_name}-blockscout-http-rule"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
rule_type = "Basic"
|
||||
http_listener_name = azurerm_application_gateway_http_listener.blockscout_http.name
|
||||
backend_address_pool_name = azurerm_application_gateway_backend_address_pool.blockscout.name
|
||||
backend_http_settings_name = azurerm_application_gateway_backend_http_settings.blockscout.name
|
||||
}
|
||||
|
||||
# Request Routing Rule for Blockscout HTTPS
|
||||
resource "azurerm_application_gateway_request_routing_rule" "blockscout_https" {
|
||||
name = "${var.cluster_name}-blockscout-https-rule"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
rule_type = "Basic"
|
||||
http_listener_name = azurerm_application_gateway_http_listener.blockscout_https.name
|
||||
backend_address_pool_name = azurerm_application_gateway_backend_address_pool.blockscout.name
|
||||
backend_http_settings_name = azurerm_application_gateway_backend_http_settings.blockscout.name
|
||||
}
|
||||
|
||||
# Redirect HTTP to HTTPS for RPC
|
||||
resource "azurerm_application_gateway_redirect_configuration" "rpc_http_redirect" {
|
||||
name = "${var.cluster_name}-rpc-http-redirect"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
redirect_type = "Permanent"
|
||||
target_listener_name = azurerm_application_gateway_http_listener.rpc_https.name
|
||||
include_path = true
|
||||
include_query_string = true
|
||||
}
|
||||
|
||||
# Redirect HTTP to HTTPS for Blockscout
|
||||
resource "azurerm_application_gateway_redirect_configuration" "blockscout_http_redirect" {
|
||||
name = "${var.cluster_name}-blockscout-http-redirect"
|
||||
resource_group_name = var.resource_group_name
|
||||
application_gateway_name = azurerm_application_gateway.main.name
|
||||
|
||||
redirect_type = "Permanent"
|
||||
target_listener_name = azurerm_application_gateway_http_listener.blockscout_https.name
|
||||
include_path = true
|
||||
include_query_string = true
|
||||
}
|
||||
319
terraform/modules/networking/main.tf
Normal file
319
terraform/modules/networking/main.tf
Normal file
@@ -0,0 +1,319 @@
|
||||
# Networking Module for Azure
|
||||
# Creates VNet, subnets, NSGs, and Application Gateway
|
||||
|
||||
# Virtual Network
|
||||
resource "azurerm_virtual_network" "main" {
|
||||
name = "${var.cluster_name}-vnet"
|
||||
address_space = ["10.0.0.0/16"]
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Networking"
|
||||
})
|
||||
}
|
||||
|
||||
# Subnet for AKS
|
||||
resource "azurerm_subnet" "aks" {
|
||||
name = "${var.cluster_name}-aks-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = ["10.0.1.0/24"]
|
||||
|
||||
# Required for AKS
|
||||
service_endpoints = ["Microsoft.Storage", "Microsoft.KeyVault"]
|
||||
}
|
||||
|
||||
# Subnet for validators (private)
|
||||
resource "azurerm_subnet" "validators" {
|
||||
name = "${var.cluster_name}-validators-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = ["10.0.2.0/24"]
|
||||
|
||||
# No service endpoints for private subnet
|
||||
}
|
||||
|
||||
# Subnet for sentries (public P2P)
|
||||
resource "azurerm_subnet" "sentries" {
|
||||
name = "${var.cluster_name}-sentries-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = ["10.0.3.0/24"]
|
||||
}
|
||||
|
||||
# Subnet for RPC nodes (DMZ)
|
||||
resource "azurerm_subnet" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = ["10.0.4.0/24"]
|
||||
|
||||
service_endpoints = ["Microsoft.Storage"]
|
||||
}
|
||||
|
||||
# Subnet for Application Gateway
|
||||
resource "azurerm_subnet" "appgateway" {
|
||||
name = "${var.cluster_name}-appgateway-subnet"
|
||||
resource_group_name = var.resource_group_name
|
||||
virtual_network_name = azurerm_virtual_network.main.name
|
||||
address_prefixes = ["10.0.5.0/24"]
|
||||
}
|
||||
|
||||
# Network Security Group for validators (private, no public access)
|
||||
resource "azurerm_network_security_group" "validators" {
|
||||
name = "${var.cluster_name}-validators-nsg"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
# Allow internal communication only
|
||||
security_rule {
|
||||
name = "AllowInternal"
|
||||
priority = 1000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "10.0.0.0/16"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
# Deny all other traffic
|
||||
security_rule {
|
||||
name = "DenyAll"
|
||||
priority = 4096
|
||||
direction = "Inbound"
|
||||
access = "Deny"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Validators-NSG"
|
||||
})
|
||||
}
|
||||
|
||||
# Network Security Group for sentries (P2P port 30303)
|
||||
resource "azurerm_network_security_group" "sentries" {
|
||||
name = "${var.cluster_name}-sentries-nsg"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
# Allow P2P (30303 TCP/UDP)
|
||||
security_rule {
|
||||
name = "AllowP2P"
|
||||
priority = 1000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "30303"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
security_rule {
|
||||
name = "AllowP2PUDP"
|
||||
priority = 1001
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Udp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "30303"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
# Allow internal communication
|
||||
security_rule {
|
||||
name = "AllowInternal"
|
||||
priority = 2000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "10.0.0.0/16"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Sentries-NSG"
|
||||
})
|
||||
}
|
||||
|
||||
# Network Security Group for RPC (HTTPS only)
|
||||
resource "azurerm_network_security_group" "rpc" {
|
||||
name = "${var.cluster_name}-rpc-nsg"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
# Allow HTTPS (443)
|
||||
security_rule {
|
||||
name = "AllowHTTPS"
|
||||
priority = 1000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "443"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
# Allow HTTP (for redirect to HTTPS)
|
||||
security_rule {
|
||||
name = "AllowHTTP"
|
||||
priority = 1001
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "80"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
# Allow internal communication
|
||||
security_rule {
|
||||
name = "AllowInternal"
|
||||
priority = 2000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "10.0.0.0/16"
|
||||
destination_address_prefix = "*"
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "RPC-NSG"
|
||||
})
|
||||
}
|
||||
|
||||
# Associate NSGs with subnets
|
||||
resource "azurerm_subnet_network_security_group_association" "validators" {
|
||||
subnet_id = azurerm_subnet.validators.id
|
||||
network_security_group_id = azurerm_network_security_group.validators.id
|
||||
}
|
||||
|
||||
resource "azurerm_subnet_network_security_group_association" "sentries" {
|
||||
subnet_id = azurerm_subnet.sentries.id
|
||||
network_security_group_id = azurerm_network_security_group.sentries.id
|
||||
}
|
||||
|
||||
resource "azurerm_subnet_network_security_group_association" "rpc" {
|
||||
subnet_id = azurerm_subnet.rpc.id
|
||||
network_security_group_id = azurerm_network_security_group.rpc.id
|
||||
}
|
||||
|
||||
# Public IP for Application Gateway
|
||||
resource "azurerm_public_ip" "appgateway" {
|
||||
name = "${var.cluster_name}-appgateway-ip"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
sku = "Standard"
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Application-Gateway-IP"
|
||||
})
|
||||
}
|
||||
|
||||
# Application Gateway (simplified - full config would include backend pools, listeners, etc.)
|
||||
resource "azurerm_application_gateway" "main" {
|
||||
name = "${var.cluster_name}-appgateway"
|
||||
resource_group_name = var.resource_group_name
|
||||
location = var.location
|
||||
|
||||
sku {
|
||||
name = "WAF_v2"
|
||||
tier = "WAF_v2"
|
||||
capacity = 2
|
||||
}
|
||||
|
||||
gateway_ip_configuration {
|
||||
name = "appGatewayIpConfig"
|
||||
subnet_id = azurerm_subnet.appgateway.id
|
||||
}
|
||||
|
||||
frontend_port {
|
||||
name = "http"
|
||||
port = 80
|
||||
}
|
||||
|
||||
frontend_port {
|
||||
name = "https"
|
||||
port = 443
|
||||
}
|
||||
|
||||
frontend_ip_configuration {
|
||||
name = "appGatewayFrontendIP"
|
||||
public_ip_address_id = azurerm_public_ip.appgateway.id
|
||||
}
|
||||
|
||||
# SSL Policy (required for WAF_v2)
|
||||
ssl_policy {
|
||||
policy_type = "Predefined"
|
||||
policy_name = "AppGwSslPolicy20220101" # Modern TLS policy
|
||||
}
|
||||
|
||||
# WAF configuration
|
||||
waf_configuration {
|
||||
enabled = true
|
||||
firewall_mode = "Prevention"
|
||||
rule_set_type = "OWASP"
|
||||
rule_set_version = "3.2"
|
||||
file_upload_limit_mb = 100
|
||||
request_body_check = true
|
||||
max_request_body_size_kb = 128
|
||||
}
|
||||
|
||||
# Minimal required blocks - will be configured after AKS deployment
|
||||
backend_address_pool {
|
||||
name = "default-backend-pool"
|
||||
# Backend IPs will be added after AKS services are deployed
|
||||
}
|
||||
|
||||
backend_http_settings {
|
||||
name = "default-http-settings"
|
||||
cookie_based_affinity = "Disabled"
|
||||
port = 80
|
||||
protocol = "Http"
|
||||
request_timeout = 20
|
||||
}
|
||||
|
||||
http_listener {
|
||||
name = "default-listener"
|
||||
frontend_ip_configuration_name = "appGatewayFrontendIP"
|
||||
frontend_port_name = "http"
|
||||
protocol = "Http"
|
||||
}
|
||||
|
||||
request_routing_rule {
|
||||
name = "default-routing-rule"
|
||||
rule_type = "Basic"
|
||||
priority = 100
|
||||
http_listener_name = "default-listener"
|
||||
backend_address_pool_name = "default-backend-pool"
|
||||
backend_http_settings_name = "default-http-settings"
|
||||
}
|
||||
|
||||
# Note: Backend pools, HTTP settings, probes, listeners, and routing rules
|
||||
# should be reconfigured after AKS deployment when service IPs are known.
|
||||
# Use Azure CLI or Terraform to update these resources after initial deployment.
|
||||
#
|
||||
# For production, consider using Azure Application Gateway Ingress Controller (AGIC)
|
||||
# which automatically configures the Application Gateway based on Kubernetes ingress resources.
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Application-Gateway"
|
||||
})
|
||||
}
|
||||
|
||||
# Outputs are defined in outputs.tf
|
||||
48
terraform/modules/networking/outputs.tf
Normal file
48
terraform/modules/networking/outputs.tf
Normal file
@@ -0,0 +1,48 @@
|
||||
output "vnet_id" {
|
||||
value = azurerm_virtual_network.main.id
|
||||
}
|
||||
|
||||
output "aks_subnet_id" {
|
||||
value = azurerm_subnet.aks.id
|
||||
}
|
||||
|
||||
output "node_subnet_id" {
|
||||
value = azurerm_subnet.aks.id
|
||||
}
|
||||
|
||||
output "validators_subnet_id" {
|
||||
value = azurerm_subnet.validators.id
|
||||
}
|
||||
|
||||
output "sentries_subnet_id" {
|
||||
value = azurerm_subnet.sentries.id
|
||||
}
|
||||
|
||||
output "rpc_subnet_id" {
|
||||
value = azurerm_subnet.rpc.id
|
||||
}
|
||||
|
||||
output "application_gateway_id" {
|
||||
value = azurerm_application_gateway.main.id
|
||||
}
|
||||
|
||||
output "application_gateway_fqdn" {
|
||||
value = azurerm_public_ip.appgateway.fqdn
|
||||
description = "FQDN of the Application Gateway public IP"
|
||||
}
|
||||
|
||||
output "validators_nsg_id" {
|
||||
value = azurerm_network_security_group.validators.id
|
||||
description = "Network Security Group ID for validators"
|
||||
}
|
||||
|
||||
output "sentries_nsg_id" {
|
||||
value = azurerm_network_security_group.sentries.id
|
||||
description = "Network Security Group ID for sentries"
|
||||
}
|
||||
|
||||
output "rpc_nsg_id" {
|
||||
value = azurerm_network_security_group.rpc.id
|
||||
description = "Network Security Group ID for RPC nodes"
|
||||
}
|
||||
|
||||
28
terraform/modules/networking/variables.tf
Normal file
28
terraform/modules/networking/variables.tf
Normal file
@@ -0,0 +1,28 @@
|
||||
# Variables for Networking Module
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Name of the resource group"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the AKS cluster"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
default = "prod"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to networking resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
91
terraform/modules/networking/waf-rules.tf
Normal file
91
terraform/modules/networking/waf-rules.tf
Normal file
@@ -0,0 +1,91 @@
|
||||
# WAF Rules for Application Gateway
|
||||
# Configures Web Application Firewall rules for security
|
||||
|
||||
resource "azurerm_web_application_firewall_policy" "main" {
|
||||
name = "${var.cluster_name}-waf-policy"
|
||||
resource_group_name = var.resource_group_name
|
||||
location = var.location
|
||||
|
||||
# Policy settings
|
||||
policy_settings {
|
||||
enabled = true
|
||||
mode = var.environment == "prod" ? "Prevention" : "Detection"
|
||||
request_body_check = true
|
||||
max_request_body_size_in_kb = 128
|
||||
file_upload_limit_in_mb = 100
|
||||
}
|
||||
|
||||
# Managed rules (use default OWASP + BotManager rule sets, no deprecated overrides)
|
||||
managed_rules {
|
||||
# OWASP Core Rule Set
|
||||
managed_rule_set {
|
||||
type = "OWASP"
|
||||
version = "3.2"
|
||||
}
|
||||
|
||||
# Bot Protection
|
||||
managed_rule_set {
|
||||
type = "Microsoft_BotManagerRuleSet"
|
||||
version = "1.0"
|
||||
}
|
||||
}
|
||||
|
||||
# Custom rules
|
||||
# Note: RateLimitRule requires group_by_user_session which may not be supported in current provider version
|
||||
# Uncomment and configure when provider supports it, or use Azure Portal/CLI to configure rate limiting
|
||||
# custom_rules {
|
||||
# name = "BlockHighRateRequests"
|
||||
# priority = 1
|
||||
# rule_type = "RateLimitRule"
|
||||
# action = "Block"
|
||||
# rate_limit_threshold = 100
|
||||
# }
|
||||
|
||||
# Custom rule to block suspicious IPs (only if IPs are provided)
|
||||
# Note: If blocked_ips is empty, this rule is effectively disabled
|
||||
dynamic "custom_rules" {
|
||||
for_each = length(var.blocked_ips) > 0 ? [1] : []
|
||||
content {
|
||||
name = "BlockSuspiciousIPs"
|
||||
priority = 2
|
||||
rule_type = "MatchRule"
|
||||
action = "Block"
|
||||
|
||||
match_conditions {
|
||||
match_variables {
|
||||
variable_name = "RemoteAddr"
|
||||
}
|
||||
operator = "IPMatch"
|
||||
match_values = var.blocked_ips
|
||||
negation_condition = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
custom_rules {
|
||||
name = "AllowSpecificMethods"
|
||||
priority = 3
|
||||
rule_type = "MatchRule"
|
||||
action = "Allow"
|
||||
|
||||
match_conditions {
|
||||
match_variables {
|
||||
variable_name = "RequestMethod"
|
||||
}
|
||||
operator = "Contains"
|
||||
match_values = ["GET", "POST", "OPTIONS"]
|
||||
negation_condition = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Variables
|
||||
variable "blocked_ips" {
|
||||
description = "List of IP addresses/CIDR blocks to block"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
# Variables environment, cluster_name, resource_group_name, and location
|
||||
# are defined in variables.tf
|
||||
|
||||
171
terraform/modules/nginx-proxy/main.tf
Normal file
171
terraform/modules/nginx-proxy/main.tf
Normal file
@@ -0,0 +1,171 @@
|
||||
# Nginx Proxy Server Module
|
||||
# Deploys an Nginx reverse proxy to route Cloudflare traffic to backend VMs across regions
|
||||
|
||||
# Network Interface for Nginx Proxy
|
||||
resource "azurerm_network_interface" "nginx_proxy" {
|
||||
name = "${var.cluster_name}-nginx-nic"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
ip_configuration {
|
||||
name = "internal"
|
||||
subnet_id = var.subnet_id
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
public_ip_address_id = azurerm_public_ip.nginx_proxy.id
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Nginx-Proxy"
|
||||
})
|
||||
}
|
||||
|
||||
# Public IP for Nginx Proxy
|
||||
resource "azurerm_public_ip" "nginx_proxy" {
|
||||
name = "${var.cluster_name}-nginx-ip"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
sku = "Standard"
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Nginx-Proxy"
|
||||
})
|
||||
}
|
||||
|
||||
# Network Security Group for Nginx Proxy
|
||||
resource "azurerm_network_security_group" "nginx_proxy" {
|
||||
name = "${var.cluster_name}-nginx-nsg"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
# Allow HTTP from Cloudflare
|
||||
security_rule {
|
||||
name = "AllowHTTP"
|
||||
priority = 1000
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "80"
|
||||
source_address_prefix = "*" # TODO: Restrict to Cloudflare IP ranges
|
||||
destination_address_prefix = "*"
|
||||
description = "Allow HTTP from Cloudflare"
|
||||
}
|
||||
|
||||
# Allow HTTPS from Cloudflare
|
||||
security_rule {
|
||||
name = "AllowHTTPS"
|
||||
priority = 1001
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "443"
|
||||
source_address_prefix = "*" # TODO: Restrict to Cloudflare IP ranges
|
||||
destination_address_prefix = "*"
|
||||
description = "Allow HTTPS from Cloudflare"
|
||||
}
|
||||
|
||||
# Allow SSH for management
|
||||
security_rule {
|
||||
name = "AllowSSH"
|
||||
priority = 1002
|
||||
direction = "Inbound"
|
||||
access = "Allow"
|
||||
protocol = "Tcp"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "22"
|
||||
source_address_prefix = "*" # TODO: Restrict to admin IPs
|
||||
destination_address_prefix = "*"
|
||||
description = "Allow SSH for management"
|
||||
}
|
||||
|
||||
# Allow outbound to backend VMs
|
||||
security_rule {
|
||||
name = "AllowOutboundBackend"
|
||||
priority = 2000
|
||||
direction = "Outbound"
|
||||
access = "Allow"
|
||||
protocol = "*"
|
||||
source_port_range = "*"
|
||||
destination_port_range = "*"
|
||||
source_address_prefix = "*"
|
||||
destination_address_prefix = "*"
|
||||
description = "Allow outbound to backend VMs"
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Network-Security"
|
||||
})
|
||||
}
|
||||
|
||||
# Associate NSG with NIC
|
||||
resource "azurerm_network_interface_security_group_association" "nginx_proxy" {
|
||||
network_interface_id = azurerm_network_interface.nginx_proxy.id
|
||||
network_security_group_id = azurerm_network_security_group.nginx_proxy.id
|
||||
}
|
||||
|
||||
# Virtual Machine for Nginx Proxy
|
||||
resource "azurerm_linux_virtual_machine" "nginx_proxy" {
|
||||
name = "${var.cluster_name}-nginx"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
size = "Standard_D4s_v4" # 4 vCPUs - x64 architecture (compatible with x64 Ubuntu)
|
||||
admin_username = var.admin_username
|
||||
|
||||
network_interface_ids = [azurerm_network_interface.nginx_proxy.id]
|
||||
|
||||
admin_ssh_key {
|
||||
username = var.admin_username
|
||||
public_key = var.ssh_public_key
|
||||
}
|
||||
|
||||
os_disk {
|
||||
name = "${var.cluster_name}-nginx-disk"
|
||||
caching = "ReadWrite"
|
||||
storage_account_type = "Premium_LRS"
|
||||
disk_size_gb = 128
|
||||
}
|
||||
|
||||
source_image_reference {
|
||||
publisher = "Canonical"
|
||||
offer = "0001-com-ubuntu-server-jammy"
|
||||
sku = "22_04-lts-gen2"
|
||||
version = "latest"
|
||||
}
|
||||
|
||||
identity {
|
||||
type = "SystemAssigned"
|
||||
}
|
||||
|
||||
custom_data = base64encode(templatefile("${path.module}/nginx-cloud-init.yaml", {
|
||||
backend_vms = var.backend_vms
|
||||
admin_username = var.admin_username
|
||||
}))
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Nginx-Proxy"
|
||||
})
|
||||
}
|
||||
|
||||
# Outputs
|
||||
output "fqdn" {
|
||||
value = azurerm_public_ip.nginx_proxy.fqdn
|
||||
description = "FQDN of the Nginx proxy (if DNS configured)"
|
||||
}
|
||||
|
||||
output "public_ip" {
|
||||
value = azurerm_public_ip.nginx_proxy.ip_address
|
||||
description = "Public IP address of the Nginx proxy"
|
||||
}
|
||||
|
||||
output "private_ip" {
|
||||
value = azurerm_network_interface.nginx_proxy.private_ip_address
|
||||
description = "Private IP address of the Nginx proxy"
|
||||
}
|
||||
|
||||
output "principal_id" {
|
||||
value = azurerm_linux_virtual_machine.nginx_proxy.identity[0].principal_id
|
||||
description = "Managed Identity principal ID for Nginx proxy (for Key Vault access)"
|
||||
}
|
||||
|
||||
240
terraform/modules/nginx-proxy/nginx-cloud-init.yaml
Normal file
240
terraform/modules/nginx-proxy/nginx-cloud-init.yaml
Normal file
@@ -0,0 +1,240 @@
|
||||
#cloud-config
|
||||
# Cloud-init configuration for Nginx Proxy Server
|
||||
# Routes Cloudflare traffic to backend Besu VMs across 5 US regions
|
||||
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
|
||||
packages:
|
||||
- nginx
|
||||
- certbot
|
||||
- python3-certbot-nginx
|
||||
- curl
|
||||
- jq
|
||||
- wget
|
||||
- unzip
|
||||
- cloudflared
|
||||
|
||||
write_files:
|
||||
- path: /etc/nginx/nginx.conf
|
||||
content: |
|
||||
user www-data;
|
||||
worker_processes auto;
|
||||
pid /run/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
use epoll;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
error_log /var/log/nginx/error.log warn;
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 20M;
|
||||
|
||||
# Gzip compression
|
||||
gzip on;
|
||||
gzip_vary on;
|
||||
gzip_proxied any;
|
||||
gzip_comp_level 6;
|
||||
gzip_types text/plain text/css text/xml text/javascript application/json application/javascript application/xml+rss;
|
||||
|
||||
# Upstream backend servers (load balancing across 5 US regions)
|
||||
# NOTE: Backend VMs use private IPs only. Cross-region connectivity requires VPN/ExpressRoute
|
||||
# or Cloudflare Tunnel agents on each backend VM. For Phase 1, configure VPN or use
|
||||
# Cloudflare Tunnel on each backend VM to expose services.
|
||||
upstream besu_rpc_backend {
|
||||
least_conn; # Load balancing method
|
||||
|
||||
# Backend VMs from all regions (use private IPs - requires VPN/ExpressRoute for cross-region)
|
||||
%{ if length([for region, vms in backend_vms : length(vms.private_ips) > 0 ? 1 : 0]) > 0 ~}
|
||||
${join("\n ", [for region, vms in backend_vms : length(vms.private_ips) > 0 ? join("\n ", [for idx, ip in vms.private_ips : "server ${ip}:8545 max_fails=3 fail_timeout=30s;"]) : "# No backend VMs in ${region}"])}
|
||||
%{ else ~}
|
||||
# No backend VMs configured - add default backend or configure backends
|
||||
server 127.0.0.1:8545 down; # Placeholder - will return 502
|
||||
%{ endif ~}
|
||||
}
|
||||
|
||||
upstream besu_ws_backend {
|
||||
least_conn;
|
||||
|
||||
# Backend VMs from all regions (use private IPs - requires VPN/ExpressRoute for cross-region)
|
||||
%{ if length([for region, vms in backend_vms : length(vms.private_ips) > 0 ? 1 : 0]) > 0 ~}
|
||||
${join("\n ", [for region, vms in backend_vms : length(vms.private_ips) > 0 ? join("\n ", [for idx, ip in vms.private_ips : "server ${ip}:8546 max_fails=3 fail_timeout=30s;"]) : "# No backend VMs in ${region}"])}
|
||||
%{ else ~}
|
||||
# No backend VMs configured - add default backend or configure backends
|
||||
server 127.0.0.1:8546 down; # Placeholder - will return 502
|
||||
%{ endif ~}
|
||||
}
|
||||
|
||||
# HTTP server (redirect to HTTPS)
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
location /.well-known/acme-challenge/ {
|
||||
root /var/www/html;
|
||||
}
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
# HTTPS server
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name _;
|
||||
|
||||
# SSL configuration (will be updated by certbot)
|
||||
ssl_certificate /etc/letsencrypt/live/_/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/_/privkey.pem;
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
# Security headers
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
add_header X-Frame-Options "SAMEORIGIN" always;
|
||||
add_header X-Content-Type-Options "nosniff" always;
|
||||
add_header X-XSS-Protection "1; mode=block" always;
|
||||
|
||||
# RPC HTTP endpoint
|
||||
location /rpc {
|
||||
proxy_pass http://besu_rpc_backend;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection 'upgrade';
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
proxy_read_timeout 300s;
|
||||
proxy_connect_timeout 75s;
|
||||
}
|
||||
|
||||
# WebSocket endpoint
|
||||
location /ws {
|
||||
proxy_pass http://besu_ws_backend;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_read_timeout 3600s;
|
||||
proxy_connect_timeout 75s;
|
||||
}
|
||||
|
||||
# Health check endpoint
|
||||
location /health {
|
||||
access_log off;
|
||||
return 200 "healthy\n";
|
||||
add_header Content-Type text/plain;
|
||||
}
|
||||
|
||||
# Metrics endpoint (if needed)
|
||||
location /metrics {
|
||||
deny all;
|
||||
return 403;
|
||||
}
|
||||
}
|
||||
permissions: '0644'
|
||||
owner: root:root
|
||||
|
||||
- path: /opt/nginx/setup.sh
|
||||
content: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
ADMIN_USERNAME="${admin_username}"
|
||||
|
||||
echo "Setting up Nginx Proxy Server..."
|
||||
|
||||
# Create directories
|
||||
mkdir -p /var/www/html
|
||||
mkdir -p /etc/nginx/conf.d
|
||||
mkdir -p /etc/cloudflared
|
||||
|
||||
# Test Nginx configuration
|
||||
nginx -t
|
||||
|
||||
# Enable and start Nginx
|
||||
systemctl enable nginx
|
||||
systemctl restart nginx
|
||||
|
||||
# Cloudflare Tunnel setup
|
||||
echo "Configuring Cloudflare Tunnel..."
|
||||
echo "NOTE: You need to run 'cloudflared tunnel login' and 'cloudflared tunnel create <tunnel-name>'"
|
||||
echo " Then configure /etc/cloudflared/config.yml with your tunnel ID and credentials"
|
||||
echo " See: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/"
|
||||
|
||||
# Create Cloudflare Tunnel systemd service (will be enabled after manual configuration)
|
||||
cat > /etc/systemd/system/cloudflared.service << 'EOF'
|
||||
[Unit]
|
||||
Description=Cloudflare Tunnel
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
ExecStart=/usr/bin/cloudflared tunnel --config /etc/cloudflared/config.yml run
|
||||
Restart=on-failure
|
||||
RestartSec=5s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
# Create placeholder config file
|
||||
cat > /etc/cloudflared/config.yml << 'EOF'
|
||||
# Cloudflare Tunnel Configuration
|
||||
# Replace with your actual tunnel ID and credentials after running:
|
||||
# cloudflared tunnel login
|
||||
# cloudflared tunnel create <tunnel-name>
|
||||
#
|
||||
# Example configuration:
|
||||
# tunnel: <your-tunnel-id>
|
||||
# credentials-file: /etc/cloudflared/<tunnel-id>.json
|
||||
#
|
||||
# ingress:
|
||||
# - hostname: your-domain.com
|
||||
# service: http://localhost:443
|
||||
# - service: http_status:404
|
||||
EOF
|
||||
|
||||
chmod 600 /etc/cloudflared/config.yml
|
||||
|
||||
echo "Nginx Proxy setup complete!"
|
||||
echo "Backend regions configured: ${join(", ", keys(backend_vms))}"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo "1. SSH to this server: ssh ${admin_username}@<nginx-proxy-public-ip>"
|
||||
echo "2. Run: cloudflared tunnel login"
|
||||
echo "3. Run: cloudflared tunnel create <tunnel-name>"
|
||||
echo "4. Update /etc/cloudflared/config.yml with tunnel ID and ingress rules"
|
||||
echo "5. Enable and start: systemctl enable cloudflared && systemctl start cloudflared"
|
||||
permissions: '0755'
|
||||
owner: root:root
|
||||
|
||||
runcmd:
|
||||
- /opt/nginx/setup.sh
|
||||
- systemctl status nginx
|
||||
|
||||
final_message: "Nginx Proxy Server setup complete. Configure SSL with certbot after DNS is set up."
|
||||
|
||||
54
terraform/modules/nginx-proxy/variables.tf
Normal file
54
terraform/modules/nginx-proxy/variables.tf
Normal file
@@ -0,0 +1,54 @@
|
||||
# Variables for Nginx Proxy Module
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Name of the resource group"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the cluster"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "subnet_id" {
|
||||
description = "Subnet ID for Nginx proxy"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "backend_vms" {
|
||||
description = "Map of backend VMs by region with their IPs"
|
||||
type = map(object({
|
||||
region = string
|
||||
private_ips = list(string)
|
||||
public_ips = list(string)
|
||||
}))
|
||||
}
|
||||
|
||||
variable "admin_username" {
|
||||
description = "Admin username for VM"
|
||||
type = string
|
||||
default = "besuadmin"
|
||||
}
|
||||
|
||||
variable "ssh_public_key" {
|
||||
description = "SSH public key for VM access"
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
144
terraform/modules/resource-groups/main.tf
Normal file
144
terraform/modules/resource-groups/main.tf
Normal file
@@ -0,0 +1,144 @@
|
||||
# Resource Groups Module
|
||||
# Creates resource groups according to Well-Architected Framework best practices
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "Project name"
|
||||
type = string
|
||||
default = "defi-oracle-mainnet"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resource groups"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
# Common tags
|
||||
locals {
|
||||
common_tags = merge(
|
||||
{
|
||||
Environment = var.environment
|
||||
Project = var.project_name
|
||||
ChainID = "138"
|
||||
ManagedBy = "Terraform"
|
||||
},
|
||||
var.tags
|
||||
)
|
||||
}
|
||||
|
||||
# Networking Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "network" {
|
||||
name = "rg-${var.environment}-network-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Networking"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Compute Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "compute" {
|
||||
name = "rg-${var.environment}-compute-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Compute"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Storage Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "storage" {
|
||||
name = "rg-${var.environment}-storage-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Storage"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Security Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "security" {
|
||||
name = "rg-${var.environment}-security-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Security"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Monitoring Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "monitoring" {
|
||||
name = "rg-${var.environment}-monitoring-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Monitoring"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Identity Resource Group (Long-lived)
|
||||
resource "azurerm_resource_group" "identity" {
|
||||
name = "rg-${var.environment}-identity-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Identity"
|
||||
Lifecycle = "Long-lived"
|
||||
})
|
||||
}
|
||||
|
||||
# Temporary Resource Group (Ephemeral)
|
||||
resource "azurerm_resource_group" "temp" {
|
||||
name = "rg-${var.environment}-temp-001"
|
||||
location = var.location
|
||||
|
||||
tags = merge(local.common_tags, {
|
||||
Purpose = "Temporary"
|
||||
Lifecycle = "Ephemeral"
|
||||
})
|
||||
}
|
||||
|
||||
# Outputs
|
||||
output "network_rg_name" {
|
||||
value = azurerm_resource_group.network.name
|
||||
}
|
||||
|
||||
output "compute_rg_name" {
|
||||
value = azurerm_resource_group.compute.name
|
||||
}
|
||||
|
||||
output "storage_rg_name" {
|
||||
value = azurerm_resource_group.storage.name
|
||||
}
|
||||
|
||||
output "security_rg_name" {
|
||||
value = azurerm_resource_group.security.name
|
||||
}
|
||||
|
||||
output "monitoring_rg_name" {
|
||||
value = azurerm_resource_group.monitoring.name
|
||||
}
|
||||
|
||||
output "identity_rg_name" {
|
||||
value = azurerm_resource_group.identity.name
|
||||
}
|
||||
|
||||
output "temp_rg_name" {
|
||||
value = azurerm_resource_group.temp.name
|
||||
}
|
||||
|
||||
28
terraform/modules/resource-groups/variables.tf
Normal file
28
terraform/modules/resource-groups/variables.tf
Normal file
@@ -0,0 +1,28 @@
|
||||
# Variables for Resource Groups Module
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
validation {
|
||||
condition = contains(["prod", "dev", "test", "staging"], var.environment)
|
||||
error_message = "Environment must be one of: prod, dev, test, staging"
|
||||
}
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "project_name" {
|
||||
description = "Project name"
|
||||
type = string
|
||||
default = "defi-oracle-mainnet"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resource groups"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
120
terraform/modules/security/main.tf
Normal file
120
terraform/modules/security/main.tf
Normal file
@@ -0,0 +1,120 @@
|
||||
# Security Module for Azure
|
||||
# Configures Azure Security Center, Key Vault, and security policies
|
||||
|
||||
# Azure Security Center (Defender for Cloud)
|
||||
resource "azurerm_security_center_subscription_pricing" "main" {
|
||||
tier = "Standard"
|
||||
resource_type = "VirtualMachines"
|
||||
}
|
||||
|
||||
resource "azurerm_security_center_subscription_pricing" "storage" {
|
||||
tier = "Standard"
|
||||
resource_type = "StorageAccounts"
|
||||
}
|
||||
|
||||
resource "azurerm_security_center_subscription_pricing" "sql" {
|
||||
tier = "Standard"
|
||||
resource_type = "SqlServers"
|
||||
}
|
||||
|
||||
resource "azurerm_security_center_subscription_pricing" "app_services" {
|
||||
tier = "Standard"
|
||||
resource_type = "AppServices"
|
||||
}
|
||||
|
||||
# Security Center Auto Provisioning
|
||||
resource "azurerm_security_center_auto_provisioning" "main" {
|
||||
auto_provision = "On"
|
||||
}
|
||||
|
||||
# Security Center Contact
|
||||
resource "azurerm_security_center_contact" "main" {
|
||||
email = var.security_contact_email
|
||||
phone = var.security_contact_phone
|
||||
alert_notifications = true
|
||||
alerts_to_admins = true
|
||||
}
|
||||
|
||||
# Security Center Workspace
|
||||
resource "azurerm_security_center_workspace" "main" {
|
||||
scope = "/subscriptions/${var.subscription_id}"
|
||||
workspace_id = azurerm_log_analytics_workspace.security.id
|
||||
}
|
||||
|
||||
# Log Analytics Workspace for Security Center
|
||||
resource "azurerm_log_analytics_workspace" "security" {
|
||||
name = "${var.cluster_name}-security-workspace"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
sku = "PerGB2018"
|
||||
retention_in_days = 90
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "Security-Monitoring"
|
||||
})
|
||||
}
|
||||
|
||||
# Security Center Assessment
|
||||
resource "azurerm_security_center_assessment" "aks" {
|
||||
assessment_policy_id = azurerm_security_center_assessment_policy.aks.id
|
||||
target_resource_id = var.aks_cluster_id
|
||||
status {
|
||||
code = "Healthy"
|
||||
}
|
||||
}
|
||||
|
||||
# Security Center Assessment Policy
|
||||
resource "azurerm_security_center_assessment_policy" "aks" {
|
||||
display_name = "AKS Security Assessment"
|
||||
description = "Security assessment for AKS cluster"
|
||||
severity = "Medium"
|
||||
categories = ["Security"]
|
||||
implementation_effort = "Low"
|
||||
remediation_description = "Follow AKS security best practices"
|
||||
threats = ["DataExfiltration", "DataSpillage", "MaliciousInsider"]
|
||||
user_impact = "Low"
|
||||
}
|
||||
|
||||
# Variables
|
||||
variable "security_contact_email" {
|
||||
description = "Email for security contact"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "security_contact_phone" {
|
||||
description = "Phone for security contact"
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "subscription_id" {
|
||||
description = "Azure subscription ID"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "aks_cluster_id" {
|
||||
description = "AKS cluster resource ID"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Cluster name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Resource group name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
|
||||
63
terraform/modules/storage/main.tf
Normal file
63
terraform/modules/storage/main.tf
Normal file
@@ -0,0 +1,63 @@
|
||||
# Storage Module for Azure
|
||||
# Creates storage accounts for backups and shared storage
|
||||
# Variables are defined in variables.tf
|
||||
|
||||
# Storage account for backups
|
||||
resource "azurerm_storage_account" "backups" {
|
||||
name = substr("${replace(lower(var.cluster_name), "-", "")}b${substr(var.environment, 0, 1)}${substr(md5(var.resource_group_name), 0, 6)}", 0, 24)
|
||||
resource_group_name = var.resource_group_name
|
||||
location = var.location
|
||||
account_tier = "Standard"
|
||||
account_replication_type = var.environment == "prod" ? "GRS" : "LRS"
|
||||
min_tls_version = "TLS1_2"
|
||||
|
||||
# Enable blob versioning
|
||||
blob_properties {
|
||||
versioning_enabled = true
|
||||
delete_retention_policy {
|
||||
days = var.environment == "prod" ? 90 : 30
|
||||
}
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "backups"
|
||||
})
|
||||
}
|
||||
|
||||
# Container for chaindata backups
|
||||
resource "azurerm_storage_container" "chaindata" {
|
||||
name = "chaindata"
|
||||
storage_account_name = azurerm_storage_account.backups.name
|
||||
container_access_type = "private"
|
||||
}
|
||||
|
||||
# Container for configuration backups
|
||||
resource "azurerm_storage_container" "config" {
|
||||
name = "config"
|
||||
storage_account_name = azurerm_storage_account.backups.name
|
||||
container_access_type = "private"
|
||||
}
|
||||
|
||||
# Storage account for shared configuration (optional)
|
||||
resource "azurerm_storage_account" "shared" {
|
||||
name = substr("${replace(lower(var.cluster_name), "-", "")}s${substr(var.environment, 0, 1)}${substr(md5(var.resource_group_name), 0, 6)}", 0, 24)
|
||||
resource_group_name = var.resource_group_name
|
||||
location = var.location
|
||||
account_tier = "Standard"
|
||||
account_replication_type = "LRS"
|
||||
min_tls_version = "TLS1_2"
|
||||
|
||||
tags = merge(var.tags, {
|
||||
Purpose = "shared"
|
||||
})
|
||||
}
|
||||
|
||||
# File share for shared configuration
|
||||
resource "azurerm_storage_share" "config" {
|
||||
name = "config"
|
||||
storage_account_name = azurerm_storage_account.shared.name
|
||||
quota = 10
|
||||
}
|
||||
|
||||
# Outputs are defined in outputs.tf
|
||||
|
||||
21
terraform/modules/storage/outputs.tf
Normal file
21
terraform/modules/storage/outputs.tf
Normal file
@@ -0,0 +1,21 @@
|
||||
output "backup_storage_account_name" {
|
||||
value = azurerm_storage_account.backups.name
|
||||
}
|
||||
|
||||
output "backup_storage_account_key" {
|
||||
value = azurerm_storage_account.backups.primary_access_key
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "backup_container_name" {
|
||||
value = azurerm_storage_container.chaindata.name
|
||||
}
|
||||
|
||||
output "shared_storage_account_name" {
|
||||
value = azurerm_storage_account.shared.name
|
||||
}
|
||||
|
||||
output "shared_file_share_name" {
|
||||
value = azurerm_storage_share.config.name
|
||||
}
|
||||
|
||||
28
terraform/modules/storage/variables.tf
Normal file
28
terraform/modules/storage/variables.tf
Normal file
@@ -0,0 +1,28 @@
|
||||
# Variables for Storage Module
|
||||
|
||||
variable "resource_group_name" {
|
||||
description = "Name of the resource group"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "location" {
|
||||
description = "Azure region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the AKS cluster"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
description = "Environment (prod, dev, test, staging)"
|
||||
type = string
|
||||
default = "prod"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "Tags to apply to storage resources"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
112
terraform/modules/vm-deployment/README.md
Normal file
112
terraform/modules/vm-deployment/README.md
Normal file
@@ -0,0 +1,112 @@
|
||||
# VM Deployment Module
|
||||
|
||||
This Terraform module deploys Besu nodes on Azure Virtual Machines (VMs) or Virtual Machine Scale Sets (VMSS) with Docker Engine.
|
||||
|
||||
## Features
|
||||
|
||||
- Deploy validators, sentries, or RPC nodes
|
||||
- Support for individual VMs or VM Scale Sets
|
||||
- Multi-region deployment
|
||||
- Automatic Docker installation
|
||||
- Automatic Besu configuration
|
||||
- Cloud-init setup
|
||||
- Managed Identity for Key Vault access
|
||||
- Boot diagnostics
|
||||
- Network security groups
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```hcl
|
||||
module "besu_validators" {
|
||||
source = "./modules/vm-deployment"
|
||||
|
||||
resource_group_name = "defi-oracle-mainnet-rg"
|
||||
location = "eastus"
|
||||
cluster_name = "defi-oracle-aks"
|
||||
node_type = "validator"
|
||||
node_count = 4
|
||||
vm_size = "Standard_D4s_v3"
|
||||
ssh_public_key = file("~/.ssh/id_rsa.pub")
|
||||
subnet_id = azurerm_subnet.validators.id
|
||||
storage_account_name = azurerm_storage_account.vm_storage.primary_blob_endpoint
|
||||
key_vault_id = azurerm_key_vault.main.id
|
||||
genesis_file_path = "https://storageaccount.blob.core.windows.net/genesis/genesis.json"
|
||||
network_security_group_id = azurerm_network_security_group.validators.id
|
||||
}
|
||||
```
|
||||
|
||||
### VM Scale Set
|
||||
|
||||
```hcl
|
||||
module "besu_rpc_vmss" {
|
||||
source = "./modules/vm-deployment"
|
||||
|
||||
resource_group_name = "defi-oracle-mainnet-rg"
|
||||
location = "eastus"
|
||||
cluster_name = "defi-oracle-aks"
|
||||
node_type = "rpc"
|
||||
node_count = 3
|
||||
vm_size = "Standard_D8s_v3"
|
||||
use_scale_set = true
|
||||
ssh_public_key = file("~/.ssh/id_rsa.pub")
|
||||
subnet_id = azurerm_subnet.rpc.id
|
||||
storage_account_name = azurerm_storage_account.vm_storage.primary_blob_endpoint
|
||||
key_vault_id = azurerm_key_vault.main.id
|
||||
genesis_file_path = "https://storageaccount.blob.core.windows.net/genesis/genesis.json"
|
||||
network_security_group_id = azurerm_network_security_group.rpc.id
|
||||
}
|
||||
```
|
||||
|
||||
## Variables
|
||||
|
||||
| Name | Description | Type | Default | Required |
|
||||
|------|-------------|------|---------|----------|
|
||||
| resource_group_name | Name of the resource group | string | - | yes |
|
||||
| location | Azure region | string | - | yes |
|
||||
| cluster_name | Name of the Besu network cluster | string | - | yes |
|
||||
| node_type | Type of node (validator, sentry, rpc) | string | - | yes |
|
||||
| node_count | Number of nodes | number | 1 | no |
|
||||
| vm_size | VM size | string | "Standard_D4s_v3" | no |
|
||||
| admin_username | Admin username for VMs | string | "besuadmin" | no |
|
||||
| ssh_public_key | SSH public key for VM access | string | - | yes |
|
||||
| use_scale_set | Use VM Scale Set instead of individual VMs | bool | false | no |
|
||||
| subnet_id | Subnet ID for VMs | string | - | yes |
|
||||
| storage_account_name | Storage account name for boot diagnostics | string | - | yes |
|
||||
| key_vault_id | Key Vault ID for secrets | string | - | yes |
|
||||
| genesis_file_path | Path to genesis file in storage | string | - | yes |
|
||||
| network_security_group_id | Network Security Group ID | string | - | yes |
|
||||
| tags | Tags for resources | map(string) | {} | no |
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| vm_ids | VM or VMSS IDs |
|
||||
| vm_private_ips | Private IP addresses of VMs |
|
||||
| vm_public_ips | Public IP addresses of VMs (sentry and RPC nodes only) |
|
||||
| vm_names | VM or VMSS names |
|
||||
| vmss_id | VM Scale Set ID (if using scale set) |
|
||||
|
||||
## Requirements
|
||||
|
||||
- Terraform >= 1.0
|
||||
- Azure Provider >= 3.0
|
||||
- Azure CLI installed and configured
|
||||
- SSH key pair
|
||||
|
||||
## Examples
|
||||
|
||||
See `terraform/vm-deployment.tf` for complete examples.
|
||||
|
||||
## Notes
|
||||
|
||||
- VMs are automatically configured via cloud-init
|
||||
- Docker is installed automatically
|
||||
- Besu is started automatically via systemd service
|
||||
- Validator keys are downloaded from Key Vault using Managed Identity
|
||||
- Genesis file is downloaded from Azure Storage
|
||||
- Boot diagnostics are enabled
|
||||
- Managed Identity is enabled for Key Vault access
|
||||
|
||||
195
terraform/modules/vm-deployment/cloud-init-phase1.yaml
Normal file
195
terraform/modules/vm-deployment/cloud-init-phase1.yaml
Normal file
@@ -0,0 +1,195 @@
|
||||
#cloud-config
|
||||
# Cloud-init configuration for Besu node setup (Phase 1)
|
||||
# Installs: Docker Engine, NVM, Node 22 LTS, JDK 17, and Besu
|
||||
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
|
||||
packages:
|
||||
- apt-transport-https
|
||||
- ca-certificates
|
||||
- curl
|
||||
- gnupg
|
||||
- lsb-release
|
||||
- jq
|
||||
- wget
|
||||
- unzip
|
||||
- git
|
||||
- build-essential
|
||||
|
||||
write_files:
|
||||
- path: /opt/besu/setup.sh
|
||||
content: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
NODE_TYPE="${node_type}"
|
||||
NODE_INDEX="${node_index}"
|
||||
CLUSTER_NAME="${cluster_name}"
|
||||
KEY_VAULT_ID="${key_vault_id}"
|
||||
GENESIS_FILE_PATH="${genesis_file_path}"
|
||||
ADMIN_USERNAME="${admin_username}"
|
||||
|
||||
echo "Setting up Besu node: $NODE_TYPE-$NODE_INDEX"
|
||||
|
||||
# Install Docker Engine
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "Installing Docker Engine..."
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
apt-get update
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
systemctl enable docker
|
||||
systemctl start docker
|
||||
usermod -aG docker $ADMIN_USERNAME
|
||||
echo "Docker Engine installed successfully"
|
||||
fi
|
||||
|
||||
# Install NVM (Node Version Manager)
|
||||
if [ ! -d "/home/$ADMIN_USERNAME/.nvm" ]; then
|
||||
echo "Installing NVM..."
|
||||
export NVM_DIR="/home/$ADMIN_USERNAME/.nvm"
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash
|
||||
chown -R $ADMIN_USERNAME:$ADMIN_USERNAME /home/$ADMIN_USERNAME/.nvm
|
||||
echo 'export NVM_DIR="$HOME/.nvm"' >> /home/$ADMIN_USERNAME/.bashrc
|
||||
echo '[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"' >> /home/$ADMIN_USERNAME/.bashrc
|
||||
echo '[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"' >> /home/$ADMIN_USERNAME/.bashrc
|
||||
echo "NVM installed successfully"
|
||||
fi
|
||||
|
||||
# Install Node.js 22 LTS via NVM
|
||||
echo "Installing Node.js 22 LTS..."
|
||||
export NVM_DIR="/home/$ADMIN_USERNAME/.nvm"
|
||||
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
|
||||
su - $ADMIN_USERNAME -c "source ~/.nvm/nvm.sh && nvm install 22 && nvm alias default 22 && nvm use 22"
|
||||
echo "Node.js 22 LTS installed successfully"
|
||||
|
||||
# Install JDK 17 (OpenJDK)
|
||||
if ! command -v java &> /dev/null || ! java -version 2>&1 | grep -q "17"; then
|
||||
echo "Installing JDK 17..."
|
||||
apt-get update
|
||||
apt-get install -y openjdk-17-jdk
|
||||
update-alternatives --set java /usr/lib/jvm/java-17-openjdk-amd64/bin/java
|
||||
echo "JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64" >> /etc/environment
|
||||
echo "JDK 17 installed successfully"
|
||||
fi
|
||||
|
||||
# Install Azure CLI (if not already installed)
|
||||
if ! command -v az &> /dev/null; then
|
||||
echo "Installing Azure CLI..."
|
||||
curl -sL https://aka.ms/InstallAzureCLIDeb | bash
|
||||
fi
|
||||
|
||||
# Create directories
|
||||
mkdir -p /opt/besu/{data,config,keys,logs}
|
||||
chown -R $ADMIN_USERNAME:$ADMIN_USERNAME /opt/besu
|
||||
|
||||
# Download genesis file from storage (if URL provided)
|
||||
if [ -n "$GENESIS_FILE_PATH" ] && [[ "$GENESIS_FILE_PATH" == http* ]]; then
|
||||
echo "Downloading genesis file from $GENESIS_FILE_PATH..."
|
||||
wget -q -O /opt/besu/config/genesis.json "$GENESIS_FILE_PATH" || echo "Failed to download genesis file"
|
||||
fi
|
||||
|
||||
# Configure Azure CLI authentication (Managed Identity)
|
||||
if [ -n "$KEY_VAULT_ID" ]; then
|
||||
echo "Configuring Azure authentication for Key Vault access..."
|
||||
# VMs use Managed Identity for Key Vault access
|
||||
# Azure CLI will use Managed Identity automatically
|
||||
fi
|
||||
|
||||
# Download validator keys from Key Vault (if validator)
|
||||
if [ "$NODE_TYPE" == "validator" ] && [ -n "$KEY_VAULT_ID" ]; then
|
||||
echo "Downloading validator keys from Key Vault..."
|
||||
# Extract key vault name from ID
|
||||
KEY_VAULT_NAME=$(echo "$KEY_VAULT_ID" | sed 's/.*\/\([^/]*\)$/\1/')
|
||||
# Download keys using Azure CLI with Managed Identity
|
||||
# az keyvault secret show --vault-name "$KEY_VAULT_NAME" --name "validator-key-$NODE_INDEX" --query value -o tsv > /opt/besu/keys/validator-key.txt || echo "Failed to download key"
|
||||
fi
|
||||
|
||||
# Ensure directories have correct permissions
|
||||
chown -R $ADMIN_USERNAME:$ADMIN_USERNAME /opt/besu
|
||||
|
||||
# Verify installations
|
||||
echo "=== Installation Verification ==="
|
||||
docker --version
|
||||
su - $ADMIN_USERNAME -c "source ~/.nvm/nvm.sh && node --version && npm --version"
|
||||
java -version
|
||||
echo "=== Setup complete! ==="
|
||||
permissions: '0755'
|
||||
owner: root:root
|
||||
|
||||
- path: /opt/besu/docker-compose.yml
|
||||
content: |
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
besu:
|
||||
image: hyperledger/besu:23.10.0
|
||||
container_name: besu-${node_type}-${node_index}
|
||||
restart: unless-stopped
|
||||
user: "${admin_username}"
|
||||
volumes:
|
||||
- /opt/besu/data:/data
|
||||
- /opt/besu/config:/config
|
||||
- /opt/besu/keys:/keys:ro
|
||||
- /opt/besu/logs:/logs
|
||||
ports:
|
||||
- "9545:9545" # Metrics
|
||||
${node_type == "validator" || node_type == "sentry" ? "- \"30303:30303\" # P2P TCP\n - \"30303:30303/udp\" # P2P UDP" : ""}
|
||||
${node_type == "rpc" ? "- \"8545:8545\" # RPC HTTP\n - \"8546:8546\" # WebSocket" : ""}
|
||||
command:
|
||||
- /opt/besu/bin/besu
|
||||
- --config-file=/config/besu-config.toml
|
||||
environment:
|
||||
- BESU_OPTS=-Xmx4g -Xms4g
|
||||
networks:
|
||||
- besu-network
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:9545/metrics"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 120s
|
||||
|
||||
networks:
|
||||
besu-network:
|
||||
driver: bridge
|
||||
permissions: '0644'
|
||||
owner: ${admin_username}:${admin_username}
|
||||
|
||||
- path: /etc/systemd/system/besu.service
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Besu Node Service
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/besu
|
||||
ExecStart=/usr/bin/docker compose up -d
|
||||
ExecStop=/usr/bin/docker compose down
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
User=${admin_username}
|
||||
Group=${admin_username}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
permissions: '0644'
|
||||
owner: root:root
|
||||
|
||||
runcmd:
|
||||
- /opt/besu/setup.sh
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable besu.service
|
||||
- systemctl start besu.service
|
||||
|
||||
final_message: "Besu node setup complete. Node type: ${node_type}, Index: ${node_index}. Docker, NVM, Node 22 LTS, and JDK 17 installed."
|
||||
|
||||
159
terraform/modules/vm-deployment/cloud-init.yaml
Normal file
159
terraform/modules/vm-deployment/cloud-init.yaml
Normal file
@@ -0,0 +1,159 @@
|
||||
#cloud-config
|
||||
# Cloud-init configuration for Besu node setup
|
||||
# This script installs Docker, configures the node, and starts Besu
|
||||
|
||||
package_update: true
|
||||
package_upgrade: true
|
||||
|
||||
packages:
|
||||
- apt-transport-https
|
||||
- ca-certificates
|
||||
- curl
|
||||
- gnupg
|
||||
- lsb-release
|
||||
- jq
|
||||
- wget
|
||||
- unzip
|
||||
- azure-cli
|
||||
|
||||
write_files:
|
||||
- path: /opt/besu/setup.sh
|
||||
content: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
NODE_TYPE="${node_type}"
|
||||
NODE_INDEX="${node_index}"
|
||||
CLUSTER_NAME="${cluster_name}"
|
||||
KEY_VAULT_ID="${key_vault_id}"
|
||||
GENESIS_FILE_PATH="${genesis_file_path}"
|
||||
ADMIN_USERNAME="${admin_username}"
|
||||
|
||||
echo "Setting up Besu node: $NODE_TYPE-$NODE_INDEX"
|
||||
|
||||
# Install Docker
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "Installing Docker..."
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
apt-get update
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
systemctl enable docker
|
||||
systemctl start docker
|
||||
usermod -aG docker $ADMIN_USERNAME
|
||||
fi
|
||||
|
||||
# Install Azure CLI (if not already installed)
|
||||
if ! command -v az &> /dev/null; then
|
||||
echo "Installing Azure CLI..."
|
||||
curl -sL https://aka.ms/InstallAzureCLIDeb | bash
|
||||
fi
|
||||
|
||||
# Create directories
|
||||
mkdir -p /opt/besu/{data,config,keys,logs}
|
||||
chown -R $ADMIN_USERNAME:$ADMIN_USERNAME /opt/besu
|
||||
|
||||
# Download genesis file from storage (if URL provided)
|
||||
if [ -n "$GENESIS_FILE_PATH" ] && [[ "$GENESIS_FILE_PATH" == http* ]]; then
|
||||
echo "Downloading genesis file from $GENESIS_FILE_PATH..."
|
||||
wget -q -O /opt/besu/config/genesis.json "$GENESIS_FILE_PATH" || echo "Failed to download genesis file"
|
||||
fi
|
||||
|
||||
# Configure Azure CLI authentication (Managed Identity)
|
||||
if [ -n "$KEY_VAULT_ID" ]; then
|
||||
echo "Configuring Azure authentication for Key Vault access..."
|
||||
# VMs use Managed Identity for Key Vault access
|
||||
# Azure CLI will use Managed Identity automatically
|
||||
fi
|
||||
|
||||
# Download validator keys from Key Vault (if validator)
|
||||
if [ "$NODE_TYPE" == "validator" ] && [ -n "$KEY_VAULT_ID" ]; then
|
||||
echo "Downloading validator keys from Key Vault..."
|
||||
# Extract key vault name from ID
|
||||
KEY_VAULT_NAME=$(echo "$KEY_VAULT_ID" | sed 's/.*\/\([^/]*\)$/\1/')
|
||||
# Download keys using Azure CLI with Managed Identity
|
||||
# az keyvault secret show --vault-name "$KEY_VAULT_NAME" --name "validator-key-$NODE_INDEX" --query value -o tsv > /opt/besu/keys/validator-key.txt || echo "Failed to download key"
|
||||
fi
|
||||
|
||||
# Ensure directories have correct permissions
|
||||
chown -R $ADMIN_USERNAME:$ADMIN_USERNAME /opt/besu
|
||||
|
||||
echo "Setup complete!"
|
||||
permissions: '0755'
|
||||
owner: root:root
|
||||
|
||||
- path: /opt/besu/docker-compose.yml
|
||||
content: |
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
besu:
|
||||
image: hyperledger/besu:23.10.0
|
||||
container_name: besu-${node_type}-${node_index}
|
||||
restart: unless-stopped
|
||||
user: "${admin_username}"
|
||||
volumes:
|
||||
- /opt/besu/data:/data
|
||||
- /opt/besu/config:/config
|
||||
- /opt/besu/keys:/keys:ro
|
||||
- /opt/besu/logs:/logs
|
||||
ports:
|
||||
- "9545:9545" # Metrics
|
||||
${node_type == "validator" || node_type == "sentry" ? "- \"30303:30303\" # P2P TCP\n - \"30303:30303/udp\" # P2P UDP" : ""}
|
||||
${node_type == "rpc" ? "- \"8545:8545\" # RPC HTTP\n - \"8546:8546\" # WebSocket" : ""}
|
||||
command:
|
||||
- /opt/besu/bin/besu
|
||||
- --config-file=/config/besu-config.toml
|
||||
environment:
|
||||
- BESU_OPTS=-Xmx4g -Xms4g
|
||||
networks:
|
||||
- besu-network
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:9545/metrics"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 120s
|
||||
|
||||
networks:
|
||||
besu-network:
|
||||
driver: bridge
|
||||
permissions: '0644'
|
||||
owner: ${admin_username}:${admin_username}
|
||||
|
||||
- path: /etc/systemd/system/besu.service
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Besu Node Service
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
WorkingDirectory=/opt/besu
|
||||
ExecStart=/usr/bin/docker compose up -d
|
||||
ExecStop=/usr/bin/docker compose down
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
User=${admin_username}
|
||||
Group=${admin_username}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
permissions: '0644'
|
||||
owner: root:root
|
||||
|
||||
runcmd:
|
||||
- /opt/besu/setup.sh
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable besu.service
|
||||
- systemctl start besu.service
|
||||
|
||||
final_message: "Besu node setup complete. Node type: ${node_type}, Index: ${node_index}"
|
||||
|
||||
188
terraform/modules/vm-deployment/main.tf
Normal file
188
terraform/modules/vm-deployment/main.tf
Normal file
@@ -0,0 +1,188 @@
|
||||
# VM Deployment Module for Besu Network
|
||||
# Deploys Besu nodes on Virtual Machines or VM Scale Sets with Docker Engine
|
||||
# Variables are defined in variables.tf
|
||||
|
||||
# Network Interface
|
||||
resource "azurerm_network_interface" "besu_node" {
|
||||
count = var.use_scale_set ? 0 : var.node_count
|
||||
name = "${var.cluster_name}-${var.node_type}-nic-${count.index}"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
ip_configuration {
|
||||
name = "internal"
|
||||
subnet_id = var.subnet_id
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
# Only sentry and RPC nodes get public IPs; besu-node uses private IPs only
|
||||
public_ip_address_id = (var.node_type == "sentry" || var.node_type == "rpc") ? azurerm_public_ip.besu_node[count.index].id : null
|
||||
}
|
||||
|
||||
tags = merge(var.tags, {
|
||||
NodeType = var.node_type
|
||||
NodeIndex = count.index
|
||||
})
|
||||
}
|
||||
|
||||
# Associate NSG with NIC
|
||||
resource "azurerm_network_interface_security_group_association" "besu_node" {
|
||||
count = var.use_scale_set ? 0 : var.node_count
|
||||
network_interface_id = azurerm_network_interface.besu_node[count.index].id
|
||||
network_security_group_id = var.network_security_group_id
|
||||
}
|
||||
|
||||
# Public IP for sentry and RPC nodes only (besu-node uses private IPs only)
|
||||
# Note: Phase 1 backend VMs use private IPs only; Nginx proxy connects via Cloudflare Tunnel
|
||||
resource "azurerm_public_ip" "besu_node" {
|
||||
count = var.use_scale_set ? 0 : (var.node_type == "sentry" || var.node_type == "rpc" ? var.node_count : 0)
|
||||
name = "${var.cluster_name}-${var.node_type}-ip-${count.index}"
|
||||
location = var.location
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
sku = "Standard"
|
||||
|
||||
tags = merge(var.tags, {
|
||||
NodeType = var.node_type
|
||||
NodeIndex = count.index
|
||||
})
|
||||
}
|
||||
|
||||
# Virtual Machine
# Individual Besu node VMs, used when var.use_scale_set is false.
# One VM per node; NICs are created elsewhere in this module as
# azurerm_network_interface.besu_node and attached by index.
resource "azurerm_linux_virtual_machine" "besu_node" {
  count = var.use_scale_set ? 0 : var.node_count

  name                = "${var.cluster_name}-${var.node_type}-${count.index}"
  location            = var.location
  resource_group_name = var.resource_group_name
  size                = var.vm_size
  admin_username      = var.admin_username

  # SSH-key-only access. This matches the provider default; it is stated
  # explicitly so the security posture is visible in the configuration.
  disable_password_authentication = true

  network_interface_ids = [azurerm_network_interface.besu_node[count.index].id]

  admin_ssh_key {
    username   = var.admin_username
    public_key = var.ssh_public_key
  }

  os_disk {
    name                 = "${var.cluster_name}-${var.node_type}-disk-${count.index}"
    caching              = "ReadWrite"
    storage_account_type = var.storage_account_type
    disk_size_gb         = var.disk_size_gb
  }

  # Ubuntu 22.04 LTS (Jammy), Gen2 image.
  source_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts-gen2"
    version   = "latest"
  }

  # Boot diagnostics, only when enabled AND a storage account is supplied.
  # The for_each guard already ensures storage_account_name is non-empty, so
  # the URI can be built unconditionally inside the block (the original inner
  # ternary's null branch was unreachable).
  dynamic "boot_diagnostics" {
    for_each = var.vm_enable_boot_diagnostics && var.storage_account_name != "" ? [1] : []
    content {
      storage_account_uri = "https://${var.storage_account_name}.blob.core.windows.net/"
    }
  }

  # Optional system-assigned managed identity; principal IDs are exported via
  # the principal_ids output for Key Vault access policies.
  dynamic "identity" {
    for_each = var.vm_enable_managed_identity ? [1] : []
    content {
      type = "SystemAssigned"
    }
  }

  # Per-node cloud-init, rendered with this node's index. The Phase 1 variant
  # is selected by var.use_phase1_cloud_init.
  custom_data = base64encode(templatefile(
    var.use_phase1_cloud_init ? "${path.module}/cloud-init-phase1.yaml" : "${path.module}/cloud-init.yaml",
    {
      node_type         = var.node_type
      node_index        = count.index
      cluster_name      = var.cluster_name
      key_vault_id      = var.key_vault_id
      genesis_file_path = var.genesis_file_path
      admin_username    = var.admin_username
    }
  ))

  tags = merge(var.tags, {
    NodeType  = var.node_type
    NodeIndex = count.index
  })

  depends_on = [azurerm_network_interface.besu_node]
}
|
||||
|
||||
# VM Scale Set (alternative to individual VMs)
# Scale-set deployment path, used when var.use_scale_set is true.
# NOTE(review): unlike the individual-VM resource above, no boot_diagnostics
# block is configured here — confirm whether that asymmetry is intentional.
# NOTE(review): overprovisioning is left at the provider default; for
# stateful blockchain nodes consider overprovision = false — confirm.
resource "azurerm_linux_virtual_machine_scale_set" "besu_node" {
  count               = var.use_scale_set ? 1 : 0
  name                = "${var.cluster_name}-${var.node_type}-vmss"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = var.vm_size
  instances           = var.node_count
  admin_username      = var.admin_username

  admin_ssh_key {
    username   = var.admin_username
    public_key = var.ssh_public_key
  }

  # Ubuntu 22.04 LTS (Jammy), Gen2 — same image as the individual-VM path.
  source_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts-gen2"
    version   = "latest"
  }

  os_disk {
    storage_account_type = var.storage_account_type
    caching              = "ReadWrite"
    disk_size_gb         = var.disk_size_gb
  }

  network_interface {
    name    = "${var.cluster_name}-${var.node_type}-nic"
    primary = true

    ip_configuration {
      name      = "internal"
      primary   = true
      subnet_id = var.subnet_id

      # Only sentry and RPC nodes get public IPs; besu-node uses private IPs only
      # Match the logic used for individual VMs
      dynamic "public_ip_address" {
        for_each = (var.node_type == "sentry" || var.node_type == "rpc") ? [1] : []
        content {
          name = "${var.cluster_name}-${var.node_type}-public-ip"
        }
      }
    }
  }

  # Optional system-assigned managed identity; exported via principal_ids.
  dynamic "identity" {
    for_each = var.vm_enable_managed_identity ? [1] : []
    content {
      type = "SystemAssigned"
    }
  }

  # Cloud-init is rendered ONCE for the whole scale set, so every instance
  # receives node_index = 0 (per-instance indexing is not available here).
  # NOTE(review): confirm the cloud-init template tolerates duplicate indexes
  # across instances.
  custom_data = base64encode(templatefile(
    var.use_phase1_cloud_init ? "${path.module}/cloud-init-phase1.yaml" : "${path.module}/cloud-init.yaml",
    {
      node_type         = var.node_type
      node_index        = 0
      cluster_name      = var.cluster_name
      key_vault_id      = var.key_vault_id
      genesis_file_path = var.genesis_file_path
      admin_username    = var.admin_username
    }
  ))

  # Manual: instance model changes are rolled out explicitly, not
  # automatically by Azure.
  upgrade_mode = "Manual"

  tags = merge(var.tags, {
    NodeType = var.node_type
  })
}
|
||||
|
||||
# Outputs are defined in outputs.tf
|
||||
|
||||
# --- terraform/modules/vm-deployment/outputs.tf (new file, 45 lines) ---
|
||||
output "vm_ids" {
  value       = var.use_scale_set ? azurerm_linux_virtual_machine_scale_set.besu_node[*].id : azurerm_linux_virtual_machine.besu_node[*].id
  description = "VM or VMSS IDs"
}

# Empty when using a scale set: per-instance NICs are managed by the VMSS,
# not by azurerm_network_interface.besu_node.
output "vm_private_ips" {
  value       = var.use_scale_set ? [] : azurerm_network_interface.besu_node[*].private_ip_address
  description = "Private IP addresses of VMs"
}

# Empty unless node_type is "sentry" or "rpc" — mirrors the count logic on
# azurerm_public_ip.besu_node in main.tf.
output "vm_public_ips" {
  value       = var.use_scale_set ? [] : (var.node_type == "sentry" || var.node_type == "rpc" ? azurerm_public_ip.besu_node[*].ip_address : [])
  description = "Public IP addresses of VMs (sentry and RPC types only; besu-node uses private IPs only)"
}

# Backward-compatible alias for vm_private_ips — kept for existing callers.
output "private_ip_addresses" {
  value       = var.use_scale_set ? [] : azurerm_network_interface.besu_node[*].private_ip_address
  description = "Private IP addresses of VMs (alias for vm_private_ips)"
}

# Backward-compatible alias for vm_public_ips — kept for existing callers.
output "public_ip_addresses" {
  value       = var.use_scale_set ? [] : (var.node_type == "sentry" || var.node_type == "rpc" ? azurerm_public_ip.besu_node[*].ip_address : [])
  description = "Public IP addresses of VMs (alias for vm_public_ips - sentry and RPC types only; besu-node uses private IPs only)"
}

output "location" {
  value       = var.location
  description = "Location of the VMs"
}

# For a scale set the name is synthesized here so it matches the `name`
# argument on the VMSS resource in main.tf, rather than being read back.
output "vm_names" {
  value       = var.use_scale_set ? ["${var.cluster_name}-${var.node_type}-vmss"] : azurerm_linux_virtual_machine.besu_node[*].name
  description = "VM or VMSS names"
}

# null when not using a scale set.
output "vmss_id" {
  value       = var.use_scale_set ? azurerm_linux_virtual_machine_scale_set.besu_node[0].id : null
  description = "VM Scale Set ID (if using scale set)"
}

# [] when managed identity is disabled; otherwise one principal ID per VM,
# or a single-element list for a scale set.
output "principal_ids" {
  value       = var.use_scale_set ? (var.vm_enable_managed_identity ? [azurerm_linux_virtual_machine_scale_set.besu_node[0].identity[0].principal_id] : []) : (var.vm_enable_managed_identity ? [for vm in azurerm_linux_virtual_machine.besu_node : vm.identity[0].principal_id] : [])
  description = "Managed Identity principal IDs for VMs (for Key Vault access policies)"
}
|
||||
|
||||
# --- terraform/modules/vm-deployment/variables.tf (new file, 112 lines) ---
|
||||
# Variables for VM Deployment Module

variable "resource_group_name" {
  description = "Name of the resource group"
  type        = string
}

variable "location" {
  description = "Azure region"
  type        = string
}

variable "cluster_name" {
  description = "Name of the Besu network cluster"
  type        = string
}

# NOTE(review): comments elsewhere in this module also mention a "besu-node"
# type — confirm whether the list in this description is complete before
# adding a validation block.
variable "node_type" {
  description = "Type of node (validator, sentry, rpc)"
  type        = string
}

variable "node_count" {
  description = "Number of nodes"
  type        = number
  default     = 1
}

variable "vm_size" {
  description = "VM size"
  type        = string
  default     = "Standard_D4s_v3"
}

variable "admin_username" {
  description = "Admin username for VMs"
  type        = string
  default     = "besuadmin"
}

variable "ssh_public_key" {
  description = "SSH public key for VM access"
  type        = string
}

# When true, a single azurerm_linux_virtual_machine_scale_set is created
# instead of node_count individual VMs.
variable "use_scale_set" {
  description = "Use VM Scale Set instead of individual VMs"
  type        = bool
  default     = false
}

variable "subnet_id" {
  description = "Subnet ID for VMs"
  type        = string
}

# Only consulted when vm_enable_boot_diagnostics is true; an empty string
# disables boot diagnostics. NOTE(review): consider default = "" so callers
# without diagnostics need not pass it.
variable "storage_account_name" {
  description = "Storage account name for boot diagnostics"
  type        = string
}

variable "key_vault_id" {
  description = "Key Vault ID for secrets"
  type        = string
}

variable "genesis_file_path" {
  description = "Path to genesis file in storage"
  type        = string
}

variable "network_security_group_id" {
  description = "Network Security Group ID"
  type        = string
}

variable "tags" {
  description = "Tags for resources"
  type        = map(string)
  default     = {}
}

variable "storage_account_type" {
  description = "Storage account type for VM disks"
  type        = string
  default     = "Premium_LRS"
}

variable "disk_size_gb" {
  description = "Disk size in GB"
  type        = number
  default     = 256
}

# Effective only when storage_account_name is also non-empty.
variable "vm_enable_boot_diagnostics" {
  description = "Enable boot diagnostics for VMs"
  type        = bool
  default     = true
}

variable "vm_enable_managed_identity" {
  description = "Enable Managed Identity for VMs"
  type        = bool
  default     = true
}

# Selects cloud-init-phase1.yaml over cloud-init.yaml when true.
variable "use_phase1_cloud_init" {
  description = "Use Phase 1 cloud-init (includes NVM, Node 22 LTS, JDK 17)"
  type        = bool
  default     = false
}
|
||||
|
||||
# --- end of diff ---