Apply Composer changes: comprehensive API updates, migrations, middleware, and infrastructure improvements

- Add comprehensive database migrations (001-024) for schema evolution
- Enhance API schema with expanded type definitions and resolvers
- Add new middleware: audit logging, rate limiting, MFA enforcement, security, tenant auth
- Implement new services: AI optimization, billing, blockchain, compliance, marketplace
- Add adapter layer for cloud integrations (Cloudflare, Kubernetes, Proxmox, storage)
- Update Crossplane provider with enhanced VM management capabilities
- Add comprehensive test suite for API endpoints and services
- Update frontend components with improved GraphQL subscriptions and real-time updates
- Enhance security configurations and headers (CSP, CORS, etc.)
- Update documentation and configuration files
- Add new CI/CD workflows and validation scripts
- Implement design system improvements and UI enhancements
This commit is contained in:
defiQUG
2025-12-12 18:01:35 -08:00
parent e01131efaf
commit 9daf1fd378
968 changed files with 160890 additions and 1092 deletions

View File

@@ -0,0 +1,21 @@
---
# Argo CD Application: deploys the Sankofa API from gitops/apps/api
# into the in-cluster `sankofa` namespace, with fully automated sync.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: sankofa-api
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/sankofa/phoenix-sankofa-cloud
    targetRevision: main
    path: gitops/apps/api
  destination:
    server: https://kubernetes.default.svc
    namespace: sankofa
  syncPolicy:
    automated:
      prune: true      # delete resources removed from git
      selfHeal: true   # revert out-of-band cluster changes
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,14 @@
---
# Kustomization for the Sankofa API app.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: sankofa
resources:
  - manifests/deployment.yaml
  - manifests/service.yaml
  - manifests/configmap.yaml
# `commonLabels` is deprecated since Kustomize v5; `labels` with
# `includeSelectors: true` is the documented equivalent (labels are applied
# to metadata AND to selectors/templates, exactly as commonLabels did).
labels:
  - includeSelectors: true
    pairs:
      app: sankofa-api
      # NOTE(review): non-prefixed key; the conventional form is
      # app.kubernetes.io/part-of — confirm no consumer depends on this key
      # before renaming, since it is baked into selectors.
      part-of: sankofa-phoenix

View File

@@ -0,0 +1,10 @@
---
# Non-secret runtime configuration for the Sankofa API container.
apiVersion: v1
kind: ConfigMap
metadata:
  name: sankofa-api-config
  namespace: sankofa
data:
  # All values are strings by ConfigMap contract; keep them quoted.
  PORT: "4000"
  NODE_ENV: "production"
  LOG_LEVEL: "info"

View File

@@ -0,0 +1,90 @@
---
# Sankofa API Deployment (3 replicas, DB + JWT credentials from Secrets).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sankofa-api
  namespace: sankofa
  labels:
    app: sankofa-api
spec:
  replicas: 3
  selector:
    matchLabels:
      app: sankofa-api
  template:
    metadata:
      labels:
        app: sankofa-api
    spec:
      containers:
        - name: api
          # NOTE(review): `:latest` defeats rollback and reproducible deploys —
          # pin an immutable tag or digest. TODO confirm registry path.
          image: sankofa-api:latest
          ports:
            - containerPort: 4000
              name: http
          env:
            # Database connection settings come from the shared DB secret.
            - name: DB_HOST
              valueFrom:
                secretKeyRef:
                  name: sankofa-db-credentials
                  key: host
            - name: DB_PORT
              value: "5432"
            - name: DB_NAME
              valueFrom:
                secretKeyRef:
                  name: sankofa-db-credentials
                  key: database
            - name: DB_USER
              valueFrom:
                secretKeyRef:
                  name: sankofa-db-credentials
                  key: username
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: sankofa-db-credentials
                  key: password
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: sankofa-api-secrets
                  key: jwt-secret
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              cpu: 500m
              memory: 512Mi
            limits:
              cpu: 2000m
              memory: 2Gi
          livenessProbe:
            httpGet:
              path: /health
              port: http   # named container port (4000)
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 5
# The Service that used to be duplicated at the bottom of this file was
# removed: manifests/service.yaml already defines the canonical sankofa-api
# Service (a superset, including the websocket port), and kustomization.yaml
# loads both files — two Services with the same name/namespace make
# `kustomize build` fail with a duplicate-resource error.

View File

@@ -0,0 +1,19 @@
---
# Canonical Service for the Sankofa API: HTTP (GraphQL) plus a dedicated
# websocket port for subscriptions.
apiVersion: v1
kind: Service
metadata:
  name: sankofa-api
  namespace: sankofa
spec:
  type: ClusterIP
  selector:
    app: sankofa-api
  ports:
    - name: http
      port: 4000
      targetPort: 4000
      protocol: TCP
    - name: websocket
      port: 4001
      targetPort: 4001
      protocol: TCP

View File

@@ -0,0 +1,27 @@
---
# Root ("app of apps") Argo CD Application pointing at gitops/apps.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: sankofa-phoenix
  namespace: argocd
spec:
  project: default
  source:
    # NOTE(review): Argo CD does NOT expand shell-style ${VAR:-default}
    # placeholders in Application manifests — the previous value
    # `${GIT_REPO_URL:-https://github.com/YOUR_ORG/sankofa-phoenix}` would
    # have been used verbatim as the repo URL. Set the real values here, or
    # render this file through Helm/Kustomize if it must stay parameterized.
    repoURL: https://github.com/YOUR_ORG/sankofa-phoenix
    targetRevision: main
    path: gitops/apps
  destination:
    server: https://kubernetes.default.svc
    namespace: sankofa
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
    retry:
      limit: 5
      backoff:
        duration: 5s
        factor: 2
        maxDuration: 3m

View File

@@ -8,7 +8,7 @@ metadata:
spec:
project: default
source:
repoURL: https://github.com/yourorg/hybrid-cloud-gitops
repoURL: https://github.com/sankofa/Sankofa
targetRevision: main
path: gitops/apps
destination:

View File

@@ -0,0 +1,21 @@
---
# Argo CD Application: deploys the Sankofa frontend from gitops/apps/frontend
# into the in-cluster `sankofa` namespace, with fully automated sync.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: sankofa-frontend
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/sankofa/phoenix-sankofa-cloud
    targetRevision: main
    path: gitops/apps/frontend
  destination:
    server: https://kubernetes.default.svc
    namespace: sankofa
  syncPolicy:
    automated:
      prune: true      # delete resources removed from git
      selfHeal: true   # revert out-of-band cluster changes
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,10 @@
---
# Build/runtime configuration for the Next.js frontend. NEXT_PUBLIC_* values
# are exposed to the browser bundle.
apiVersion: v1
kind: ConfigMap
metadata:
  name: sankofa-frontend-config
  namespace: sankofa
data:
  NEXT_PUBLIC_GRAPHQL_ENDPOINT: "http://sankofa-api:4000/graphql"
  NEXT_PUBLIC_APP_URL: "https://sankofa.nexus"
  NODE_ENV: "production"

View File

@@ -0,0 +1,86 @@
---
# Sankofa frontend Deployment, Service and Ingress.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sankofa-frontend
  namespace: sankofa
  labels:
    app: sankofa-frontend
spec:
  replicas: 2
  selector:
    matchLabels:
      app: sankofa-frontend
  template:
    metadata:
      labels:
        app: sankofa-frontend
    spec:
      containers:
        - name: frontend
          # NOTE(review): pin an immutable tag/digest instead of :latest.
          image: sankofa-frontend:latest
          ports:
            - containerPort: 3000
              name: http
          env:
            - name: NEXT_PUBLIC_GRAPHQL_ENDPOINT
              valueFrom:
                configMapKeyRef:
                  name: sankofa-frontend-config
                  # FIX: the referenced key must match the ConfigMap. The
                  # ConfigMap defines NEXT_PUBLIC_GRAPHQL_ENDPOINT; the old
                  # reference to "graphql-endpoint" made pods fail with
                  # CreateContainerConfigError (key not found).
                  key: NEXT_PUBLIC_GRAPHQL_ENDPOINT
            - name: NODE_ENV
              value: "production"
          resources:
            requests:
              cpu: 250m
              memory: 256Mi
            limits:
              cpu: 1000m
              memory: 1Gi
          livenessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/health
              port: 3000
            initialDelaySeconds: 10
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: sankofa-frontend
  namespace: sankofa
  labels:
    app: sankofa-frontend
spec:
  type: ClusterIP
  ports:
    - port: 3000
      targetPort: 3000
      protocol: TCP
      name: http
  selector:
    app: sankofa-frontend
---
# NOTE(review): this Ingress sets no ingressClassName and no TLS, unlike the
# Keycloak ingress in this repo (nginx + cert-manager) — confirm whether the
# cluster default class is intended here.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: sankofa-frontend
  namespace: sankofa
spec:
  rules:
    - host: sankofa.local
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: sankofa-frontend
                port:
                  number: 3000

View File

@@ -0,0 +1,21 @@
---
# Argo CD Application: deploys Keycloak from gitops/apps/keycloak into its
# own `keycloak` namespace, with fully automated sync.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: keycloak
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/sankofa/sankofa-phoenix-gitops
    targetRevision: main
    path: gitops/apps/keycloak
  destination:
    server: https://kubernetes.default.svc
    namespace: keycloak
  syncPolicy:
    automated:
      prune: true      # delete resources removed from git
      selfHeal: true   # revert out-of-band cluster changes
    syncOptions:
      - CreateNamespace=true

View File

@@ -0,0 +1,132 @@
---
# Keycloak identity provider: admin Secret, Deployment, Service, Ingress.
apiVersion: v1
kind: Secret
metadata:
  name: keycloak-credentials
  namespace: keycloak
type: Opaque
stringData:
  username: admin
  # NOTE(review): Kubernetes does NOT expand ${VAR:-default} — this literal
  # string becomes the admin password as-is. Inject the real value with your
  # secret tooling (ExternalSecrets/SealedSecrets/CI substitution) before
  # applying.
  password: ${KEYCLOAK_ADMIN_PASSWORD:-change-me}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: keycloak
  namespace: keycloak
spec:
  replicas: 1
  selector:
    matchLabels:
      app: keycloak
  template:
    metadata:
      labels:
        app: keycloak
    spec:
      containers:
        - name: keycloak
          image: quay.io/keycloak/keycloak:23.0
          args:
            # `--optimized` was removed: it tells Keycloak to skip the build
            # phase, so build-time options such as --db are only honored if
            # they were baked into a custom image (this one isn't). A plain
            # `start` auto-builds with the options given below.
            - start
            - --db=postgres
            - --db-url-host=keycloak-postgres
            - --db-url-port=5432
            - --db-username=$(DB_USERNAME)
            - --db-password=$(DB_PASSWORD)
            - --db-url-database=keycloak
            - --http-relative-path=/
            # Keycloak 23 uses --proxy; --proxy-headers only exists in 24+
            # and would abort startup as an unknown option on this image.
            - --proxy=edge
            - --hostname-strict=false
            - --hostname-strict-https=false
            # Required: without this, /health/live and /health/ready are not
            # served and both probes below fail permanently.
            - --health-enabled=true
          env:
            - name: KEYCLOAK_ADMIN
              valueFrom:
                secretKeyRef:
                  name: keycloak-credentials
                  key: username
            - name: KEYCLOAK_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: keycloak-credentials
                  key: password
            - name: DB_USERNAME
              valueFrom:
                secretKeyRef:
                  name: keycloak-db-credentials
                  key: username
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: keycloak-db-credentials
                  key: password
            # NOTE(review): not a documented Keycloak option — presumably read
            # by a custom extension; verify it is still needed.
            - name: KEYCLOAK_MULTI_REALM
              value: "true"
          ports:
            - containerPort: 8080
              name: http
            - containerPort: 8443
              name: https
          livenessProbe:
            httpGet:
              path: /health/live
              port: 8080
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /health/ready
              port: 8080
            initialDelaySeconds: 60
            periodSeconds: 30
          resources:
            requests:
              memory: "512Mi"
              cpu: "500m"
            limits:
              memory: "1Gi"
              cpu: "1000m"
---
apiVersion: v1
kind: Service
metadata:
  name: keycloak
  namespace: keycloak
spec:
  type: ClusterIP
  selector:
    app: keycloak
  ports:
    - port: 8080
      targetPort: 8080
      name: http
    - port: 8443
      targetPort: 8443
      name: https
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: keycloak
  namespace: keycloak
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - keycloak.sankofa.nexus
      secretName: keycloak-tls
  rules:
    - host: keycloak.sankofa.nexus
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: keycloak
                port:
                  number: 8080

View File

@@ -0,0 +1,87 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: keycloak-client-config
namespace: keycloak
data:
# Client configuration script for Keycloak
# Registers two OIDC clients via the Keycloak admin REST API:
#   - sankofa-api: confidential, service-account + direct-grant flows only
#   - portal-client: confidential, standard (browser) flow with redirect URIs
# Run manually (or from a Job) after Keycloak is up; requires curl and jq.
# NOTE(review): client secrets default to the literal "generate-me" — set
# SANKOFA_API_CLIENT_SECRET / PORTAL_CLIENT_SECRET before running. The target
# realm defaults to "master", which is unusual for application clients —
# TODO confirm the intended realm.
configure-clients.sh: |
#!/bin/bash
# Configure Keycloak clients via REST API
# This should be run after Keycloak is deployed
KEYCLOAK_URL="${KEYCLOAK_URL:-http://localhost:8080}"
ADMIN_USER="${KEYCLOAK_ADMIN:-admin}"
ADMIN_PASSWORD="${KEYCLOAK_ADMIN_PASSWORD:-admin}"
REALM="${REALM:-master}"
# Get admin token
TOKEN=$(curl -s -X POST "${KEYCLOAK_URL}/realms/${REALM}/protocol/openid-connect/token" \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "username=${ADMIN_USER}" \
-d "password=${ADMIN_PASSWORD}" \
-d "grant_type=password" \
-d "client_id=admin-cli" | jq -r '.access_token')
if [ "$TOKEN" == "null" ] || [ -z "$TOKEN" ]; then
echo "Failed to get admin token"
exit 1
fi
# Create sankofa-api client (confidential)
curl -s -X POST "${KEYCLOAK_URL}/admin/realms/${REALM}/clients" \
-H "Authorization: Bearer ${TOKEN}" \
-H "Content-Type: application/json" \
-d '{
"clientId": "sankofa-api",
"name": "Sankofa API Client",
"description": "GraphQL API backend client",
"enabled": true,
"clientAuthenticatorType": "client-secret",
"secret": "'${SANKOFA_API_CLIENT_SECRET:-generate-me}'",
"standardFlowEnabled": false,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": true,
"publicClient": false,
"protocol": "openid-connect",
"attributes": {
"access.token.lifespan": "300",
"client.secret.creation.time": "'$(date +%s)'"
}
}'
# Create portal-client (confidential)
curl -s -X POST "${KEYCLOAK_URL}/admin/realms/${REALM}/clients" \
-H "Authorization: Bearer ${TOKEN}" \
-H "Content-Type: application/json" \
-d '{
"clientId": "portal-client",
"name": "Sankofa Portal Client",
"description": "Portal frontend client",
"enabled": true,
"clientAuthenticatorType": "client-secret",
"secret": "'${PORTAL_CLIENT_SECRET:-generate-me}'",
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"serviceAccountsEnabled": false,
"publicClient": false,
"protocol": "openid-connect",
"redirectUris": [
"http://localhost:3000/*",
"https://portal.sankofa.nexus/*",
"https://*.sankofa.nexus/*"
],
"webOrigins": [
"http://localhost:3000",
"https://portal.sankofa.nexus",
"https://*.sankofa.nexus"
],
"attributes": {
"access.token.lifespan": "1800"
}
}'
echo "Keycloak clients configured successfully"

View File

@@ -0,0 +1,8 @@
---
# Dedicated namespace for the Keycloak identity stack.
apiVersion: v1
kind: Namespace
metadata:
  name: keycloak
  labels:
    app: keycloak
    component: identity

View File

@@ -0,0 +1,90 @@
---
# PostgreSQL backing store for Keycloak: credentials Secret, ConfigMap,
# single-replica StatefulSet, and headless Service.
apiVersion: v1
kind: Secret
metadata:
  name: keycloak-db-credentials
  namespace: keycloak
type: Opaque
stringData:
  username: keycloak
  # NOTE(review): Kubernetes does NOT expand ${VAR:-default}; this literal
  # becomes the DB password — inject the real value before applying.
  password: ${KEYCLOAK_DB_PASSWORD:-change-me}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: keycloak-db-config
  namespace: keycloak
data:
  POSTGRES_DB: keycloak
  POSTGRES_USER: keycloak
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: keycloak-postgres
  namespace: keycloak
spec:
  serviceName: keycloak-postgres
  replicas: 1
  selector:
    matchLabels:
      app: keycloak-postgres
  template:
    metadata:
      labels:
        app: keycloak-postgres
    spec:
      containers:
        - name: postgres
          image: postgres:15-alpine
          env:
            - name: POSTGRES_DB
              valueFrom:
                configMapKeyRef:
                  name: keycloak-db-config
                  key: POSTGRES_DB
            - name: POSTGRES_USER
              valueFrom:
                configMapKeyRef:
                  name: keycloak-db-config
                  key: POSTGRES_USER
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: keycloak-db-credentials
                  key: password
          ports:
            - containerPort: 5432
              name: postgres
          volumeMounts:
            - name: data
              mountPath: /var/lib/postgresql/data
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: keycloak-postgres
  namespace: keycloak
spec:
  clusterIP: None   # headless, for StatefulSet stable DNS
  selector:
    app: keycloak-postgres
  ports:
    - port: 5432
      targetPort: 5432
      name: postgres

View File

@@ -0,0 +1,207 @@
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: sankofa-alerts
namespace: monitoring
labels:
app: sankofa
prometheus: kube-prometheus
role: alert-rules
spec:
groups:
- name: api
interval: 30s
rules:
# API High Error Rate
- alert: APIHighErrorRate
expr: |
sum(rate(http_requests_total{job="api",status=~"5.."}[5m]))
/
sum(rate(http_requests_total{job="api"}[5m])) > 0.05
for: 5m
labels:
severity: critical
annotations:
summary: "API error rate is above 5%"
description: "API error rate is {{ $value | humanizePercentage }} for the last 5 minutes"
# API High Latency
- alert: APIHighLatency
expr: |
histogram_quantile(0.95,
sum(rate(http_request_duration_seconds_bucket{job="api"}[5m])) by (le)
) > 0.5
for: 5m
labels:
severity: warning
annotations:
summary: "API p95 latency is above 500ms"
description: "API p95 latency is {{ $value }}s"
# API Down
- alert: APIDown
expr: up{job="api"} == 0
for: 1m
labels:
severity: critical
annotations:
summary: "API is down"
description: "API service has been down for more than 1 minute"
- name: portal
interval: 30s
rules:
# Portal High Error Rate
- alert: PortalHighErrorRate
expr: |
sum(rate(http_requests_total{job="portal",status=~"5.."}[5m]))
/
sum(rate(http_requests_total{job="portal"}[5m])) > 0.05
for: 5m
labels:
severity: critical
annotations:
summary: "Portal error rate is above 5%"
description: "Portal error rate is {{ $value | humanizePercentage }}"
# Portal Down
- alert: PortalDown
expr: up{job="portal"} == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Portal is down"
description: "Portal service has been down for more than 1 minute"
- name: database
interval: 30s
rules:
# Database High Connection Count
- alert: DatabaseHighConnections
expr: |
pg_stat_database_numbackends{datname="sankofa"} > 80
for: 5m
labels:
severity: warning
annotations:
summary: "Database connection count is high"
description: "Database has {{ $value }} active connections"
# Database Slow Queries
- alert: DatabaseSlowQueries
expr: |
pg_stat_activity_count{state="active"} > 10
for: 5m
labels:
severity: warning
annotations:
summary: "Database has slow queries"
description: "Database has {{ $value }} active queries running for more than 5 minutes"
# Database Down
- alert: DatabaseDown
expr: pg_up == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Database is down"
description: "PostgreSQL database is not responding"
- name: keycloak
interval: 30s
rules:
# Keycloak Down
- alert: KeycloakDown
expr: up{job="keycloak"} == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Keycloak is down"
description: "Keycloak authentication service is down"
# Keycloak High Authentication Failures
- alert: KeycloakHighAuthFailures
expr: |
sum(rate(keycloak_login_failures_total[5m])) > 10
for: 5m
labels:
severity: warning
annotations:
summary: "High authentication failure rate"
description: "Keycloak has {{ $value }} authentication failures per second"
- name: infrastructure
interval: 30s
rules:
# High CPU Usage
- alert: HighCPUUsage
expr: |
(1 - avg(rate(container_cpu_usage_seconds_total{container!="POD"}[5m]))) < 0.1
for: 10m
labels:
severity: warning
annotations:
summary: "High CPU usage"
description: "CPU usage is above 90% for 10 minutes"
# High Memory Usage
- alert: HighMemoryUsage
expr: |
(1 - (container_memory_working_set_bytes{container!="POD"} / container_spec_memory_limit_bytes)) < 0.1
for: 10m
labels:
severity: warning
annotations:
summary: "High memory usage"
description: "Memory usage is above 90% for 10 minutes"
# Pod CrashLooping
- alert: PodCrashLooping
expr: |
rate(kube_pod_container_status_restarts_total[15m]) > 0
for: 5m
labels:
severity: warning
annotations:
summary: "Pod is crash looping"
description: "Pod {{ $labels.pod }} in namespace {{ $labels.namespace }} is crash looping"
# Disk Space Low
- alert: DiskSpaceLow
expr: |
(node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"}) < 0.1
for: 5m
labels:
severity: warning
annotations:
summary: "Disk space is low"
description: "Disk space is below 10% on {{ $labels.instance }}"
- name: backups
interval: 1h
rules:
# Backup Failed
- alert: BackupFailed
expr: |
time() - backup_last_success_timestamp > 86400
for: 1h
labels:
severity: critical
annotations:
summary: "Backup has not run in 24 hours"
description: "Last successful backup was more than 24 hours ago"
# Backup Too Old
- alert: BackupTooOld
expr: |
time() - backup_last_success_timestamp > 172800
for: 1h
labels:
severity: critical
annotations:
summary: "Backup is more than 48 hours old"
description: "Last successful backup was {{ $value }} seconds ago"

View File

@@ -40,7 +40,7 @@ spec:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
hosts:
- grafana.yourdomain.com
- grafana.sankofa.nexus
persistence:
enabled: true
size: 10Gi

View File

@@ -0,0 +1,81 @@
---
# Nightly logical backup of the Sankofa database, kept 7 days on a PVC.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: postgres-backup
  namespace: api
spec:
  schedule: "0 2 * * *"  # Daily at 2 AM
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 3
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: postgres-backup
              # NOTE(review): keep the client major version >= the server's,
              # otherwise pg_dump refuses to run — TODO confirm server version.
              image: postgres:14-alpine
              command:
                - /bin/bash
                - -c
                - |
                  set -e
                  # FIX: pg_dump authenticates via libpq's PGPASSWORD; without
                  # this export the non-interactive job fails on the password
                  # prompt (DB_PASSWORD alone is never read by pg_dump).
                  export PGPASSWORD="$DB_PASSWORD"
                  BACKUP_DIR="/backups/postgres"
                  DB_NAME="${DB_NAME:-sankofa}"
                  TIMESTAMP=$(date +%Y%m%d_%H%M%S)
                  BACKUP_FILE="${BACKUP_DIR}/${DB_NAME}_${TIMESTAMP}.sql"
                  mkdir -p "$BACKUP_DIR"
                  echo "Starting backup..."
                  pg_dump -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" \
                    -F p -f "$BACKUP_FILE"
                  echo "Compressing backup..."
                  gzip "$BACKUP_FILE"
                  echo "Cleaning up backups older than 7 days..."
                  find "$BACKUP_DIR" -name "${DB_NAME}_*.sql.gz" -type f -mtime +7 -delete
                  echo "Backup completed: ${BACKUP_FILE}.gz"
              env:
                - name: DB_HOST
                  valueFrom:
                    secretKeyRef:
                      name: db-credentials
                      key: host
                - name: DB_PORT
                  value: "5432"
                - name: DB_USER
                  valueFrom:
                    secretKeyRef:
                      name: db-credentials
                      key: username
                - name: DB_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: db-credentials
                      key: password
                - name: DB_NAME
                  value: "sankofa"
              volumeMounts:
                - name: backup-storage
                  mountPath: /backups
          restartPolicy: OnFailure
          volumes:
            - name: backup-storage
              persistentVolumeClaim:
                claimName: postgres-backup-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-backup-pvc
  namespace: api
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Gi
  storageClassName: standard

View File

@@ -0,0 +1,35 @@
---
# Grafana dashboard provisioning: the grafana_dashboard label lets the
# Grafana sidecar discover and load the embedded JSON dashboard.
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-dashboards
  namespace: monitoring
  labels:
    grafana_dashboard: "1"
data:
  sankofa-overview.json: |
    {
      "dashboard": {
        "title": "Sankofa Phoenix Overview",
        "panels": [
          {
            "title": "API Request Rate",
            "targets": [
              {
                "expr": "rate(http_requests_total[5m])",
                "legendFormat": "{{method}} {{status}}"
              }
            ]
          },
          {
            "title": "Database Connections",
            "targets": [
              {
                "expr": "pg_stat_database_numbackends",
                "legendFormat": "{{datname}}"
              }
            ]
          }
        ]
      }
    }

View File

@@ -0,0 +1,50 @@
---
# Standalone Prometheus scrape configuration: one explicit job for the
# Sankofa API pods plus generic annotation-driven pod discovery.
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      evaluation_interval: 15s
    scrape_configs:
      - job_name: 'sankofa-api'
        kubernetes_sd_configs:
          - role: pod
            namespaces:
              names:
                - sankofa
        relabel_configs:
          # Keep only pods labelled app=sankofa-api.
          - source_labels: [__meta_kubernetes_pod_label_app]
            action: keep
            regex: sankofa-api
          # Scrape the API port directly (assumes metrics on app port 4000).
          - source_labels: [__meta_kubernetes_pod_ip]
            action: replace
            target_label: __address__
            replacement: $1:4000
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
          - role: pod
        relabel_configs:
          # Opt-in scraping via prometheus.io/scrape=true annotation.
          - action: keep
            regex: true
            source_labels:
              - __meta_kubernetes_pod_annotation_prometheus_io_scrape
          - action: replace
            regex: (.+)
            source_labels:
              - __meta_kubernetes_pod_annotation_prometheus_io_path
            target_label: __metrics_path__
          - action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
            source_labels:
              - __address__
              - __meta_kubernetes_pod_annotation_prometheus_io_port
            target_label: __address__

View File

@@ -8,7 +8,7 @@ metadata:
spec:
project: default
source:
repoURL: https://github.com/yourorg/hybrid-cloud-gitops
repoURL: https://github.com/sankofa/Sankofa
targetRevision: main
path: gitops/apps/portal/manifests
destination:

View File

@@ -79,10 +79,10 @@ metadata:
name: portal-config
namespace: portal
data:
keycloak-url: "https://keycloak.yourdomain.com"
keycloak-url: "https://keycloak.sankofa.nexus"
crossplane-api-url: "https://crossplane-api.crossplane-system.svc.cluster.local"
argocd-url: "https://argocd.yourdomain.com"
grafana-url: "https://grafana.yourdomain.com"
argocd-url: "https://argocd.sankofa.nexus"
grafana-url: "https://grafana.sankofa.nexus"
loki-url: "https://loki.monitoring.svc.cluster.local:3100"
---
apiVersion: networking.k8s.io/v1
@@ -97,10 +97,10 @@ spec:
ingressClassName: nginx
tls:
- hosts:
- portal.yourdomain.com
- portal.sankofa.nexus
secretName: portal-tls
rules:
- host: portal.yourdomain.com
- host: portal.sankofa.nexus
http:
paths:
- path: /

View File

@@ -14,7 +14,7 @@ spec:
helm:
releaseName: rancher
values: |
hostname: rancher.yourdomain.com
hostname: rancher.sankofa.nexus
replicas: 3
ingress:
enabled: true

View File

@@ -32,7 +32,7 @@ spec:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
hosts:
- host: vault.yourdomain.com
- host: vault.sankofa.nexus
paths:
- /
ui:

View File

@@ -0,0 +1,95 @@
# Tenant Namespaces Template
# More granular than Azure with custom resource limits per tenant tier.
# NOTE: This is a template file. Placeholders (TENANT_ID_PLACEHOLDER,
# TENANT_TIER_PLACEHOLDER) must be replaced by the tenant-namespace
# automation script or controller before these manifests are applied.
apiVersion: v1
kind: Namespace
metadata:
  name: tenant-template
  labels:
    name: tenant-template
    tenant-id: "TENANT_ID_PLACEHOLDER"      # actual tenant ID
    tenant-tier: "TENANT_TIER_PLACEHOLDER"  # FREE, STANDARD, ENTERPRISE, SOVEREIGN
    managed-by: sankofa-phoenix
---
# Resource Quota — namespace-wide ceilings; adjust per tenant tier.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: tenant-quota
  namespace: tenant-template
spec:
  hard:
    requests.cpu: "100"
    requests.memory: 512Gi
    limits.cpu: "200"
    limits.memory: 1Ti
    persistentvolumeclaims: "50"
    services.loadbalancers: "10"
    services.nodeports: "20"
    count/deployments.apps: "100"
    count/statefulsets.apps: "50"
---
# Limit Range — per-container defaults and bounds.
apiVersion: v1
kind: LimitRange
metadata:
  name: tenant-limits
  namespace: tenant-template
spec:
  limits:
    - type: Container
      default:
        cpu: "2"
        memory: 4Gi
      defaultRequest:
        cpu: "500m"
        memory: 1Gi
    - type: Container
      max:
        cpu: "8"
        memory: 16Gi
      min:
        cpu: "100m"
        memory: 128Mi
---
# Network Policy - Tenant isolation
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: tenant-isolation
  namespace: tenant-template
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              tenant-id: "TENANT_ID_PLACEHOLDER"
        - namespaceSelector:
            matchLabels:
              name: system
  # FIX: this mapping previously declared `egress:` twice, which is invalid
  # YAML — most parsers keep only the LAST occurrence, silently dropping the
  # tenant/system/monitoring egress rules. Both rule sets are merged into a
  # single key here.
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              tenant-id: "TENANT_ID_PLACEHOLDER"
        - namespaceSelector:
            matchLabels:
              name: system
        - namespaceSelector:
            matchLabels:
              name: monitoring
    # Allow DNS and HTTPS egress to any destination (no `to` = all peers).
    - ports:
        - protocol: UDP
          port: 53
        - protocol: TCP
          port: 443

View File

@@ -0,0 +1,97 @@
# Tenant RBAC - More granular than Azure RBAC
# Fine-grained permissions beyond Azure's role-based access
# Tenant Owner Role - Full control over tenant
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tenant-owner
namespace: tenant-template
rules:
- apiGroups: [""]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["apps"]
resources: ["*"]
verbs: ["*"]
- apiGroups: ["networking.k8s.io"]
resources: ["*"]
verbs: ["*"]
---
# Tenant Admin Role - Administrative access
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tenant-admin
namespace: tenant-template
rules:
- apiGroups: [""]
resources: ["pods", "services", "configmaps", "secrets", "persistentvolumeclaims"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets", "daemonsets"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["networking.k8s.io"]
resources: ["networkpolicies", "ingresses"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
# Tenant User Role - Standard access
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tenant-user
namespace: tenant-template
rules:
- apiGroups: [""]
resources: ["pods", "services", "configmaps"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
# Tenant Viewer Role - Read-only access
# SECURITY(review): the previous wildcard resources: ["*"] on the core API
# group let viewers read Secrets with get/list/watch. Resources are now
# enumerated explicitly; extend this list as needed, but keep "secrets" out
# of a read-only viewer role.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tenant-viewer
  namespace: tenant-template
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/log", "services", "configmaps", "persistentvolumeclaims", "events"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets", "daemonsets", "replicasets"]
    verbs: ["get", "list", "watch"]
---
# Tenant Billing Admin Role - Billing management
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: tenant-billing-admin
namespace: tenant-template
rules:
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch"] # Read-only for billing calculations
- apiGroups: ["apps"]
resources: ["deployments", "statefulsets"]
verbs: ["get", "list", "watch"]
---
# RoleBinding Template
# This would be created per tenant user
# NOTE: This is a template file. USER_EMAIL_PLACEHOLDER should be replaced with actual user email
# when creating RoleBindings. Use the tenant RBAC automation script or controller to process this template.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: tenant-user-binding-template
namespace: tenant-template
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: tenant-user # Change based on user role (tenant-owner, tenant-admin, tenant-user, tenant-viewer, tenant-billing-admin)
subjects:
- kind: User
name: "USER_EMAIL_PLACEHOLDER" # Replace with actual user email (e.g., user@example.com)
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,11 +1,11 @@
apiVersion: proxmox.yourorg.io/v1alpha1
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: ProxmoxVM
metadata:
name: web-server-01
namespace: default
spec:
forProvider:
node: pve1
node: ML110-01
name: web-server-01
cpu: 4
memory: 8Gi
@@ -13,14 +13,139 @@ spec:
storage: local-lvm
network: vmbr0
image: ubuntu-22.04-cloud
site: us-east-1
site: us-sfvalley
userData: |
#cloud-config
# Package management
package_update: true
package_upgrade: true
# Required packages
packages:
- qemu-guest-agent
- curl
- wget
- net-tools
- chrony
- unattended-upgrades
# Time synchronization (NTP)
ntp:
enabled: true
ntp_client: chrony
servers:
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
# User configuration
users:
- name: admin
ssh-authorized-keys:
- ssh-rsa AAAAB3NzaC1yc2E...
sshKeys:
- ssh-rsa AAAAB3NzaC1yc2E...
groups: sudo
shell: /bin/bash
sudo: ALL=(ALL) NOPASSWD:ALL
lock_passwd: false
# Note: Add SSH keys via userData or use Proxmox API to inject keys
# ssh_authorized_keys:
# - ssh-rsa YOUR_PUBLIC_SSH_KEY_HERE
# Boot commands - executed in order
runcmd:
# Verify packages are installed
- |
echo "=========================================="
echo "Verifying required packages are installed..."
echo "=========================================="
for pkg in qemu-guest-agent curl wget net-tools chrony unattended-upgrades; do
if ! dpkg -l | grep -q "^ii.*$pkg"; then
echo "ERROR: Package $pkg is not installed"
exit 1
fi
echo "✅ Package $pkg is installed"
done
echo "All required packages verified"
# Verify qemu-guest-agent package details
- |
echo "=========================================="
echo "Checking qemu-guest-agent package details..."
echo "=========================================="
if dpkg -l | grep -q "^ii.*qemu-guest-agent"; then
echo "✅ qemu-guest-agent package IS installed"
dpkg -l | grep qemu-guest-agent
else
echo "❌ qemu-guest-agent package is NOT installed"
echo "Attempting to install..."
apt-get update
apt-get install -y qemu-guest-agent
fi
# Enable and start QEMU Guest Agent
- |
echo "=========================================="
echo "Enabling and starting QEMU Guest Agent..."
echo "=========================================="
systemctl enable qemu-guest-agent
systemctl start qemu-guest-agent
echo "QEMU Guest Agent enabled and started"
# Verify guest agent service is running
- |
echo "=========================================="
echo "Verifying QEMU Guest Agent service status..."
echo "=========================================="
for i in {1..30}; do
if systemctl is-active --quiet qemu-guest-agent; then
echo "✅ QEMU Guest Agent service IS running"
systemctl status qemu-guest-agent --no-pager -l
exit 0
fi
echo "Waiting for QEMU Guest Agent to start... ($i/30)"
sleep 1
done
echo "⚠️ WARNING: QEMU Guest Agent may not have started properly"
systemctl status qemu-guest-agent --no-pager -l || true
echo "Attempting to restart..."
systemctl restart qemu-guest-agent
sleep 3
if systemctl is-active --quiet qemu-guest-agent; then
echo "✅ QEMU Guest Agent started after restart"
else
echo "❌ QEMU Guest Agent failed to start"
fi
# Configure NTP (Chrony)
- |
echo "Configuring NTP (Chrony)..."
systemctl enable chrony
systemctl restart chrony
sleep 3
if systemctl is-active --quiet chrony; then
echo "NTP (Chrony) is running"
chronyc tracking | head -1 || true
else
echo "WARNING: NTP (Chrony) may not be running"
fi
# Final message
final_message: |
==========================================
System Boot Completed Successfully!
==========================================
Services Status:
- QEMU Guest Agent: $(systemctl is-active qemu-guest-agent)
- NTP (Chrony): $(systemctl is-active chrony)
System Information:
- Hostname: $(hostname)
- IP Address: $(hostname -I | awk '{print $1}')
- Time: $(date)
Packages Installed:
- qemu-guest-agent, curl, wget, net-tools
- chrony (NTP), unattended-upgrades (Security)
==========================================
providerConfigRef:
name: proxmox-provider-config

View File

@@ -9,36 +9,43 @@ spec:
name: vm-connection-secret
namespace: crossplane-system
compositeTypeRef:
apiVersion: proxmox.yourorg.io/v1alpha1
kind: ProxmoxVM
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: VirtualMachine
resources:
- name: proxmox-vm
base:
apiVersion: proxmox.yourorg.io/v1alpha1
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: ProxmoxVM
metadata:
labels:
tenant-id: "TENANT_ID_PLACEHOLDER"
managed-by: sankofa-phoenix
spec:
forProvider:
node: pve1
node: ML110-01
cpu: 2
memory: 4Gi
disk: 50Gi
storage: local-lvm
network: vmbr0
image: ubuntu-22.04-cloud
site: us-east-1
site: us-sfvalley
patches:
- type: FromCompositeFieldPath
fromFieldPath: spec.forProvider.name
fromFieldPath: spec.parameters.name
toFieldPath: spec.forProvider.name
- type: FromCompositeFieldPath
fromFieldPath: spec.forProvider.cpu
fromFieldPath: spec.parameters.cpu
toFieldPath: spec.forProvider.cpu
- type: FromCompositeFieldPath
fromFieldPath: spec.forProvider.memory
fromFieldPath: spec.parameters.memory
toFieldPath: spec.forProvider.memory
- type: FromCompositeFieldPath
fromFieldPath: spec.forProvider.disk
fromFieldPath: spec.parameters.disk
toFieldPath: spec.forProvider.disk
- type: FromCompositeFieldPath
fromFieldPath: spec.forProvider.site
fromFieldPath: spec.parameters.site
toFieldPath: spec.forProvider.site
- type: FromCompositeFieldPath
fromFieldPath: metadata.labels['tenant-id']
toFieldPath: metadata.labels['tenant-id']

View File

@@ -1,9 +1,9 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
name: virtualmachines.proxmox.yourorg.io
name: virtualmachines.proxmox.sankofa.nexus
spec:
group: proxmox.yourorg.io
group: proxmox.sankofa.nexus
names:
kind: VirtualMachine
plural: virtualmachines

View File

@@ -1,11 +1,11 @@
apiVersion: proxmox.yourorg.io/v1alpha1
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: VirtualMachineClaim
metadata:
name: "{{ .name }}"
namespace: "{{ .namespace | default "default" }}"
spec:
compositionRef:
name: virtualmachine.ubuntu.proxmox.yourorg.io
name: virtualmachine.debian.proxmox.sankofa.nexus
parameters:
name: "{{ .name }}"
node: "{{ .node }}"

View File

@@ -1,11 +1,11 @@
apiVersion: proxmox.yourorg.io/v1alpha1
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: VirtualMachineClaim
metadata:
name: "{{ .name }}"
namespace: "{{ .namespace | default "default" }}"
spec:
compositionRef:
name: virtualmachine.ubuntu.proxmox.yourorg.io
name: virtualmachine.ubuntu.proxmox.sankofa.nexus
parameters:
name: "{{ .name }}"
node: "{{ .node }}"

View File

@@ -1,11 +1,11 @@
apiVersion: proxmox.yourorg.io/v1alpha1
apiVersion: proxmox.sankofa.nexus/v1alpha1
kind: VirtualMachineClaim
metadata:
name: "{{ .name }}"
namespace: "{{ .namespace | default "default" }}"
spec:
compositionRef:
name: virtualmachine.ubuntu.proxmox.yourorg.io
name: virtualmachine.ubuntu.proxmox.sankofa.nexus
parameters:
name: "{{ .name }}"
node: "{{ .node }}"