Files
Sankofa/api/src/db/migrations/014_audit_logging.ts
defiQUG 9daf1fd378 Apply Composer changes: comprehensive API updates, migrations, middleware, and infrastructure improvements
- Add comprehensive database migrations (001-024) for schema evolution
- Enhance API schema with expanded type definitions and resolvers
- Add new middleware: audit logging, rate limiting, MFA enforcement, security, tenant auth
- Implement new services: AI optimization, billing, blockchain, compliance, marketplace
- Add adapter layer for cloud integrations (Cloudflare, Kubernetes, Proxmox, storage)
- Update Crossplane provider with enhanced VM management capabilities
- Add comprehensive test suite for API endpoints and services
- Update frontend components with improved GraphQL subscriptions and real-time updates
- Enhance security configurations and headers (CSP, CORS, etc.)
- Update documentation and configuration files
- Add new CI/CD workflows and validation scripts
- Implement design system improvements and UI enhancements
2025-12-12 18:01:35 -08:00

90 lines
3.2 KiB
TypeScript

import { Migration } from '../migrate.js'
/**
 * Migration 014: audit logging infrastructure.
 *
 * Creates the `audit_logs` table (event, actor, resource, outcome, timestamp,
 * and a per-row signature for tamper-evidence — per NIST SP 800-53 AU-2, AU-3),
 * the indexes backing common audit queries, and a placeholder archiving
 * function for the 7+ year retention requirement.
 */
export const up: Migration['up'] = async (db) => {
  // uuid_generate_v4() is provided by the uuid-ossp extension. Create it
  // defensively (IF NOT EXISTS makes this a no-op when already installed) so
  // this migration does not silently depend on an earlier migration having
  // enabled it. NOTE(review): requires a role allowed to create extensions —
  // confirm the migration role has that privilege in all environments.
  await db.query(`CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`)

  // Audit logs table - per NIST SP 800-53 AU-2, AU-3
  await db.query(`
    CREATE TABLE IF NOT EXISTS audit_logs (
      id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
      event_type VARCHAR(100) NOT NULL,
      result VARCHAR(50) NOT NULL CHECK (result IN ('SUCCESS', 'FAILURE', 'DENIED', 'ERROR')),
      -- ON DELETE SET NULL: deleting a user/tenant must never cascade into the
      -- audit trail; only the reference is nulled, the log row survives.
      user_id UUID REFERENCES users(id) ON DELETE SET NULL,
      user_name VARCHAR(255),
      user_role VARCHAR(50),
      tenant_id UUID REFERENCES tenants(id) ON DELETE SET NULL,
      ip_address INET,
      user_agent TEXT,
      resource_type VARCHAR(100),
      resource_id UUID,
      action VARCHAR(255) NOT NULL,
      details JSONB DEFAULT '{}'::jsonb,
      classification_level VARCHAR(50) DEFAULT 'UNCLASSIFIED'
        CHECK (classification_level IN ('UNCLASSIFIED', 'CUI', 'CONFIDENTIAL', 'SECRET', 'TOP_SECRET')),
      timestamp TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
      signature VARCHAR(255) NOT NULL, -- Cryptographic signature for tamper-proofing
      created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
    )
  `)

  // Indexes for audit log queries: single-column filters, plus composite
  // (actor, timestamp DESC) indexes for per-user / per-tenant history views.
  const indexStatements = [
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_event_type ON audit_logs(event_type)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_user_id ON audit_logs(user_id)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_id ON audit_logs(tenant_id)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_timestamp ON audit_logs(timestamp DESC)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_resource ON audit_logs(resource_type, resource_id)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_classification ON audit_logs(classification_level)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_action ON audit_logs(action)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_user_time ON audit_logs(user_id, timestamp DESC)',
    'CREATE INDEX IF NOT EXISTS idx_audit_logs_tenant_time ON audit_logs(tenant_id, timestamp DESC)',
  ]
  for (const statement of indexStatements) {
    await db.query(statement)
  }

  // Partition table by year for better performance with large datasets.
  // Note: PostgreSQL 10+ supports native partitioning.
  // This is a simplified approach - in production, consider using table partitioning.
  // Create function to automatically archive old audit logs (7+ year retention
  // per DoD requirements). The function body is intentionally a stub: it only
  // RAISEs a NOTICE until an actual archiving strategy is implemented.
  await db.query(`
    CREATE OR REPLACE FUNCTION archive_audit_logs()
    RETURNS void AS $$
    BEGIN
      -- Archive logs older than 7 years to separate archive table
      -- This function should be called periodically (e.g., monthly)
      -- For now, we'll just log a warning - actual archiving should be implemented
      -- based on specific storage requirements
      RAISE NOTICE 'Audit log archiving not yet implemented - logs retained for 7+ years per DoD requirements';
    END;
    $$ LANGUAGE plpgsql;
  `)
}
/**
 * Reverts migration 014: removes the audit-logging artifacts in reverse
 * order of creation (function first, then the table).
 */
export const down: Migration['down'] = async (db) => {
  const teardownStatements = [
    'DROP FUNCTION IF EXISTS archive_audit_logs CASCADE',
    'DROP TABLE IF EXISTS audit_logs CASCADE',
  ]
  for (const statement of teardownStatements) {
    await db.query(statement)
  }
}