Add full monorepo: virtual-banker, backend, frontend, docs, scripts, deployment
Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
102
backend/database/config/database.go
Normal file
102
backend/database/config/database.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// DatabaseConfig holds database configuration for the primary PostgreSQL
// connection, including pgx pool sizing and connection lifetime limits.
// Values are normally populated via LoadDatabaseConfig from DB_* env vars.
type DatabaseConfig struct {
	Host            string        // database host (DB_HOST)
	Port            int           // database port (DB_PORT)
	User            string        // database user (DB_USER)
	Password        string        // database password (DB_PASSWORD)
	Database        string        // database name (DB_NAME)
	SSLMode         string        // libpq sslmode value, e.g. "disable" (DB_SSLMODE)
	MaxConnections  int           // pool MaxConns (DB_MAX_CONNECTIONS)
	MaxIdleTime     time.Duration // pool MaxConnIdleTime (DB_MAX_IDLE_TIME)
	ConnMaxLifetime time.Duration // pool MaxConnLifetime (DB_CONN_MAX_LIFETIME)
}
|
||||
|
||||
// LoadDatabaseConfig loads database configuration from environment variables
|
||||
func LoadDatabaseConfig() *DatabaseConfig {
|
||||
maxConns, _ := strconv.Atoi(getEnv("DB_MAX_CONNECTIONS", "25"))
|
||||
maxIdle, _ := time.ParseDuration(getEnv("DB_MAX_IDLE_TIME", "5m"))
|
||||
maxLifetime, _ := time.ParseDuration(getEnv("DB_CONN_MAX_LIFETIME", "1h"))
|
||||
|
||||
return &DatabaseConfig{
|
||||
Host: getEnv("DB_HOST", "localhost"),
|
||||
Port: getIntEnv("DB_PORT", 5432),
|
||||
User: getEnv("DB_USER", "explorer"),
|
||||
Password: getEnv("DB_PASSWORD", ""),
|
||||
Database: getEnv("DB_NAME", "explorer"),
|
||||
SSLMode: getEnv("DB_SSLMODE", "disable"),
|
||||
MaxConnections: maxConns,
|
||||
MaxIdleTime: maxIdle,
|
||||
ConnMaxLifetime: maxLifetime,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionString returns PostgreSQL connection string
|
||||
func (c *DatabaseConfig) ConnectionString() string {
|
||||
return fmt.Sprintf(
|
||||
"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
|
||||
c.Host, c.Port, c.User, c.Password, c.Database, c.SSLMode,
|
||||
)
|
||||
}
|
||||
|
||||
// PoolConfig returns pgxpool configuration
|
||||
func (c *DatabaseConfig) PoolConfig() (*pgxpool.Config, error) {
|
||||
config, err := pgxpool.ParseConfig(c.ConnectionString())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.MaxConns = int32(c.MaxConnections)
|
||||
config.MaxConnIdleTime = c.MaxIdleTime
|
||||
config.MaxConnLifetime = c.ConnMaxLifetime
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// ReadReplicaConfig holds read replica configuration. Unlike DatabaseConfig
// it carries no pool-tuning fields; values come from DB_REPLICA_* env vars
// via LoadReadReplicaConfig, and default to empty strings when unset.
type ReadReplicaConfig struct {
	Host     string // replica host (DB_REPLICA_HOST); empty when no replica is configured
	Port     int    // replica port (DB_REPLICA_PORT)
	User     string // replica user (DB_REPLICA_USER)
	Password string // replica password (DB_REPLICA_PASSWORD)
	Database string // replica database name (DB_REPLICA_NAME)
	SSLMode  string // libpq sslmode value (DB_REPLICA_SSLMODE)
}
|
||||
|
||||
// LoadReadReplicaConfig loads read replica configuration
|
||||
func LoadReadReplicaConfig() *ReadReplicaConfig {
|
||||
return &ReadReplicaConfig{
|
||||
Host: getEnv("DB_REPLICA_HOST", ""),
|
||||
Port: getIntEnv("DB_REPLICA_PORT", 5432),
|
||||
User: getEnv("DB_REPLICA_USER", ""),
|
||||
Password: getEnv("DB_REPLICA_PASSWORD", ""),
|
||||
Database: getEnv("DB_REPLICA_NAME", ""),
|
||||
SSLMode: getEnv("DB_REPLICA_SSLMODE", "disable"),
|
||||
}
|
||||
}
|
||||
|
||||
// getEnv returns the value of the environment variable key, or defaultValue
// when the variable is unset or set to the empty string.
func getEnv(key, defaultValue string) string {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	return value
}
|
||||
|
||||
// getIntEnv returns the integer value of the environment variable key.
// Unset, empty, or non-numeric values yield defaultValue.
func getIntEnv(key string, defaultValue int) int {
	raw := os.Getenv(key)
	if raw == "" {
		return defaultValue
	}
	parsed, err := strconv.Atoi(raw)
	if err != nil {
		return defaultValue
	}
	return parsed
}
|
||||
16
backend/database/migrations/0001_initial_schema.down.sql
Normal file
16
backend/database/migrations/0001_initial_schema.down.sql
Normal file
@@ -0,0 +1,16 @@
|
||||
-- Rollback initial schema
-- Tables are dropped dependents-first (address_labels .. blocks) so no
-- foreign-key reference outlives its target; CASCADE additionally removes
-- dependent objects such as partitions and indexes.

DROP TABLE IF EXISTS address_labels CASCADE;
DROP TABLE IF EXISTS watchlists CASCADE;
DROP TABLE IF EXISTS api_keys CASCADE;
DROP TABLE IF EXISTS users CASCADE;
DROP TABLE IF EXISTS contracts CASCADE;
DROP TABLE IF EXISTS token_transfers CASCADE;
DROP TABLE IF EXISTS tokens CASCADE;
DROP TABLE IF EXISTS logs CASCADE;
DROP TABLE IF EXISTS transactions CASCADE;
DROP TABLE IF EXISTS blocks CASCADE;

-- NOTE(review): DROP EXTENSION is database-wide, not schema-local --
-- confirm nothing else in this database relies on timescaledb or uuid-ossp
-- before rolling back.
DROP EXTENSION IF EXISTS timescaledb;
DROP EXTENSION IF EXISTS "uuid-ossp";
|
||||
|
||||
283
backend/database/migrations/0001_initial_schema.up.sql
Normal file
283
backend/database/migrations/0001_initial_schema.up.sql
Normal file
@@ -0,0 +1,283 @@
|
||||
-- Initial schema for ChainID 138 Explorer
-- Supports multi-chain via chain_id partitioning

-- Enable UUID extension
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Enable TimescaleDB extension (for time-series data)
-- NOTE(review): no create_hypertable() call appears anywhere in this
-- migration set -- confirm TimescaleDB is actually needed.
CREATE EXTENSION IF NOT EXISTS timescaledb;

-- Blocks table
CREATE TABLE blocks (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    number BIGINT NOT NULL,
    hash VARCHAR(66) NOT NULL,
    parent_hash VARCHAR(66) NOT NULL,
    nonce VARCHAR(18),
    sha3_uncles VARCHAR(66),
    logs_bloom TEXT,
    transactions_root VARCHAR(66),
    state_root VARCHAR(66),
    receipts_root VARCHAR(66),
    miner VARCHAR(42),
    difficulty NUMERIC,
    total_difficulty NUMERIC,
    size BIGINT,
    extra_data TEXT,
    gas_limit BIGINT,
    gas_used BIGINT,
    timestamp TIMESTAMP NOT NULL,
    transaction_count INTEGER DEFAULT 0,
    base_fee_per_gas BIGINT,
    orphaned BOOLEAN DEFAULT false,
    orphaned_at TIMESTAMP,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- On a partitioned table every PRIMARY KEY / UNIQUE constraint must
    -- include all partition key columns; PRIMARY KEY (id) alone is
    -- rejected by PostgreSQL, so chain_id is part of the key.
    PRIMARY KEY (chain_id, id),
    UNIQUE (chain_id, number),
    UNIQUE (chain_id, hash)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE blocks_chain_138 PARTITION OF blocks FOR VALUES IN (138);

-- Indexes for blocks
CREATE INDEX idx_blocks_chain_number ON blocks(chain_id, number);
CREATE INDEX idx_blocks_chain_hash ON blocks(chain_id, hash);
CREATE INDEX idx_blocks_chain_timestamp ON blocks(chain_id, timestamp);
|
||||
|
||||
-- Transactions table
CREATE TABLE transactions (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    transaction_index INTEGER NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42),
    value NUMERIC(78, 0) NOT NULL DEFAULT 0,
    gas_price BIGINT,
    max_fee_per_gas BIGINT,
    max_priority_fee_per_gas BIGINT,
    gas_limit BIGINT NOT NULL,
    gas_used BIGINT,
    nonce BIGINT NOT NULL,
    input_data TEXT,
    status INTEGER,
    contract_address VARCHAR(42),
    cumulative_gas_used BIGINT,
    effective_gas_price BIGINT,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned table: the PK must include the partition key (chain_id);
    -- PRIMARY KEY (id) alone is invalid on a table partitioned by chain_id.
    PRIMARY KEY (chain_id, id),
    UNIQUE (chain_id, hash),
    -- References the UNIQUE (chain_id, number) constraint on blocks.
    -- NOTE(review): FKs referencing a partitioned table require PG12+.
    FOREIGN KEY (chain_id, block_number) REFERENCES blocks(chain_id, number)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE transactions_chain_138 PARTITION OF transactions FOR VALUES IN (138);

-- Indexes for transactions
CREATE INDEX idx_transactions_chain_hash ON transactions(chain_id, hash);
CREATE INDEX idx_transactions_chain_block ON transactions(chain_id, block_number, transaction_index);
CREATE INDEX idx_transactions_chain_from ON transactions(chain_id, from_address);
CREATE INDEX idx_transactions_chain_to ON transactions(chain_id, to_address);
CREATE INDEX idx_transactions_chain_block_from ON transactions(chain_id, block_number, from_address);
|
||||
|
||||
-- Logs table
CREATE TABLE logs (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    log_index INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    topic0 VARCHAR(66),
    topic1 VARCHAR(66),
    topic2 VARCHAR(66),
    topic3 VARCHAR(66),
    data TEXT,
    decoded_data JSONB,
    created_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned table: the PK must include the partition key (chain_id);
    -- PRIMARY KEY (id) alone is invalid on a table partitioned by chain_id.
    PRIMARY KEY (chain_id, id),
    UNIQUE (chain_id, transaction_hash, log_index),
    FOREIGN KEY (chain_id, transaction_hash) REFERENCES transactions(chain_id, hash)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE logs_chain_138 PARTITION OF logs FOR VALUES IN (138);

-- Indexes for logs
CREATE INDEX idx_logs_chain_tx ON logs(chain_id, transaction_hash);
CREATE INDEX idx_logs_chain_address ON logs(chain_id, address);
CREATE INDEX idx_logs_chain_topic0 ON logs(chain_id, topic0);
CREATE INDEX idx_logs_chain_block ON logs(chain_id, block_number);
CREATE INDEX idx_logs_chain_address_topic0 ON logs(chain_id, address, topic0);
|
||||
|
||||
-- Tokens table
CREATE TABLE tokens (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    type VARCHAR(10) NOT NULL CHECK (type IN ('ERC20', 'ERC721', 'ERC1155')),
    name VARCHAR(255),
    symbol VARCHAR(50),
    -- NOTE(review): some real ERC-20 tokens use more than 18 decimals;
    -- confirm the upper bound of 18 is intentional.
    decimals INTEGER CHECK (decimals >= 0 AND decimals <= 18),
    total_supply NUMERIC(78, 0),
    holder_count INTEGER DEFAULT 0,
    transfer_count INTEGER DEFAULT 0,
    logo_url TEXT,
    website_url TEXT,
    description TEXT,
    verified BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned table: the PK must include the partition key (chain_id);
    -- PRIMARY KEY (id) alone is invalid on a table partitioned by chain_id.
    PRIMARY KEY (chain_id, id),
    UNIQUE (chain_id, address)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE tokens_chain_138 PARTITION OF tokens FOR VALUES IN (138);

-- Indexes for tokens
CREATE INDEX idx_tokens_chain_address ON tokens(chain_id, address);
CREATE INDEX idx_tokens_chain_type ON tokens(chain_id, type);
CREATE INDEX idx_tokens_chain_symbol ON tokens(chain_id, symbol);
|
||||
|
||||
-- Token transfers table
CREATE TABLE token_transfers (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    log_index INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    token_type VARCHAR(10) NOT NULL CHECK (token_type IN ('ERC20', 'ERC721', 'ERC1155')),
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    amount NUMERIC(78, 0),
    token_id VARCHAR(78),
    operator VARCHAR(42),
    created_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned table: the PK must include the partition key (chain_id);
    -- PRIMARY KEY (id) alone is invalid on a table partitioned by chain_id.
    PRIMARY KEY (chain_id, id),
    FOREIGN KEY (chain_id, transaction_hash) REFERENCES transactions(chain_id, hash),
    -- NOTE(review): this FK requires every transferred token to already
    -- have a row in tokens -- confirm the indexer inserts tokens first.
    FOREIGN KEY (chain_id, token_address) REFERENCES tokens(chain_id, address),
    UNIQUE (chain_id, transaction_hash, log_index)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE token_transfers_chain_138 PARTITION OF token_transfers FOR VALUES IN (138);

-- Indexes for token transfers
CREATE INDEX idx_token_transfers_chain_token ON token_transfers(chain_id, token_address);
CREATE INDEX idx_token_transfers_chain_from ON token_transfers(chain_id, from_address);
CREATE INDEX idx_token_transfers_chain_to ON token_transfers(chain_id, to_address);
CREATE INDEX idx_token_transfers_chain_tx ON token_transfers(chain_id, transaction_hash);
CREATE INDEX idx_token_transfers_chain_block ON token_transfers(chain_id, block_number);
CREATE INDEX idx_token_transfers_chain_token_from ON token_transfers(chain_id, token_address, from_address);
CREATE INDEX idx_token_transfers_chain_token_to ON token_transfers(chain_id, token_address, to_address);
|
||||
|
||||
-- Contracts table
CREATE TABLE contracts (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    name VARCHAR(255),
    compiler_version VARCHAR(50),
    optimization_enabled BOOLEAN,
    optimization_runs INTEGER,
    evm_version VARCHAR(20),
    source_code TEXT,
    abi JSONB,
    constructor_arguments TEXT,
    verification_status VARCHAR(20) NOT NULL CHECK (verification_status IN ('pending', 'verified', 'failed')),
    verified_at TIMESTAMP,
    verification_method VARCHAR(50),
    license VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned table: the PK must include the partition key (chain_id);
    -- PRIMARY KEY (id) alone is invalid on a table partitioned by chain_id.
    PRIMARY KEY (chain_id, id),
    UNIQUE (chain_id, address)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE contracts_chain_138 PARTITION OF contracts FOR VALUES IN (138);

-- Indexes for contracts
CREATE INDEX idx_contracts_chain_address ON contracts(chain_id, address);
CREATE INDEX idx_contracts_chain_verified ON contracts(chain_id, verification_status);
CREATE INDEX idx_contracts_abi_gin ON contracts USING GIN (abi);
|
||||
|
||||
-- Users table
-- NOTE(review): gen_random_uuid() is built in only on PostgreSQL 13+
-- (earlier versions need pgcrypto). This migration enables "uuid-ossp"
-- (uuid_generate_v4) but never uses it -- confirm which is intended.
CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    email VARCHAR(255) UNIQUE,
    username VARCHAR(100) UNIQUE,
    password_hash TEXT,
    api_key_hash TEXT,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    last_login_at TIMESTAMP
);

-- NOTE(review): the UNIQUE constraints above already create indexes on
-- email and username, so these two explicit indexes are redundant.
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX idx_users_username ON users(username);
|
||||
|
||||
-- API keys table
-- One row per issued API key; only the hash of the key is stored.
CREATE TABLE api_keys (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id UUID NOT NULL,
    key_hash TEXT NOT NULL UNIQUE,
    name VARCHAR(255),
    tier VARCHAR(20) NOT NULL CHECK (tier IN ('free', 'pro', 'enterprise')),
    rate_limit_per_second INTEGER,
    rate_limit_per_minute INTEGER,
    ip_whitelist TEXT[],
    last_used_at TIMESTAMP,
    expires_at TIMESTAMP,
    revoked BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW(),
    -- Deleting a user removes their keys.
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);

CREATE INDEX idx_api_keys_user ON api_keys(user_id);
-- NOTE(review): key_hash is already UNIQUE (which creates an index),
-- so this explicit index is redundant.
CREATE INDEX idx_api_keys_hash ON api_keys(key_hash);
|
||||
|
||||
-- Watchlists table
-- Per-user watched addresses; a user may watch an address at most once
-- per chain (UNIQUE below). Not partitioned by chain_id.
CREATE TABLE watchlists (
    id BIGSERIAL,
    user_id UUID NOT NULL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    label VARCHAR(255),
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    UNIQUE (user_id, chain_id, address),
    -- Deleting a user removes their watchlist entries.
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);

CREATE INDEX idx_watchlists_user ON watchlists(user_id);
CREATE INDEX idx_watchlists_chain_address ON watchlists(chain_id, address);
|
||||
|
||||
-- Address labels table
-- Labels for on-chain addresses; label_type distinguishes user-private,
-- public, and contract-name labels. user_id is nullable (non-user labels).
CREATE TABLE address_labels (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    label VARCHAR(255) NOT NULL,
    label_type VARCHAR(20) NOT NULL CHECK (label_type IN ('user', 'public', 'contract_name')),
    user_id UUID,
    source VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    -- NOTE(review): SQL UNIQUE treats NULLs as distinct, so rows with
    -- user_id IS NULL can duplicate (chain_id, address, label_type) --
    -- confirm that is acceptable for public/contract_name labels.
    UNIQUE (chain_id, address, label_type, user_id),
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);

CREATE INDEX idx_labels_chain_address ON address_labels(chain_id, address);
CREATE INDEX idx_labels_chain_user ON address_labels(chain_id, user_id);
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
-- Backfill checkpoints table for tracking backfill progress
-- One row per chain: the highest block already backfilled.
CREATE TABLE IF NOT EXISTS backfill_checkpoints (
    chain_id INTEGER NOT NULL,
    last_block BIGINT NOT NULL,
    updated_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (chain_id)
);
|
||||
|
||||
4
backend/database/migrations/0003_traces_table.down.sql
Normal file
4
backend/database/migrations/0003_traces_table.down.sql
Normal file
@@ -0,0 +1,4 @@
|
||||
-- Rollback traces table
-- CASCADE also removes the chain partition and indexes.
DROP TABLE IF EXISTS traces CASCADE;
|
||||
|
||||
19
backend/database/migrations/0003_traces_table.up.sql
Normal file
19
backend/database/migrations/0003_traces_table.up.sql
Normal file
@@ -0,0 +1,19 @@
|
||||
-- Traces table for storing transaction traces
-- One JSONB document per (chain, transaction); the PK correctly includes
-- the partition key, as PostgreSQL requires on partitioned tables.
CREATE TABLE IF NOT EXISTS traces (
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    trace_data JSONB NOT NULL,
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (chain_id, transaction_hash)
) PARTITION BY LIST (chain_id);

-- Create partition for ChainID 138
CREATE TABLE IF NOT EXISTS traces_chain_138 PARTITION OF traces FOR VALUES IN (138);

-- Indexes
-- NOTE(review): idx_traces_chain_tx duplicates the primary key index.
CREATE INDEX IF NOT EXISTS idx_traces_chain_tx ON traces(chain_id, transaction_hash);
CREATE INDEX IF NOT EXISTS idx_traces_chain_block ON traces(chain_id, block_number);
CREATE INDEX IF NOT EXISTS idx_traces_data_gin ON traces USING GIN (trace_data);
|
||||
|
||||
4
backend/database/migrations/0004_ccip_messages.down.sql
Normal file
4
backend/database/migrations/0004_ccip_messages.down.sql
Normal file
@@ -0,0 +1,4 @@
|
||||
-- Rollback CCIP messages table
DROP TABLE IF EXISTS ccip_messages CASCADE;
|
||||
|
||||
19
backend/database/migrations/0004_ccip_messages.up.sql
Normal file
19
backend/database/migrations/0004_ccip_messages.up.sql
Normal file
@@ -0,0 +1,19 @@
|
||||
-- CCIP messages table
-- Tracks cross-chain messages keyed by message_id, linking the source
-- and destination transactions and the delivery status.
CREATE TABLE IF NOT EXISTS ccip_messages (
    message_id VARCHAR(255) PRIMARY KEY,
    source_chain_id INTEGER NOT NULL,
    dest_chain_id INTEGER NOT NULL,
    source_tx_hash VARCHAR(66),
    dest_tx_hash VARCHAR(66),
    status VARCHAR(20) NOT NULL CHECK (status IN ('pending', 'delivered', 'failed')),
    created_at TIMESTAMP DEFAULT NOW(),
    delivered_at TIMESTAMP
);

CREATE INDEX idx_ccip_source_chain ON ccip_messages(source_chain_id);
CREATE INDEX idx_ccip_dest_chain ON ccip_messages(dest_chain_id);
CREATE INDEX idx_ccip_status ON ccip_messages(status);
CREATE INDEX idx_ccip_source_tx ON ccip_messages(source_tx_hash);
CREATE INDEX idx_ccip_dest_tx ON ccip_messages(dest_tx_hash);
|
||||
|
||||
19
backend/database/migrations/0005_ledger_entries.up.sql
Normal file
19
backend/database/migrations/0005_ledger_entries.up.sql
Normal file
@@ -0,0 +1,19 @@
|
||||
-- Ledger entries table for double-entry accounting
-- NOTE(review): no matching 0005 .down migration is visible in this
-- commit -- confirm one exists.
CREATE TABLE IF NOT EXISTS ledger_entries (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    customer_id UUID NOT NULL,
    account_type VARCHAR(20) NOT NULL CHECK (account_type IN ('asset', 'liability', 'equity')),
    -- NOTE(review): NUMERIC(78, 0) stores whole units only -- presumably
    -- amounts are in the currency's smallest unit; confirm with callers.
    amount NUMERIC(78, 0) NOT NULL,
    currency VARCHAR(10) NOT NULL DEFAULT 'USD',
    description TEXT,
    reference VARCHAR(255),
    side VARCHAR(10) NOT NULL CHECK (side IN ('debit', 'credit')),
    created_at TIMESTAMP DEFAULT NOW()
);

CREATE INDEX idx_ledger_customer ON ledger_entries(customer_id);
CREATE INDEX idx_ledger_account_type ON ledger_entries(account_type);
CREATE INDEX idx_ledger_reference ON ledger_entries(reference);
CREATE INDEX idx_ledger_created_at ON ledger_entries(created_at);
|
||||
|
||||
17
backend/database/migrations/0006_vtm_tables.up.sql
Normal file
17
backend/database/migrations/0006_vtm_tables.up.sql
Normal file
@@ -0,0 +1,17 @@
|
||||
-- VTM conversation states table
-- Session-scoped workflow state for the virtual teller; context holds
-- arbitrary per-step data as JSONB. expires_at supports session expiry
-- (no automatic cleanup is defined here -- presumably done by a job).
CREATE TABLE IF NOT EXISTS conversation_states (
    session_id VARCHAR(255) PRIMARY KEY,
    user_id UUID,
    workflow VARCHAR(50),
    step VARCHAR(50),
    context JSONB,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    expires_at TIMESTAMP
);

CREATE INDEX idx_conversation_user ON conversation_states(user_id);
CREATE INDEX idx_conversation_workflow ON conversation_states(workflow);
CREATE INDEX idx_conversation_expires ON conversation_states(expires_at);
|
||||
|
||||
16
backend/database/migrations/0007_address_tags.up.sql
Normal file
16
backend/database/migrations/0007_address_tags.up.sql
Normal file
@@ -0,0 +1,16 @@
|
||||
-- Address tags table
-- Free-form tags attached to addresses; an address may carry a given tag
-- at most once per chain (UNIQUE below).
CREATE TABLE IF NOT EXISTS address_tags (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    tag VARCHAR(255) NOT NULL,
    source VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    UNIQUE (chain_id, address, tag)
);

CREATE INDEX idx_address_tags_chain_address ON address_tags(chain_id, address);
CREATE INDEX idx_address_tags_tag ON address_tags(tag);
|
||||
|
||||
18
backend/database/migrations/0008_add_iso_timestamps.down.sql
Normal file
18
backend/database/migrations/0008_add_iso_timestamps.down.sql
Normal file
@@ -0,0 +1,18 @@
|
||||
-- Rollback migration: Remove ISO timestamp columns
-- Reverses 0008 up in dependency order: triggers first (they reference
-- the functions), then functions, indexes, and finally the columns.

-- Drop triggers
DROP TRIGGER IF EXISTS trigger_transactions_timestamp_iso ON transactions;
DROP TRIGGER IF EXISTS trigger_blocks_timestamp_iso ON blocks;

-- Drop functions
DROP FUNCTION IF EXISTS update_transaction_timestamp_iso();
DROP FUNCTION IF EXISTS update_timestamp_iso();

-- Drop indexes
DROP INDEX IF EXISTS idx_transactions_chain_timestamp_iso;
DROP INDEX IF EXISTS idx_blocks_chain_timestamp_iso;

-- Drop columns
ALTER TABLE transactions DROP COLUMN IF EXISTS timestamp_iso;
ALTER TABLE blocks DROP COLUMN IF EXISTS timestamp_iso;
|
||||
|
||||
72
backend/database/migrations/0008_add_iso_timestamps.up.sql
Normal file
72
backend/database/migrations/0008_add_iso_timestamps.up.sql
Normal file
@@ -0,0 +1,72 @@
|
||||
-- Add ISO 8601 compliant timestamp columns to blocks and transactions
-- This migration adds timestamp_iso columns that store ISO 8601 formatted timestamps
-- NOTE(review): the "Z" suffix in the format labels the value as UTC, but the
-- underlying columns are TIMESTAMP (no time zone) -- correct only if the
-- stored timestamps are already UTC; confirm the indexer's convention.
-- NOTE(review): row-level triggers on a partitioned parent table require
-- PostgreSQL 13+; blocks and transactions are partitioned in 0001.

-- Add timestamp_iso column to blocks table
ALTER TABLE blocks ADD COLUMN IF NOT EXISTS timestamp_iso VARCHAR(30);

-- Create index for timestamp_iso on blocks
CREATE INDEX IF NOT EXISTS idx_blocks_chain_timestamp_iso ON blocks(chain_id, timestamp_iso);

-- Add timestamp_iso column to transactions table
-- This will be populated from the block timestamp via trigger
ALTER TABLE transactions ADD COLUMN IF NOT EXISTS timestamp_iso VARCHAR(30);

-- Create index for timestamp_iso on transactions
CREATE INDEX IF NOT EXISTS idx_transactions_chain_timestamp_iso ON transactions(chain_id, timestamp_iso);

-- Function to update timestamp_iso from timestamp
CREATE OR REPLACE FUNCTION update_timestamp_iso()
RETURNS TRIGGER AS $$
BEGIN
    NEW.timestamp_iso := to_char(NEW.timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"');
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to automatically update timestamp_iso when timestamp changes in blocks
DROP TRIGGER IF EXISTS trigger_blocks_timestamp_iso ON blocks;
CREATE TRIGGER trigger_blocks_timestamp_iso
    BEFORE INSERT OR UPDATE OF timestamp ON blocks
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp_iso();

-- Function to update transaction timestamp_iso from block timestamp
CREATE OR REPLACE FUNCTION update_transaction_timestamp_iso()
RETURNS TRIGGER AS $$
DECLARE
    block_timestamp TIMESTAMP;
BEGIN
    -- Get the block timestamp
    SELECT b.timestamp INTO block_timestamp
    FROM blocks b
    WHERE b.chain_id = NEW.chain_id AND b.number = NEW.block_number;

    -- If block timestamp exists, format it as ISO 8601.
    -- NOTE(review): if a transaction is inserted before its block row,
    -- no match is found and timestamp_iso stays NULL with nothing to
    -- repair it later -- confirm insertion order (blocks before txs).
    IF block_timestamp IS NOT NULL THEN
        NEW.timestamp_iso := to_char(block_timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"');
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger to automatically update timestamp_iso when transaction is inserted/updated
DROP TRIGGER IF EXISTS trigger_transactions_timestamp_iso ON transactions;
CREATE TRIGGER trigger_transactions_timestamp_iso
    BEFORE INSERT OR UPDATE OF block_number ON transactions
    FOR EACH ROW
    EXECUTE FUNCTION update_transaction_timestamp_iso();

-- Backfill existing blocks with ISO timestamps
UPDATE blocks
SET timestamp_iso = to_char(timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"')
WHERE timestamp_iso IS NULL;

-- Backfill existing transactions with ISO timestamps from blocks
UPDATE transactions t
SET timestamp_iso = to_char(b.timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"')
FROM blocks b
WHERE t.chain_id = b.chain_id
  AND t.block_number = b.number
  AND t.timestamp_iso IS NULL;
|
||||
|
||||
7
backend/database/migrations/0009_add_link_token.down.sql
Normal file
7
backend/database/migrations/0009_add_link_token.down.sql
Normal file
@@ -0,0 +1,7 @@
|
||||
-- Rollback: Remove LINK token from tokens table
-- Note: This only removes if it matches the exact address
--
-- Fix: the address must match the one inserted by
-- 0009_add_link_token.up.sql (the MockLinkToken deployed on ChainID 138,
-- 0xb7721dD5...). The previous version deleted the Ethereum-mainnet LINK
-- address 0x51491077..., so the rollback silently removed nothing.

DELETE FROM tokens
WHERE chain_id = 138
  AND address = '0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03';
|
||||
|
||||
26
backend/database/migrations/0009_add_link_token.up.sql
Normal file
26
backend/database/migrations/0009_add_link_token.up.sql
Normal file
@@ -0,0 +1,26 @@
|
||||
-- Add LINK token to tokens table for ChainID 138
-- Uses deployed MockLinkToken address on ChainID 138
-- NOTE(review): the description text below says "Official Chainlink LINK
-- token from Ethereum Mainnet" while the address is the local mock --
-- consider correcting that user-facing string.
-- The upsert keeps the row current if re-run (idempotent migration).

INSERT INTO tokens (chain_id, address, type, name, symbol, decimals, verified, description, logo_url, website_url)
VALUES (
    138,
    '0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03',
    'ERC20',
    'Chainlink Token',
    'LINK',
    18,
    true,
    'Official Chainlink LINK token from Ethereum Mainnet. Used for CCIP fees and Chainlink services.',
    'https://raw.githubusercontent.com/chainlink/chainlink-docs/main/docs/images/chainlink-logo.svg',
    'https://chain.link/'
)
ON CONFLICT (chain_id, address) DO UPDATE SET
    name = EXCLUDED.name,
    symbol = EXCLUDED.symbol,
    decimals = EXCLUDED.decimals,
    verified = EXCLUDED.verified,
    description = EXCLUDED.description,
    logo_url = EXCLUDED.logo_url,
    website_url = EXCLUDED.website_url,
    updated_at = NOW();
|
||||
|
||||
21
backend/database/migrations/0010_track_schema.down.sql
Normal file
21
backend/database/migrations/0010_track_schema.down.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
-- Rollback migration for Track 2-4 Schema
-- Drops triggers before their shared function, then tables.
-- NOTE(review): this drops token_transfers, but a table of that name was
-- created by 0001 (partitioned, different columns) -- rolling back 0010
-- would destroy 0001's table. The same concern applies if "addresses"
-- exists from elsewhere. Confirm these names are exclusive to 0010.

DROP TRIGGER IF EXISTS update_operator_roles_updated_at ON operator_roles;
DROP TRIGGER IF EXISTS update_analytics_flows_updated_at ON analytics_flows;
DROP TRIGGER IF EXISTS update_token_balances_updated_at ON token_balances;
DROP TRIGGER IF EXISTS update_addresses_updated_at ON addresses;

DROP FUNCTION IF EXISTS update_updated_at_column();

DROP TABLE IF EXISTS wallet_nonces;
DROP TABLE IF EXISTS operator_roles;
DROP TABLE IF EXISTS operator_ip_whitelist;
DROP TABLE IF EXISTS operator_events;
DROP MATERIALIZED VIEW IF EXISTS token_distribution;
DROP TABLE IF EXISTS analytics_bridge_history;
DROP TABLE IF EXISTS analytics_flows;
DROP TABLE IF EXISTS internal_transactions;
DROP TABLE IF EXISTS token_balances;
DROP TABLE IF EXISTS token_transfers;
DROP TABLE IF EXISTS addresses;
|
||||
|
||||
234
backend/database/migrations/0010_track_schema.up.sql
Normal file
234
backend/database/migrations/0010_track_schema.up.sql
Normal file
@@ -0,0 +1,234 @@
|
||||
-- Migration: Track 2-4 Schema
-- Description: Creates tables for indexed explorer (Track 2), analytics (Track 3), and operator tools (Track 4)
--
-- NOTE(review): CREATE TABLE statements use IF NOT EXISTS but the CREATE INDEX
-- statements do not, so a partial re-run of this file fails on the first
-- pre-existing index — confirm the migration runner guarantees at-most-once
-- execution of each file.
-- NUMERIC(78, 0) columns are sized to hold a full unsigned 256-bit integer
-- (up to 78 decimal digits), i.e. raw wei / token base-unit amounts.

-- Track 2: Indexed Address Data
-- One row per address with first/last activity and running totals.
-- NOTE(review): address is UNIQUE on its own while the table also stores
-- chain_id; the same address can therefore exist on only ONE chain. If
-- multi-chain indexing is intended this likely needs UNIQUE(address, chain_id)
-- — confirm with the indexer's write path.
CREATE TABLE IF NOT EXISTS addresses (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL UNIQUE,
    chain_id INTEGER NOT NULL,
    first_seen_block BIGINT,
    first_seen_timestamp TIMESTAMP WITH TIME ZONE,
    last_seen_block BIGINT,
    last_seen_timestamp TIMESTAMP WITH TIME ZONE,
    tx_count_sent INTEGER DEFAULT 0,
    tx_count_received INTEGER DEFAULT 0,
    total_sent_wei NUMERIC(78, 0) DEFAULT 0,
    total_received_wei NUMERIC(78, 0) DEFAULT 0,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- NOTE(review): idx_addresses_address duplicates the index implicitly created
-- by the UNIQUE constraint on address — likely removable.
CREATE INDEX idx_addresses_address ON addresses(address);
CREATE INDEX idx_addresses_chain_id ON addresses(chain_id);
CREATE INDEX idx_addresses_first_seen ON addresses(first_seen_timestamp);
CREATE INDEX idx_addresses_last_seen ON addresses(last_seen_timestamp);

-- Track 2: Token Transfers (ERC-20)
-- One row per Transfer log; (chain_id, transaction_hash, log_index) makes
-- ingestion idempotent per event.
CREATE TABLE IF NOT EXISTS token_transfers (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    log_index INTEGER NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    token_contract VARCHAR(42) NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    value NUMERIC(78, 0) NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, transaction_hash, log_index)
);

CREATE INDEX idx_token_transfers_token ON token_transfers(token_contract);
CREATE INDEX idx_token_transfers_from ON token_transfers(from_address);
CREATE INDEX idx_token_transfers_to ON token_transfers(to_address);
CREATE INDEX idx_token_transfers_block ON token_transfers(block_number);
CREATE INDEX idx_token_transfers_timestamp ON token_transfers(timestamp);
CREATE INDEX idx_token_transfers_tx_hash ON token_transfers(transaction_hash);

-- Track 2: Token Balances (Snapshots)
-- Latest known balance per (holder, token, chain); source for the
-- token_distribution materialized view below.
CREATE TABLE IF NOT EXISTS token_balances (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL,
    token_contract VARCHAR(42) NOT NULL,
    chain_id INTEGER NOT NULL,
    balance NUMERIC(78, 0) NOT NULL DEFAULT 0,
    balance_formatted NUMERIC(78, 18),
    last_updated_block BIGINT,
    last_updated_timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(address, token_contract, chain_id)
);

CREATE INDEX idx_token_balances_address ON token_balances(address);
CREATE INDEX idx_token_balances_token ON token_balances(token_contract);
CREATE INDEX idx_token_balances_chain ON token_balances(chain_id);

-- Track 2: Internal Transactions
-- Call traces within a transaction; trace_address is the position in the
-- call tree. to_address is nullable (contract-creation calls have no callee).
CREATE TABLE IF NOT EXISTS internal_transactions (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    trace_address INTEGER[],
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42),
    value NUMERIC(78, 0) NOT NULL DEFAULT 0,
    gas_limit NUMERIC(78, 0),
    gas_used NUMERIC(78, 0),
    call_type VARCHAR(50),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_internal_txs_tx_hash ON internal_transactions(transaction_hash);
CREATE INDEX idx_internal_txs_from ON internal_transactions(from_address);
CREATE INDEX idx_internal_txs_to ON internal_transactions(to_address);
CREATE INDEX idx_internal_txs_block ON internal_transactions(block_number);
CREATE INDEX idx_internal_txs_timestamp ON internal_transactions(timestamp);

-- Track 3: Analytics Flows (Address → Address)
-- Aggregated edge per sender/recipient/token: running amount, count, and
-- first/last activity. token_contract is NULL for native-coin flows; note
-- that NULLs are distinct in a UNIQUE constraint, so duplicate native-flow
-- rows are not prevented by the constraint alone.
CREATE TABLE IF NOT EXISTS analytics_flows (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    token_contract VARCHAR(42),
    total_amount NUMERIC(78, 0) NOT NULL DEFAULT 0,
    transfer_count INTEGER NOT NULL DEFAULT 0,
    first_seen TIMESTAMP WITH TIME ZONE NOT NULL,
    last_seen TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, from_address, to_address, token_contract)
);

CREATE INDEX idx_analytics_flows_from ON analytics_flows(from_address);
CREATE INDEX idx_analytics_flows_to ON analytics_flows(to_address);
CREATE INDEX idx_analytics_flows_token ON analytics_flows(token_contract);
CREATE INDEX idx_analytics_flows_last_seen ON analytics_flows(last_seen);

-- Track 3: Bridge Analytics History
-- One row per observed cross-chain transfer.
CREATE TABLE IF NOT EXISTS analytics_bridge_history (
    id SERIAL PRIMARY KEY,
    chain_from INTEGER NOT NULL,
    chain_to INTEGER NOT NULL,
    token_contract VARCHAR(42),
    transfer_hash VARCHAR(66) NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    amount NUMERIC(78, 0) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    status VARCHAR(50) NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_bridge_history_chains ON analytics_bridge_history(chain_from, chain_to);
CREATE INDEX idx_bridge_history_token ON analytics_bridge_history(token_contract);
CREATE INDEX idx_bridge_history_timestamp ON analytics_bridge_history(timestamp);
CREATE INDEX idx_bridge_history_from ON analytics_bridge_history(from_address);

-- Track 3: Token Distribution (Materialized View)
-- Per-token holder statistics derived from token_balances. Must be refreshed
-- (REFRESH MATERIALIZED VIEW, optionally CONCURRENTLY via the unique index
-- below) to pick up new balance snapshots — no refresh is scheduled here.
CREATE MATERIALIZED VIEW IF NOT EXISTS token_distribution AS
SELECT
    token_contract,
    chain_id,
    COUNT(DISTINCT address) as holder_count,
    SUM(balance) as total_balance,
    AVG(balance) as avg_balance,
    PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY balance) as median_balance,
    MAX(balance) as max_balance,
    MIN(balance) as min_balance,
    COUNT(*) FILTER (WHERE balance > 0) as active_holders,
    NOW() as last_updated
FROM token_balances
GROUP BY token_contract, chain_id;

CREATE UNIQUE INDEX idx_token_distribution_unique ON token_distribution(token_contract, chain_id);
CREATE INDEX idx_token_distribution_holders ON token_distribution(holder_count);

-- Track 4: Operator Events (Audit Log)
-- Append-only record of operator actions with request metadata.
CREATE TABLE IF NOT EXISTS operator_events (
    id SERIAL PRIMARY KEY,
    event_type VARCHAR(100) NOT NULL,
    chain_id INTEGER,
    operator_address VARCHAR(42) NOT NULL,
    target_resource VARCHAR(200),
    action VARCHAR(100) NOT NULL,
    details JSONB,
    ip_address INET,
    user_agent TEXT,
    timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_operator_events_type ON operator_events(event_type);
CREATE INDEX idx_operator_events_operator ON operator_events(operator_address);
CREATE INDEX idx_operator_events_timestamp ON operator_events(timestamp);
CREATE INDEX idx_operator_events_chain ON operator_events(chain_id);

-- Track 4: Operator IP Whitelist
-- Allowed source IPs per operator address.
CREATE TABLE IF NOT EXISTS operator_ip_whitelist (
    id SERIAL PRIMARY KEY,
    operator_address VARCHAR(42) NOT NULL,
    ip_address INET NOT NULL,
    description TEXT,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(operator_address, ip_address)
);

CREATE INDEX idx_operator_whitelist_operator ON operator_ip_whitelist(operator_address);
CREATE INDEX idx_operator_whitelist_ip ON operator_ip_whitelist(ip_address);

-- Track 4: Operator Roles
-- Role/approval state per operator wallet address.
CREATE TABLE IF NOT EXISTS operator_roles (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL UNIQUE,
    track_level INTEGER NOT NULL DEFAULT 4,
    roles TEXT[],
    approved BOOLEAN DEFAULT FALSE,
    approved_by VARCHAR(42),
    approved_at TIMESTAMP WITH TIME ZONE,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- NOTE(review): idx_operator_roles_address duplicates the UNIQUE constraint's
-- implicit index on address.
CREATE INDEX idx_operator_roles_address ON operator_roles(address);
CREATE INDEX idx_operator_roles_approved ON operator_roles(approved);

-- Wallet Authentication: Nonce storage
-- One active sign-in nonce per address; expired rows are identified via
-- expires_at (no automatic cleanup is created here).
CREATE TABLE IF NOT EXISTS wallet_nonces (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL UNIQUE,
    nonce VARCHAR(64) NOT NULL,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_wallet_nonces_address ON wallet_nonces(address);
CREATE INDEX idx_wallet_nonces_expires ON wallet_nonces(expires_at);

-- Update triggers for updated_at
-- Shared trigger function: stamps updated_at on every UPDATE. The target
-- table must have an updated_at column or the trigger errors at runtime.
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_addresses_updated_at BEFORE UPDATE ON addresses
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_token_balances_updated_at BEFORE UPDATE ON token_balances
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_analytics_flows_updated_at BEFORE UPDATE ON analytics_flows
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_operator_roles_updated_at BEFORE UPDATE ON operator_roles
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
||||
@@ -0,0 +1,11 @@
|
||||
-- Migration: Token Aggregation Schema (Rollback)
-- Description: Drops tables created for token aggregation
-- Reverses 0011_token_aggregation_schema.up.sql. CASCADE also removes the
-- per-chain partitions (138, 651940) attached to each partitioned parent.

-- Drop tables in reverse order (respecting dependencies)
DROP TABLE IF EXISTS swap_events CASCADE;
DROP TABLE IF EXISTS token_signals CASCADE;
DROP TABLE IF EXISTS external_api_cache CASCADE;
DROP TABLE IF EXISTS token_ohlcv CASCADE;
DROP TABLE IF EXISTS pool_reserves_history CASCADE;
DROP TABLE IF EXISTS liquidity_pools CASCADE;
DROP TABLE IF EXISTS token_market_data CASCADE;
||||
228
backend/database/migrations/0011_token_aggregation_schema.up.sql
Normal file
228
backend/database/migrations/0011_token_aggregation_schema.up.sql
Normal file
@@ -0,0 +1,228 @@
|
||||
-- Migration: Token Aggregation Schema
-- Description: Creates tables for token market data, liquidity pools, OHLCV, and external API cache
-- Supports ChainID 138 and 651940
--
-- NOTE(review): every partitioned table below declares `id BIGSERIAL PRIMARY
-- KEY` together with PARTITION BY LIST (chain_id). PostgreSQL requires a
-- primary key on a partitioned table to include ALL partition key columns,
-- so these CREATE TABLE statements will fail as written — the PK likely needs
-- to be (id, chain_id). Confirm against the target PostgreSQL version.
-- NOTE(review): the create_hypertable() calls target tables that already use
-- declarative partitioning; TimescaleDB cannot convert such tables into
-- hypertables. One of the two partitioning mechanisms has to go — confirm
-- the intended design.

-- Token Market Data - Aggregated market metrics per token
CREATE TABLE IF NOT EXISTS token_market_data (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    price_usd NUMERIC(30, 8),
    price_change_24h NUMERIC(10, 4),
    volume_24h NUMERIC(30, 8) DEFAULT 0,
    volume_7d NUMERIC(30, 8) DEFAULT 0,
    volume_30d NUMERIC(30, 8) DEFAULT 0,
    market_cap_usd NUMERIC(30, 8),
    liquidity_usd NUMERIC(30, 8) DEFAULT 0,
    holders_count INTEGER DEFAULT 0,
    transfers_24h INTEGER DEFAULT 0,
    last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, token_address)
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_market_data_chain_138 PARTITION OF token_market_data FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_market_data_chain_651940 PARTITION OF token_market_data FOR VALUES IN (651940);

CREATE INDEX idx_token_market_data_chain_token ON token_market_data(chain_id, token_address);
CREATE INDEX idx_token_market_data_price ON token_market_data(price_usd) WHERE price_usd IS NOT NULL;
CREATE INDEX idx_token_market_data_volume ON token_market_data(volume_24h) WHERE volume_24h > 0;
CREATE INDEX idx_token_market_data_last_updated ON token_market_data(last_updated);

-- Liquidity Pools - DEX pool information
CREATE TABLE IF NOT EXISTS liquidity_pools (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    token0_address VARCHAR(42) NOT NULL,
    token1_address VARCHAR(42) NOT NULL,
    dex_type VARCHAR(20) NOT NULL CHECK (dex_type IN ('uniswap_v2', 'uniswap_v3', 'dodo', 'custom')),
    factory_address VARCHAR(42),
    router_address VARCHAR(42),
    reserve0 NUMERIC(78, 0) DEFAULT 0,
    reserve1 NUMERIC(78, 0) DEFAULT 0,
    reserve0_usd NUMERIC(30, 8) DEFAULT 0,
    reserve1_usd NUMERIC(30, 8) DEFAULT 0,
    total_liquidity_usd NUMERIC(30, 8) DEFAULT 0,
    volume_24h NUMERIC(30, 8) DEFAULT 0,
    fee_tier INTEGER, -- For UniswapV3 (500, 3000, 10000)
    created_at_block BIGINT,
    created_at_timestamp TIMESTAMP WITH TIME ZONE,
    last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, pool_address)
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS liquidity_pools_chain_138 PARTITION OF liquidity_pools FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS liquidity_pools_chain_651940 PARTITION OF liquidity_pools FOR VALUES IN (651940);

CREATE INDEX idx_liquidity_pools_chain_pool ON liquidity_pools(chain_id, pool_address);
CREATE INDEX idx_liquidity_pools_token0 ON liquidity_pools(chain_id, token0_address);
CREATE INDEX idx_liquidity_pools_token1 ON liquidity_pools(chain_id, token1_address);
CREATE INDEX idx_liquidity_pools_dex_type ON liquidity_pools(chain_id, dex_type);
CREATE INDEX idx_liquidity_pools_tvl ON liquidity_pools(total_liquidity_usd) WHERE total_liquidity_usd > 0;

-- Pool Reserves History - Time-series snapshots of pool reserves
CREATE TABLE IF NOT EXISTS pool_reserves_history (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    reserve0 NUMERIC(78, 0) NOT NULL,
    reserve1 NUMERIC(78, 0) NOT NULL,
    reserve0_usd NUMERIC(30, 8),
    reserve1_usd NUMERIC(30, 8),
    total_liquidity_usd NUMERIC(30, 8),
    block_number BIGINT NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS pool_reserves_history_chain_138 PARTITION OF pool_reserves_history FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS pool_reserves_history_chain_651940 PARTITION OF pool_reserves_history FOR VALUES IN (651940);

-- Convert to hypertable for TimescaleDB time-series optimization
-- (see NOTE(review) at the top of this file)
SELECT create_hypertable('pool_reserves_history', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);

CREATE INDEX idx_pool_reserves_history_pool_time ON pool_reserves_history(chain_id, pool_address, timestamp DESC);
CREATE INDEX idx_pool_reserves_history_timestamp ON pool_reserves_history(timestamp DESC);

-- Token OHLCV - Open, High, Low, Close, Volume data by interval
-- NOTE(review): pool_address is nullable and part of the UNIQUE constraint;
-- NULLs are distinct under UNIQUE, so aggregated rows (pool_address IS NULL)
-- can be duplicated for the same (chain, token, interval, timestamp) —
-- confirm whether the writer relies on ON CONFLICT here.
CREATE TABLE IF NOT EXISTS token_ohlcv (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    pool_address VARCHAR(42), -- Optional: specific pool, NULL = aggregated across all pools
    interval_type VARCHAR(10) NOT NULL CHECK (interval_type IN ('5m', '15m', '1h', '4h', '24h')),
    open_price NUMERIC(30, 8) NOT NULL,
    high_price NUMERIC(30, 8) NOT NULL,
    low_price NUMERIC(30, 8) NOT NULL,
    close_price NUMERIC(30, 8) NOT NULL,
    volume NUMERIC(30, 8) DEFAULT 0,
    volume_usd NUMERIC(30, 8) DEFAULT 0,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, token_address, pool_address, interval_type, timestamp)
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_ohlcv_chain_138 PARTITION OF token_ohlcv FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_ohlcv_chain_651940 PARTITION OF token_ohlcv FOR VALUES IN (651940);

-- Convert to hypertable for TimescaleDB time-series optimization
-- (see NOTE(review) at the top of this file)
SELECT create_hypertable('token_ohlcv', 'timestamp',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

CREATE INDEX idx_token_ohlcv_token_time ON token_ohlcv(chain_id, token_address, interval_type, timestamp DESC);
CREATE INDEX idx_token_ohlcv_pool_time ON token_ohlcv(chain_id, pool_address, interval_type, timestamp DESC) WHERE pool_address IS NOT NULL;
CREATE INDEX idx_token_ohlcv_timestamp ON token_ohlcv(timestamp DESC);

-- External API Cache - Cached responses from external APIs
-- Raw JSON responses keyed by (provider, cache_key); rows past expires_at are
-- stale (no automatic eviction is created by this migration).
CREATE TABLE IF NOT EXISTS external_api_cache (
    id BIGSERIAL PRIMARY KEY,
    api_provider VARCHAR(50) NOT NULL CHECK (api_provider IN ('coingecko', 'coinmarketcap', 'dexscreener')),
    cache_key VARCHAR(255) NOT NULL,
    chain_id INTEGER,
    token_address VARCHAR(42),
    response_data JSONB NOT NULL,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(api_provider, cache_key)
);

CREATE INDEX idx_external_api_cache_provider_key ON external_api_cache(api_provider, cache_key);
CREATE INDEX idx_external_api_cache_chain_token ON external_api_cache(chain_id, token_address) WHERE chain_id IS NOT NULL AND token_address IS NOT NULL;
CREATE INDEX idx_external_api_cache_expires ON external_api_cache(expires_at);

-- Token Signals - Trending and growth metrics
CREATE TABLE IF NOT EXISTS token_signals (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    tx_count_growth_24h NUMERIC(10, 4) DEFAULT 0, -- Percentage change
    unique_wallets_24h INTEGER DEFAULT 0,
    unique_wallets_growth_24h NUMERIC(10, 4) DEFAULT 0,
    swap_count_24h INTEGER DEFAULT 0,
    swap_count_growth_24h NUMERIC(10, 4) DEFAULT 0,
    new_lp_creations_24h INTEGER DEFAULT 0,
    attention_score NUMERIC(10, 4) DEFAULT 0, -- Composite score 0-100
    trending_rank INTEGER, -- Rank among trending tokens
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, token_address, timestamp)
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_signals_chain_138 PARTITION OF token_signals FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_signals_chain_651940 PARTITION OF token_signals FOR VALUES IN (651940);

-- Convert to hypertable for TimescaleDB time-series optimization
-- (see NOTE(review) at the top of this file)
SELECT create_hypertable('token_signals', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);

CREATE INDEX idx_token_signals_token_time ON token_signals(chain_id, token_address, timestamp DESC);
CREATE INDEX idx_token_signals_attention ON token_signals(chain_id, attention_score DESC, timestamp DESC);
CREATE INDEX idx_token_signals_trending ON token_signals(chain_id, trending_rank, timestamp DESC) WHERE trending_rank IS NOT NULL;

-- Swap Events - Track individual swap events for volume calculation
-- One row per DEX Swap log; (chain_id, transaction_hash, log_index) makes
-- ingestion idempotent per event.
CREATE TABLE IF NOT EXISTS swap_events (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    log_index INTEGER NOT NULL,
    token0_address VARCHAR(42) NOT NULL,
    token1_address VARCHAR(42) NOT NULL,
    amount0_in NUMERIC(78, 0) DEFAULT 0,
    amount1_in NUMERIC(78, 0) DEFAULT 0,
    amount0_out NUMERIC(78, 0) DEFAULT 0,
    amount1_out NUMERIC(78, 0) DEFAULT 0,
    amount_usd NUMERIC(30, 8), -- Calculated USD value
    sender VARCHAR(42),
    to_address VARCHAR(42),
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, transaction_hash, log_index)
) PARTITION BY LIST (chain_id);

-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS swap_events_chain_138 PARTITION OF swap_events FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS swap_events_chain_651940 PARTITION OF swap_events FOR VALUES IN (651940);

-- Convert to hypertable for TimescaleDB time-series optimization
-- (see NOTE(review) at the top of this file)
SELECT create_hypertable('swap_events', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);

CREATE INDEX idx_swap_events_pool_time ON swap_events(chain_id, pool_address, timestamp DESC);
CREATE INDEX idx_swap_events_token0 ON swap_events(chain_id, token0_address, timestamp DESC);
CREATE INDEX idx_swap_events_token1 ON swap_events(chain_id, token1_address, timestamp DESC);
CREATE INDEX idx_swap_events_tx_hash ON swap_events(chain_id, transaction_hash);
CREATE INDEX idx_swap_events_block ON swap_events(chain_id, block_number);

-- Update triggers for last_updated
-- NOTE(review): update_updated_at_column() (defined in migration 0010) sets
-- NEW.updated_at, but token_market_data and liquidity_pools have a
-- last_updated column and NO updated_at column — these triggers will raise
-- an error on the first UPDATE. The function or the column names need to be
-- reconciled.
CREATE TRIGGER update_token_market_data_updated_at BEFORE UPDATE ON token_market_data
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_liquidity_pools_updated_at BEFORE UPDATE ON liquidity_pools
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

-- Comments for documentation
COMMENT ON TABLE token_market_data IS 'Aggregated market data per token including price, volume, market cap, and liquidity';
COMMENT ON TABLE liquidity_pools IS 'DEX liquidity pool information with reserves and TVL';
COMMENT ON TABLE pool_reserves_history IS 'Time-series history of pool reserve snapshots';
COMMENT ON TABLE token_ohlcv IS 'OHLCV (Open, High, Low, Close, Volume) data for token price charts';
COMMENT ON TABLE external_api_cache IS 'Cached responses from external APIs (CoinGecko, CMC, DexScreener)';
COMMENT ON TABLE token_signals IS 'Trending signals and growth metrics for tokens';
COMMENT ON TABLE swap_events IS 'Individual swap events from DEX pools for volume calculation';
||||
@@ -0,0 +1,9 @@
|
||||
-- Migration: Admin Configuration Schema (Rollback)
-- Description: Drops tables created for admin configuration
-- Reverses 0012_admin_config_schema.up.sql. Children first: admin_audit_log
-- and admin_sessions reference admin_users via foreign keys.

DROP TABLE IF EXISTS admin_audit_log CASCADE;
DROP TABLE IF EXISTS admin_sessions CASCADE;
DROP TABLE IF EXISTS admin_users CASCADE;
DROP TABLE IF EXISTS dex_factory_config CASCADE;
DROP TABLE IF EXISTS api_endpoints CASCADE;
DROP TABLE IF EXISTS api_keys CASCADE;
||||
133
backend/database/migrations/0012_admin_config_schema.up.sql
Normal file
133
backend/database/migrations/0012_admin_config_schema.up.sql
Normal file
@@ -0,0 +1,133 @@
|
||||
-- Migration: Admin Configuration Schema
-- Description: Creates tables for managing API keys, endpoints, and service configuration
-- For Token Aggregation Service Control Panel

-- API Keys Management
-- Keys are stored encrypted (api_key_encrypted); encryption/decryption is the
-- application's responsibility — this schema only stores the ciphertext.
CREATE TABLE IF NOT EXISTS api_keys (
    id BIGSERIAL PRIMARY KEY,
    provider VARCHAR(50) NOT NULL CHECK (provider IN ('coingecko', 'coinmarketcap', 'dexscreener', 'custom')),
    key_name VARCHAR(255) NOT NULL,
    api_key_encrypted TEXT NOT NULL,
    is_active BOOLEAN DEFAULT true,
    rate_limit_per_minute INTEGER,
    rate_limit_per_day INTEGER,
    last_used_at TIMESTAMP WITH TIME ZONE,
    expires_at TIMESTAMP WITH TIME ZONE,
    created_by VARCHAR(255),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(provider, key_name)
);

CREATE INDEX idx_api_keys_provider ON api_keys(provider);
CREATE INDEX idx_api_keys_active ON api_keys(is_active) WHERE is_active = true;

-- API Endpoints Configuration
-- Per-chain endpoint registry (RPC/explorer/indexer) with optional auth and
-- health-check bookkeeping updated by the service.
CREATE TABLE IF NOT EXISTS api_endpoints (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    endpoint_type VARCHAR(50) NOT NULL CHECK (endpoint_type IN ('rpc', 'explorer', 'indexer', 'custom')),
    endpoint_name VARCHAR(255) NOT NULL,
    endpoint_url TEXT NOT NULL,
    is_primary BOOLEAN DEFAULT false,
    is_active BOOLEAN DEFAULT true,
    requires_auth BOOLEAN DEFAULT false,
    auth_type VARCHAR(50),
    auth_config JSONB,
    rate_limit_per_minute INTEGER,
    timeout_ms INTEGER DEFAULT 10000,
    health_check_enabled BOOLEAN DEFAULT true,
    last_health_check TIMESTAMP WITH TIME ZONE,
    health_check_status VARCHAR(20),
    created_by VARCHAR(255),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, endpoint_type, endpoint_name)
);

CREATE INDEX idx_api_endpoints_chain ON api_endpoints(chain_id);
CREATE INDEX idx_api_endpoints_type ON api_endpoints(endpoint_type);
CREATE INDEX idx_api_endpoints_active ON api_endpoints(is_active) WHERE is_active = true;

-- DEX Factory Configuration
-- Which DEX factory/router contracts to index per chain; start_block bounds
-- historical backfill.
CREATE TABLE IF NOT EXISTS dex_factory_config (
    id BIGSERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    dex_type VARCHAR(20) NOT NULL CHECK (dex_type IN ('uniswap_v2', 'uniswap_v3', 'dodo', 'custom')),
    factory_address VARCHAR(42) NOT NULL,
    router_address VARCHAR(42),
    pool_manager_address VARCHAR(42),
    start_block BIGINT DEFAULT 0,
    is_active BOOLEAN DEFAULT true,
    description TEXT,
    created_by VARCHAR(255),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(chain_id, dex_type, factory_address)
);

CREATE INDEX idx_dex_factory_chain ON dex_factory_config(chain_id);
CREATE INDEX idx_dex_factory_type ON dex_factory_config(dex_type);

-- Admin Users
-- Control-panel accounts; password_hash stores a hash (hashing scheme is the
-- application's responsibility).
CREATE TABLE IF NOT EXISTS admin_users (
    id BIGSERIAL PRIMARY KEY,
    username VARCHAR(255) NOT NULL UNIQUE,
    email VARCHAR(255),
    password_hash TEXT NOT NULL,
    role VARCHAR(50) DEFAULT 'admin' CHECK (role IN ('super_admin', 'admin', 'operator', 'viewer')),
    is_active BOOLEAN DEFAULT true,
    last_login TIMESTAMP WITH TIME ZONE,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

-- NOTE(review): idx_admin_users_username duplicates the UNIQUE constraint's
-- implicit index on username.
CREATE INDEX idx_admin_users_username ON admin_users(username);
CREATE INDEX idx_admin_users_active ON admin_users(is_active) WHERE is_active = true;

-- Admin Sessions
-- Bearer-token sessions; rows are removed with the owning user via
-- ON DELETE CASCADE. Expiry enforcement is the application's responsibility.
CREATE TABLE IF NOT EXISTS admin_sessions (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES admin_users(id) ON DELETE CASCADE,
    session_token VARCHAR(255) NOT NULL UNIQUE,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    ip_address INET,
    user_agent TEXT,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_admin_sessions_token ON admin_sessions(session_token);
CREATE INDEX idx_admin_sessions_user ON admin_sessions(user_id);

-- Audit Log
-- Append-only change history; old_values/new_values capture the JSON diff of
-- the affected resource. user_id is nullable and NOT cascaded, so log rows
-- survive user deletion only if the FK allows it (no ON DELETE action =
-- deletion of a referenced user is rejected while log rows exist).
CREATE TABLE IF NOT EXISTS admin_audit_log (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT REFERENCES admin_users(id),
    action VARCHAR(100) NOT NULL,
    resource_type VARCHAR(50) NOT NULL,
    resource_id BIGINT,
    old_values JSONB,
    new_values JSONB,
    ip_address INET,
    user_agent TEXT,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);

CREATE INDEX idx_audit_log_user ON admin_audit_log(user_id);
CREATE INDEX idx_audit_log_resource ON admin_audit_log(resource_type, resource_id);
CREATE INDEX idx_audit_log_created ON admin_audit_log(created_at DESC);

-- Update triggers (if update_updated_at_column function exists)
-- The function is created by migration 0010; the guard makes this file
-- loadable standalone.
-- NOTE(review): CREATE TRIGGER here has no existence guard of its own, so
-- re-running this DO block after the triggers exist raises an error —
-- confirm the migration runner applies each file at most once.
DO $$
BEGIN
    IF EXISTS (SELECT 1 FROM pg_proc WHERE proname = 'update_updated_at_column') THEN
        CREATE TRIGGER update_api_keys_updated_at BEFORE UPDATE ON api_keys
            FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
        CREATE TRIGGER update_api_endpoints_updated_at BEFORE UPDATE ON api_endpoints
            FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
        CREATE TRIGGER update_dex_factory_config_updated_at BEFORE UPDATE ON dex_factory_config
            FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
        CREATE TRIGGER update_admin_users_updated_at BEFORE UPDATE ON admin_users
            FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
    END IF;
END $$;
||||
165
backend/database/migrations/migrate.go
Normal file
165
backend/database/migrations/migrate.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
_ "github.com/jackc/pgx/v5/stdlib"
|
||||
)
|
||||
|
||||
// Migration represents a single database migration, pairing the SQL that
// applies it with the SQL that rolls it back.
type Migration struct {
	Version string // identifier derived from the file name, e.g. "0010_track_schema"
	Up      string // contents of the <Version>.up.sql file
	Down    string // contents of the <Version>.down.sql file
}
||||
|
||||
// Migrator handles database migrations: it discovers migration files on
// disk, tracks applied versions in the schema_migrations table, and applies
// pending ones.
type Migrator struct {
	db *sql.DB // connection used both to run migration SQL and to record progress
}
||||
|
||||
// NewMigrator creates a new migrator that operates over db. The caller
// retains ownership of db and is responsible for closing it.
func NewMigrator(db *sql.DB) *Migrator {
	return &Migrator{db: db}
}
||||
|
||||
// RunMigrations runs all pending migrations
|
||||
func (m *Migrator) RunMigrations(migrationsDir string) error {
|
||||
// Create migrations table if it doesn't exist
|
||||
if err := m.createMigrationsTable(); err != nil {
|
||||
return fmt.Errorf("failed to create migrations table: %w", err)
|
||||
}
|
||||
|
||||
// Load migration files
|
||||
migrations, err := m.loadMigrations(migrationsDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load migrations: %w", err)
|
||||
}
|
||||
|
||||
// Get applied migrations
|
||||
applied, err := m.getAppliedMigrations()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get applied migrations: %w", err)
|
||||
}
|
||||
|
||||
// Run pending migrations
|
||||
for _, migration := range migrations {
|
||||
if applied[migration.Version] {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := m.runMigration(migration); err != nil {
|
||||
return fmt.Errorf("failed to run migration %s: %w", migration.Version, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Migrator) createMigrationsTable() error {
|
||||
query := `
|
||||
CREATE TABLE IF NOT EXISTS schema_migrations (
|
||||
version VARCHAR(255) PRIMARY KEY,
|
||||
applied_at TIMESTAMP DEFAULT NOW()
|
||||
)
|
||||
`
|
||||
_, err := m.db.Exec(query)
|
||||
return err
|
||||
}
|
||||
|
||||
func (m *Migrator) loadMigrations(dir string) ([]Migration, error) {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
migrations := make(map[string]*Migration)
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
filename := file.Name()
|
||||
if !strings.HasSuffix(filename, ".up.sql") && !strings.HasSuffix(filename, ".down.sql") {
|
||||
continue
|
||||
}
|
||||
|
||||
version := strings.TrimSuffix(filename, ".up.sql")
|
||||
version = strings.TrimSuffix(version, ".down.sql")
|
||||
|
||||
if migrations[version] == nil {
|
||||
migrations[version] = &Migration{Version: version}
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(filepath.Join(dir, filename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasSuffix(filename, ".up.sql") {
|
||||
migrations[version].Up = string(content)
|
||||
} else if strings.HasSuffix(filename, ".down.sql") {
|
||||
migrations[version].Down = string(content)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice and sort
|
||||
result := make([]Migration, 0, len(migrations))
|
||||
for _, m := range migrations {
|
||||
result = append(result, *m)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i].Version < result[j].Version
|
||||
})
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (m *Migrator) getAppliedMigrations() (map[string]bool, error) {
|
||||
rows, err := m.db.Query("SELECT version FROM schema_migrations")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
applied := make(map[string]bool)
|
||||
for rows.Next() {
|
||||
var version string
|
||||
if err := rows.Scan(&version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
applied[version] = true
|
||||
}
|
||||
|
||||
return applied, rows.Err()
|
||||
}
|
||||
|
||||
func (m *Migrator) runMigration(migration Migration) error {
|
||||
tx, err := m.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Execute migration
|
||||
if _, err := tx.Exec(migration.Up); err != nil {
|
||||
return fmt.Errorf("failed to execute migration: %w", err)
|
||||
}
|
||||
|
||||
// Record migration
|
||||
if _, err := tx.Exec(
|
||||
"INSERT INTO schema_migrations (version) VALUES ($1)",
|
||||
migration.Version,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to record migration: %w", err)
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
87
backend/database/timeseries/mempool_schema.sql
Normal file
87
backend/database/timeseries/mempool_schema.sql
Normal file
@@ -0,0 +1,87 @@
|
||||
-- TimescaleDB schema for mempool transactions
-- This extends the main database with time-series capabilities

-- Mempool transactions hypertable.
-- One row per observed pending transaction; the composite key
-- (time, chain_id, hash) lets the same hash appear across chains or
-- at different observation times.
CREATE TABLE IF NOT EXISTS mempool_transactions (
    time TIMESTAMPTZ NOT NULL,                  -- hypertable time dimension
    chain_id INTEGER NOT NULL,
    hash VARCHAR(66) NOT NULL,                  -- 66 chars: presumably 0x + 64 hex digits — confirm with writer
    from_address VARCHAR(42) NOT NULL,          -- 42 chars: presumably 0x + 40 hex digits
    to_address VARCHAR(42),                     -- nullable (e.g. when no recipient is set)
    value NUMERIC(78, 0),                       -- 78 digits covers the full uint256 range
    gas_price BIGINT,
    max_fee_per_gas BIGINT,                     -- nullable; presumably EIP-1559-only fields — confirm
    max_priority_fee_per_gas BIGINT,
    gas_limit BIGINT,
    nonce BIGINT,
    input_data_length INTEGER,                  -- length only; calldata itself is not stored here
    first_seen TIMESTAMPTZ NOT NULL,
    status VARCHAR(20) DEFAULT 'pending',       -- lifecycle state; other values set by application code
    confirmed_block_number BIGINT,              -- populated once confirmed
    confirmed_at TIMESTAMPTZ,
    PRIMARY KEY (time, chain_id, hash)
);

-- Convert to hypertable (idempotent; partitions on the time column).
SELECT create_hypertable('mempool_transactions', 'time', if_not_exists => TRUE);

-- Indexes for the common lookups: by hash, by sender, and by status over time.
CREATE INDEX IF NOT EXISTS idx_mempool_chain_hash ON mempool_transactions(chain_id, hash);
CREATE INDEX IF NOT EXISTS idx_mempool_chain_from ON mempool_transactions(chain_id, from_address);
CREATE INDEX IF NOT EXISTS idx_mempool_chain_status ON mempool_transactions(chain_id, status, time);
|
||||
|
||||
-- Network metrics hypertable.
-- Per-chain throughput snapshot: one row per (time, chain_id) sample.
CREATE TABLE IF NOT EXISTS network_metrics (
    time TIMESTAMPTZ NOT NULL,                  -- sample timestamp; hypertable time dimension
    chain_id INTEGER NOT NULL,
    block_number BIGINT,
    tps DOUBLE PRECISION,                       -- transactions per second
    gps DOUBLE PRECISION,                       -- presumably gas per second — confirm with the metrics writer
    avg_gas_price BIGINT,
    pending_transactions INTEGER,
    block_time_seconds DOUBLE PRECISION,
    PRIMARY KEY (time, chain_id)
);

SELECT create_hypertable('network_metrics', 'time', if_not_exists => TRUE);

-- Supports "latest metrics for chain X" queries (time DESC).
CREATE INDEX IF NOT EXISTS idx_network_metrics_chain_time ON network_metrics(chain_id, time DESC);
|
||||
|
||||
-- Gas price history hypertable.
-- Per-chain gas-price distribution snapshot (min/max/avg plus
-- percentiles), one row per (time, chain_id) sample.
CREATE TABLE IF NOT EXISTS gas_price_history (
    time TIMESTAMPTZ NOT NULL,                  -- sample timestamp; hypertable time dimension
    chain_id INTEGER NOT NULL,
    block_number BIGINT,
    min_gas_price BIGINT,
    max_gas_price BIGINT,
    avg_gas_price BIGINT,
    p25_gas_price BIGINT,                       -- pNN = Nth percentile of observed gas prices
    p50_gas_price BIGINT,
    p75_gas_price BIGINT,
    p95_gas_price BIGINT,
    p99_gas_price BIGINT,
    PRIMARY KEY (time, chain_id)
);

SELECT create_hypertable('gas_price_history', 'time', if_not_exists => TRUE);
|
||||
|
||||
-- Continuous aggregate for 1-minute network metrics.
-- TimescaleDB materializes these per-minute averages incrementally
-- from network_metrics, so dashboards read the rollup, not raw rows.
CREATE MATERIALIZED VIEW IF NOT EXISTS network_metrics_1m
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 minute', time) AS bucket,
    chain_id,
    AVG(tps) AS avg_tps,
    AVG(gps) AS avg_gps,
    AVG(avg_gas_price) AS avg_gas_price,        -- average of per-sample averages, not a true global average
    AVG(pending_transactions) AS avg_pending_tx
FROM network_metrics
GROUP BY bucket, chain_id;

-- Add refresh policy for continuous aggregate:
-- every minute, refresh the window from 1 hour ago up to 1 minute ago
-- (the trailing minute is left out so in-flight samples settle first).
SELECT add_continuous_aggregate_policy('network_metrics_1m',
    start_offset => INTERVAL '1 hour',
    end_offset => INTERVAL '1 minute',
    schedule_interval => INTERVAL '1 minute',
    if_not_exists => TRUE);
|
||||
|
||||
Reference in New Issue
Block a user