package analytics
import (
"context"
"fmt"
"strings"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// BridgeAnalytics provides bridge analytics backed by a PostgreSQL database.
type BridgeAnalytics struct {
	db *pgxpool.Pool // connection pool used for all analytics queries
}
// NewBridgeAnalytics creates a new bridge analytics instance backed by the
// given connection pool.
func NewBridgeAnalytics(db *pgxpool.Pool) *BridgeAnalytics {
	ba := &BridgeAnalytics{db: db}
	return ba
}
// BridgeStats represents bridge statistics aggregated over the selected
// time window (the trailing 24 hours by default — see GetBridgeStats).
type BridgeStats struct {
	Transfers24h int                // total transfer count in the window
	Volume24h    string             // total volume as a decimal string (SUM(amount)::text, "0" when empty)
	Chains       map[int]ChainStats // per-chain breakdown keyed by chain ID
	TopTokens    []TokenStats       // up to 10 busiest token contracts
}
// ChainStats represents per-chain transfer statistics. A transfer from
// chain A to chain B counts as outbound for A and inbound for B.
type ChainStats struct {
	Outbound  int    // transfers leaving this chain
	Inbound   int    // transfers arriving at this chain
	VolumeOut string // outbound volume as a decimal string ("0" when none)
	VolumeIn  string // inbound volume as a decimal string ("0" when none)
}
// TokenStats represents per-token transfer statistics.
type TokenStats struct {
	Token     string // token contract address
	Symbol    string // NOTE(review): never populated by the visible query — confirm it is filled elsewhere
	Transfers int    // number of transfers for this token
	Volume    string // total volume as a decimal string
}
// GetBridgeStats returns aggregate bridge-transfer statistics from
// analytics_bridge_history, optionally filtered by source chain,
// destination chain, and an explicit date range.
//
// When startDate is nil the query defaults to the trailing 24-hour window
// (matching the Transfers24h/Volume24h field names). When the caller
// supplies startDate, that explicit lower bound replaces the 24-hour
// default instead of being ANDed with it, so historical ranges are not
// silently clipped to the last day.
func (ba *BridgeAnalytics) GetBridgeStats(ctx context.Context, chainFrom, chainTo *int, startDate, endDate *time.Time) (*BridgeStats, error) {
	var clauses []string
	var args []any
	argIndex := 1
	if chainFrom != nil {
		clauses = append(clauses, fmt.Sprintf("chain_from = $%d", argIndex))
		args = append(args, *chainFrom)
		argIndex++
	}
	if chainTo != nil {
		clauses = append(clauses, fmt.Sprintf("chain_to = $%d", argIndex))
		args = append(args, *chainTo)
		argIndex++
	}
	if startDate != nil {
		clauses = append(clauses, fmt.Sprintf("timestamp >= $%d", argIndex))
		args = append(args, *startDate)
		argIndex++
	}
	if endDate != nil {
		clauses = append(clauses, fmt.Sprintf("timestamp <= $%d", argIndex))
		args = append(args, *endDate)
		argIndex++
	}
	// Default window: applied only when the caller gave no explicit lower
	// bound, so a historical startDate is not clipped to the last 24 hours.
	// This clause uses no placeholder, so argIndex numbering is unaffected.
	if startDate == nil {
		clauses = append(clauses, "timestamp >= NOW() - INTERVAL '24 hours'")
	}

	// Shared CTE: every statement below selects from the same filtered row set.
	filteredCTE := fmt.Sprintf(`
WITH filtered AS (
SELECT chain_from, chain_to, token_contract, amount
FROM analytics_bridge_history
WHERE %s
)
`, strings.Join(clauses, " AND "))

	stats := &BridgeStats{
		Chains:    make(map[int]ChainStats),
		TopTokens: []TokenStats{},
	}

	// Overall totals. SUM(amount) is cast to text to avoid numeric
	// precision loss; COALESCE covers the empty result set.
	err := ba.db.QueryRow(ctx, filteredCTE+`
SELECT COUNT(*) as transfers_24h, COALESCE(SUM(amount)::text, '0') as volume_24h
FROM filtered
`, args...).Scan(&stats.Transfers24h, &stats.Volume24h)
	if err != nil {
		return nil, fmt.Errorf("failed to get bridge stats: %w", err)
	}

	// Per-chain breakdown: each transfer contributes one outbound row for
	// chain_from and one inbound row for chain_to via UNION ALL.
	rows, err := ba.db.Query(ctx, filteredCTE+`
SELECT
chain_id,
SUM(outbound) as outbound,
SUM(inbound) as inbound,
COALESCE(SUM(volume_out)::text, '0') as volume_out,
COALESCE(SUM(volume_in)::text, '0') as volume_in
FROM (
SELECT chain_from AS chain_id, 1 AS outbound, 0 AS inbound, amount AS volume_out, 0::numeric AS volume_in
FROM filtered
UNION ALL
SELECT chain_to AS chain_id, 0 AS outbound, 1 AS inbound, 0::numeric AS volume_out, amount AS volume_in
FROM filtered
) chain_rollup
GROUP BY chain_id
ORDER BY chain_id
`, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get chain breakdown: %w", err)
	}
	for rows.Next() {
		var chainID, outbound, inbound int
		var volumeOut, volumeIn string
		// Scan failures are surfaced rather than silently dropped: a bad row
		// would otherwise produce an incomplete Chains map reported as success.
		if err := rows.Scan(&chainID, &outbound, &inbound, &volumeOut, &volumeIn); err != nil {
			rows.Close()
			return nil, fmt.Errorf("failed to scan chain breakdown: %w", err)
		}
		stats.Chains[chainID] = ChainStats{
			Outbound:  outbound,
			Inbound:   inbound,
			VolumeOut: volumeOut,
			VolumeIn:  volumeIn,
		}
	}
	// Close before the next query so the pool connection is released
	// promptly; Err reports any failure that ended the iteration early.
	rows.Close()
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to read chain breakdown: %w", err)
	}

	// Top tokens by transfer count (volume as tie-breaker), capped at 10.
	tokenRows, err := ba.db.Query(ctx, filteredCTE+`
SELECT
token_contract,
COUNT(*) as transfers,
COALESCE(SUM(amount)::text, '0') as volume
FROM filtered
WHERE token_contract IS NOT NULL AND token_contract <> ''
GROUP BY token_contract
ORDER BY transfers DESC, volume DESC
LIMIT 10
`, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get top bridge tokens: %w", err)
	}
	defer tokenRows.Close()
	for tokenRows.Next() {
		var token TokenStats
		if err := tokenRows.Scan(&token.Token, &token.Transfers, &token.Volume); err != nil {
			return nil, fmt.Errorf("failed to scan top bridge tokens: %w", err)
		}
		stats.TopTokens = append(stats.TopTokens, token)
	}
	if err := tokenRows.Err(); err != nil {
		return nil, fmt.Errorf("failed to read top bridge tokens: %w", err)
	}

	return stats, nil
}