Compare commits
1 Commits
devin/1776
...
devin/1776
| Author | SHA1 | Date | |
|---|---|---|---|
| 945e637d1d |
File diff suppressed because it is too large
Load Diff
381
backend/api/rest/ai_context.go
Normal file
381
backend/api/rest/ai_context.go
Normal file
@@ -0,0 +1,381 @@
|
||||
package rest
|
||||
|
||||
import (
	"context"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"
)
|
||||
|
||||
var (
	// addressPattern matches a 20-byte EVM address: "0x" plus 40 hex chars.
	// Note it also matches the first 40 hex chars of a 64-char tx hash.
	addressPattern = regexp.MustCompile(`0x[a-fA-F0-9]{40}`)
	// transactionPattern matches a 32-byte transaction hash: "0x" plus 64 hex chars.
	transactionPattern = regexp.MustCompile(`0x[a-fA-F0-9]{64}`)
	// blockRefPattern matches phrases like "block 123" or "Block #123"
	// (case-insensitive) and captures the block number in group 1.
	blockRefPattern = regexp.MustCompile(`(?i)\bblock\s+#?(\d+)\b`)
)
|
||||
|
||||
// buildAIContext assembles the retrieval envelope sent alongside a chat turn:
// indexed chain stats, any transaction/address/block the query references,
// live aggregation routes, and workspace doc snippets. It returns the
// envelope plus a deduplicated, sorted list of non-fatal warnings describing
// which context sources were unavailable.
func (s *Server) buildAIContext(ctx context.Context, query string, pageContext map[string]string) (AIContextEnvelope, []string) {
	warnings := []string{}
	envelope := AIContextEnvelope{
		ChainID:          s.chainID,
		Explorer:         "SolaceScan",
		PageContext:      compactStringMap(pageContext),
		CapabilityNotice: "This assistant is wired for read-only explorer analysis. It can summarize indexed chain data, liquidity routes, and curated workspace docs, but it does not sign transactions or execute private operations.",
	}

	// The REST backend itself is always listed as a source; the others are
	// appended only when their lookup succeeds.
	sources := []AIContextSource{
		{Type: "system", Label: "Explorer REST backend"},
	}

	if stats, err := s.queryAIStats(ctx); err == nil {
		envelope.Stats = stats
		sources = append(sources, AIContextSource{Type: "database", Label: "Explorer indexer database"})
	} else if err != nil { // err != nil is already implied by the else; kept as written
		warnings = append(warnings, "indexed explorer stats unavailable: "+err.Error())
	}

	if strings.TrimSpace(query) != "" {
		if txHash := firstRegexMatch(transactionPattern, query); txHash != "" && s.db != nil {
			if tx, err := s.queryAITransaction(ctx, txHash); err == nil && len(tx) > 0 {
				envelope.Transaction = tx
			} else if err != nil {
				warnings = append(warnings, "transaction context unavailable: "+err.Error())
			}
		}

		// NOTE(review): a 64-hex tx hash also satisfies addressPattern (its
		// first 40 hex chars), so a query containing only a tx hash will also
		// trigger an address lookup on that prefix — confirm this is intended.
		if addr := firstRegexMatch(addressPattern, query); addr != "" && s.db != nil {
			if addressInfo, err := s.queryAIAddress(ctx, addr); err == nil && len(addressInfo) > 0 {
				envelope.Address = addressInfo
			} else if err != nil {
				warnings = append(warnings, "address context unavailable: "+err.Error())
			}
		}

		if blockNumber := extractBlockReference(query); blockNumber > 0 && s.db != nil {
			if block, err := s.queryAIBlock(ctx, blockNumber); err == nil && len(block) > 0 {
				envelope.Block = block
			} else if err != nil {
				warnings = append(warnings, "block context unavailable: "+err.Error())
			}
		}
	}

	if routeMatches, routeWarning := s.queryAIRoutes(ctx, query); len(routeMatches) > 0 {
		envelope.RouteMatches = routeMatches
		sources = append(sources, AIContextSource{Type: "routes", Label: "Token aggregation live routes", Origin: firstNonEmptyEnv("TOKEN_AGGREGATION_API_BASE", "TOKEN_AGGREGATION_URL", "TOKEN_AGGREGATION_BASE_URL")})
	} else if routeWarning != "" {
		warnings = append(warnings, routeWarning)
	}

	if docs, root, docWarning := loadAIDocSnippets(query); len(docs) > 0 {
		envelope.DocSnippets = docs
		sources = append(sources, AIContextSource{Type: "docs", Label: "Workspace docs", Origin: root})
	} else if docWarning != "" {
		warnings = append(warnings, docWarning)
	}

	envelope.Sources = sources
	// uniqueStrings trims, dedupes, and sorts, so warning order is deterministic.
	return envelope, uniqueStrings(warnings)
}
|
||||
|
||||
// queryAIStats collects headline chain statistics (block/tx/address counts
// and latest block number) for the AI context. Each count is best-effort:
// individual query errors are deliberately ignored so a partial stats map can
// still be returned. When the chain_id-scoped schema yields nothing at all,
// the same aggregates are retried against an unscoped (Blockscout-style)
// schema. Fails only when the database handle is nil or no query succeeded.
func (s *Server) queryAIStats(ctx context.Context) (map[string]any, error) {
	if s.db == nil {
		return nil, fmt.Errorf("database unavailable")
	}
	// Hard cap on total DB time across all stats queries in this call.
	ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
	defer cancel()

	stats := map[string]any{}

	var totalBlocks int64
	if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM blocks WHERE chain_id = $1`, s.chainID).Scan(&totalBlocks); err == nil {
		stats["total_blocks"] = totalBlocks
	}

	var totalTransactions int64
	if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM transactions WHERE chain_id = $1`, s.chainID).Scan(&totalTransactions); err == nil {
		stats["total_transactions"] = totalTransactions
	}

	// The native schema has no addresses table, so distinct senders/receivers
	// across transactions stand in for an address count.
	var totalAddresses int64
	if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM (
		SELECT from_address AS address
		FROM transactions
		WHERE chain_id = $1 AND from_address IS NOT NULL AND from_address <> ''
		UNION
		SELECT to_address AS address
		FROM transactions
		WHERE chain_id = $1 AND to_address IS NOT NULL AND to_address <> ''
	) unique_addresses`, s.chainID).Scan(&totalAddresses); err == nil {
		stats["total_addresses"] = totalAddresses
	}

	var latestBlock int64
	if err := s.db.QueryRow(ctx, `SELECT COALESCE(MAX(number), 0) FROM blocks WHERE chain_id = $1`, s.chainID).Scan(&latestBlock); err == nil {
		stats["latest_block"] = latestBlock
	}

	// Fallback: presumably a Blockscout-style schema without a chain_id
	// column. Retried unscoped, and an addresses table is assumed to exist.
	if len(stats) == 0 {
		var totalBlocks int64
		if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM blocks`).Scan(&totalBlocks); err == nil {
			stats["total_blocks"] = totalBlocks
		}

		var totalTransactions int64
		if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM transactions`).Scan(&totalTransactions); err == nil {
			stats["total_transactions"] = totalTransactions
		}

		var totalAddresses int64
		if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM addresses`).Scan(&totalAddresses); err == nil {
			stats["total_addresses"] = totalAddresses
		}

		var latestBlock int64
		if err := s.db.QueryRow(ctx, `SELECT COALESCE(MAX(number), 0) FROM blocks`).Scan(&latestBlock); err == nil {
			stats["latest_block"] = latestBlock
		}
	}

	if len(stats) == 0 {
		return nil, fmt.Errorf("no indexed stats available")
	}
	return stats, nil
}
|
||||
|
||||
// queryAITransaction fetches a single transaction summary for the AI context.
// It tries the native indexer schema first (text hash, chain-scoped); on any
// error it falls back to a Blockscout-style schema (bytea hash columns,
// hex-encoded on the way out). If the fallback also fails, the ORIGINAL error
// is returned. Assumes s.db is non-nil (callers guard on it).
func (s *Server) queryAITransaction(ctx context.Context, hash string) (map[string]any, error) {
	ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
	defer cancel()

	query := `
		SELECT hash, block_number, from_address, to_address, value, gas_used, gas_price, status, timestamp_iso
		FROM transactions
		WHERE chain_id = $1 AND hash = $2
		LIMIT 1
	`

	var txHash, fromAddress, value string
	var blockNumber int64
	// Nullable columns are scanned through pointers and only copied into the
	// result map when present.
	var toAddress *string
	var gasUsed, gasPrice *int64
	var status *int64
	var timestampISO *string

	err := s.db.QueryRow(ctx, query, s.chainID, hash).Scan(
		&txHash, &blockNumber, &fromAddress, &toAddress, &value, &gasUsed, &gasPrice, &status, &timestampISO,
	)
	if err != nil {
		// Blockscout fallback: hash is stored as bytea, so strip the 0x
		// prefix and compare against decode(..., 'hex').
		normalizedHash := normalizeHexString(hash)
		blockscoutQuery := `
			SELECT
				concat('0x', encode(hash, 'hex')) AS hash,
				block_number,
				concat('0x', encode(from_address_hash, 'hex')) AS from_address,
				CASE
					WHEN to_address_hash IS NULL THEN NULL
					ELSE concat('0x', encode(to_address_hash, 'hex'))
				END AS to_address,
				COALESCE(value::text, '0') AS value,
				gas_used,
				gas_price,
				status,
				TO_CHAR(block_timestamp AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') AS timestamp_iso
			FROM transactions
			WHERE hash = decode($1, 'hex')
			LIMIT 1
		`
		if fallbackErr := s.db.QueryRow(ctx, blockscoutQuery, normalizedHash).Scan(
			&txHash, &blockNumber, &fromAddress, &toAddress, &value, &gasUsed, &gasPrice, &status, &timestampISO,
		); fallbackErr != nil {
			// Surface the primary-schema error, not the fallback's.
			return nil, err
		}
	}

	tx := map[string]any{
		"hash":         txHash,
		"block_number": blockNumber,
		"from_address": fromAddress,
		"value":        value,
	}
	if toAddress != nil {
		tx["to_address"] = *toAddress
	}
	if gasUsed != nil {
		tx["gas_used"] = *gasUsed
	}
	if gasPrice != nil {
		tx["gas_price"] = *gasPrice
	}
	if status != nil {
		tx["status"] = *status
	}
	if timestampISO != nil {
		tx["timestamp_iso"] = *timestampISO
	}
	return tx, nil
}
|
||||
|
||||
// queryAIAddress builds an activity summary for an address: transaction
// count, distinct-token count, and up to five recent tx hashes. It queries
// the native chain-scoped schema first; if nothing beyond the address itself
// was collected, it retries against a Blockscout-style bytea schema (cached
// counters from the addresses row, then live counts, then recent hashes).
// Returns an error only when no source produced any data. Assumes s.db is
// non-nil (callers guard on it).
func (s *Server) queryAIAddress(ctx context.Context, address string) (map[string]any, error) {
	ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
	defer cancel()

	// normalizeAddress is defined elsewhere in the package; presumably it
	// lower-cases/trims — the LOWER() comparisons below rely on lowercase
	// input. TODO(review): confirm against its definition.
	address = normalizeAddress(address)

	result := map[string]any{
		"address": address,
	}

	var txCount int64
	if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`, s.chainID, address).Scan(&txCount); err == nil {
		result["transaction_count"] = txCount
	}

	var tokenCount int64
	if err := s.db.QueryRow(ctx, `SELECT COUNT(DISTINCT token_contract) FROM token_transfers WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`, s.chainID, address).Scan(&tokenCount); err == nil {
		result["token_count"] = tokenCount
	}

	var recentHashes []string
	rows, err := s.db.Query(ctx, `
		SELECT hash
		FROM transactions
		WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)
		ORDER BY block_number DESC, transaction_index DESC
		LIMIT 5
	`, s.chainID, address)
	if err == nil {
		defer rows.Close()
		for rows.Next() {
			var hash string
			if scanErr := rows.Scan(&hash); scanErr == nil {
				recentHashes = append(recentHashes, hash)
			}
		}
	}
	if len(recentHashes) > 0 {
		result["recent_transactions"] = recentHashes
	}

	// Only the seeded "address" key present means the native schema yielded
	// nothing; fall back to the Blockscout-style schema.
	if len(result) == 1 {
		normalizedAddress := normalizeHexString(address)

		// Cached counters maintained on the addresses row.
		var blockscoutTxCount int64
		var blockscoutTokenCount int64
		blockscoutAddressQuery := `
			SELECT
				COALESCE(transactions_count, 0),
				COALESCE(token_transfers_count, 0)
			FROM addresses
			WHERE hash = decode($1, 'hex')
			LIMIT 1
		`
		if err := s.db.QueryRow(ctx, blockscoutAddressQuery, normalizedAddress).Scan(&blockscoutTxCount, &blockscoutTokenCount); err == nil {
			result["transaction_count"] = blockscoutTxCount
			result["token_count"] = blockscoutTokenCount
		}

		// Live counts override the cached counters, but only when positive.
		var liveTxCount int64
		if err := s.db.QueryRow(ctx, `
			SELECT COUNT(*)
			FROM transactions
			WHERE from_address_hash = decode($1, 'hex') OR to_address_hash = decode($1, 'hex')
		`, normalizedAddress).Scan(&liveTxCount); err == nil && liveTxCount > 0 {
			result["transaction_count"] = liveTxCount
		}

		var liveTokenCount int64
		if err := s.db.QueryRow(ctx, `
			SELECT COUNT(DISTINCT token_contract_address_hash)
			FROM token_transfers
			WHERE from_address_hash = decode($1, 'hex') OR to_address_hash = decode($1, 'hex')
		`, normalizedAddress).Scan(&liveTokenCount); err == nil && liveTokenCount > 0 {
			result["token_count"] = liveTokenCount
		}

		// This rows shadows the earlier one; its deferred Close also runs at
		// function return, which is acceptable for two short-lived result sets.
		rows, err := s.db.Query(ctx, `
			SELECT concat('0x', encode(hash, 'hex'))
			FROM transactions
			WHERE from_address_hash = decode($1, 'hex') OR to_address_hash = decode($1, 'hex')
			ORDER BY block_number DESC, index DESC
			LIMIT 5
		`, normalizedAddress)
		if err == nil {
			defer rows.Close()
			for rows.Next() {
				var hash string
				if scanErr := rows.Scan(&hash); scanErr == nil {
					recentHashes = append(recentHashes, hash)
				}
			}
		}
		if len(recentHashes) > 0 {
			result["recent_transactions"] = recentHashes
		}
	}

	if len(result) == 1 {
		return nil, fmt.Errorf("address not found")
	}
	return result, nil
}
|
||||
|
||||
// queryAIBlock fetches a one-block summary for the AI context. The native
// chain-scoped schema is tried first; on any error it falls back to a
// Blockscout-style schema (bytea hashes hex-encoded, tx count computed via a
// correlated subquery). If the fallback also fails, the ORIGINAL error is
// returned. Assumes s.db is non-nil (callers guard on it).
func (s *Server) queryAIBlock(ctx context.Context, blockNumber int64) (map[string]any, error) {
	ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
	defer cancel()

	query := `
		SELECT number, hash, parent_hash, transaction_count, gas_used, gas_limit, timestamp_iso
		FROM blocks
		WHERE chain_id = $1 AND number = $2
		LIMIT 1
	`

	var number int64
	var hash, parentHash string
	var transactionCount int64
	var gasUsed, gasLimit int64
	// timestamp_iso is nullable; copied into the map only when present.
	var timestampISO *string

	err := s.db.QueryRow(ctx, query, s.chainID, blockNumber).Scan(&number, &hash, &parentHash, &transactionCount, &gasUsed, &gasLimit, &timestampISO)
	if err != nil {
		blockscoutQuery := `
			SELECT
				number,
				concat('0x', encode(hash, 'hex')) AS hash,
				concat('0x', encode(parent_hash, 'hex')) AS parent_hash,
				(SELECT COUNT(*) FROM transactions WHERE block_number = b.number) AS transaction_count,
				gas_used,
				gas_limit,
				TO_CHAR(timestamp AT TIME ZONE 'UTC', 'YYYY-MM-DD"T"HH24:MI:SS"Z"') AS timestamp_iso
			FROM blocks b
			WHERE number = $1
			LIMIT 1
		`
		if fallbackErr := s.db.QueryRow(ctx, blockscoutQuery, blockNumber).Scan(&number, &hash, &parentHash, &transactionCount, &gasUsed, &gasLimit, &timestampISO); fallbackErr != nil {
			// Surface the primary-schema error, not the fallback's.
			return nil, err
		}
	}

	block := map[string]any{
		"number":            number,
		"hash":              hash,
		"parent_hash":       parentHash,
		"transaction_count": transactionCount,
		"gas_used":          gasUsed,
		"gas_limit":         gasLimit,
	}
	if timestampISO != nil {
		block["timestamp_iso"] = *timestampISO
	}
	return block, nil
}
|
||||
|
||||
func extractBlockReference(query string) int64 {
|
||||
match := blockRefPattern.FindStringSubmatch(query)
|
||||
if len(match) != 2 {
|
||||
return 0
|
||||
}
|
||||
var value int64
|
||||
fmt.Sscan(match[1], &value)
|
||||
return value
|
||||
}
|
||||
136
backend/api/rest/ai_docs.go
Normal file
136
backend/api/rest/ai_docs.go
Normal file
@@ -0,0 +1,136 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// loadAIDocSnippets retrieves up to maxExplorerAIDocSnippets short excerpts
// from a curated list of workspace markdown docs that match terms derived
// from the user query. It returns the snippets, the workspace root used, and
// a warning string ("" on success).
func loadAIDocSnippets(query string) ([]AIDocSnippet, string, string) {
	root := findAIWorkspaceRoot()
	if root == "" {
		return nil, "", "workspace docs root unavailable for ai doc retrieval"
	}

	// Curated doc set, relative to the workspace root.
	relativePaths := []string{
		"docs/11-references/ADDRESS_MATRIX_AND_STATUS.md",
		"docs/11-references/LIQUIDITY_POOLS_MASTER_MAP.md",
		"docs/11-references/DEPLOYED_TOKENS_BRIDGES_LPS_AND_ROUTING_STATUS.md",
		"docs/11-references/EXPLORER_TOKEN_LIST_CROSSCHECK.md",
		"explorer-monorepo/docs/EXPLORER_API_ACCESS.md",
	}

	terms := buildDocSearchTerms(query)
	if len(terms) == 0 {
		// Generic fallback terms so an empty or all-stop-word query still
		// surfaces representative docs.
		terms = []string{"chain 138", "bridge", "liquidity"}
	}

	snippets := []AIDocSnippet{}
	for _, rel := range relativePaths {
		fullPath := filepath.Join(root, rel)
		fileSnippets := scanDocForTerms(fullPath, rel, terms)
		snippets = append(snippets, fileSnippets...)
		if len(snippets) >= maxExplorerAIDocSnippets {
			break
		}
	}
	if len(snippets) == 0 {
		return nil, root, "no matching workspace docs found for ai context"
	}
	// The last file scanned can overshoot the cap; trim to the limit.
	if len(snippets) > maxExplorerAIDocSnippets {
		snippets = snippets[:maxExplorerAIDocSnippets]
	}
	return snippets, root, ""
}
|
||||
|
||||
func findAIWorkspaceRoot() string {
|
||||
candidates := []string{}
|
||||
if envRoot := strings.TrimSpace(os.Getenv("EXPLORER_AI_WORKSPACE_ROOT")); envRoot != "" {
|
||||
candidates = append(candidates, envRoot)
|
||||
}
|
||||
if cwd, err := os.Getwd(); err == nil {
|
||||
candidates = append(candidates, cwd)
|
||||
dir := cwd
|
||||
for i := 0; i < 4; i++ {
|
||||
dir = filepath.Dir(dir)
|
||||
candidates = append(candidates, dir)
|
||||
}
|
||||
}
|
||||
candidates = append(candidates, "/opt/explorer-monorepo", "/home/intlc/projects/proxmox")
|
||||
|
||||
for _, candidate := range candidates {
|
||||
if candidate == "" {
|
||||
continue
|
||||
}
|
||||
if fileExists(filepath.Join(candidate, "docs")) && (fileExists(filepath.Join(candidate, "explorer-monorepo")) || fileExists(filepath.Join(candidate, "smom-dbis-138")) || fileExists(filepath.Join(candidate, "config"))) {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func scanDocForTerms(fullPath, relativePath string, terms []string) []AIDocSnippet {
|
||||
file, err := os.Open(fullPath)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
normalizedTerms := make([]string, 0, len(terms))
|
||||
for _, term := range terms {
|
||||
term = strings.ToLower(strings.TrimSpace(term))
|
||||
if len(term) >= 3 {
|
||||
normalizedTerms = append(normalizedTerms, term)
|
||||
}
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
lineNumber := 0
|
||||
snippets := []AIDocSnippet{}
|
||||
for scanner.Scan() {
|
||||
lineNumber++
|
||||
line := scanner.Text()
|
||||
lower := strings.ToLower(line)
|
||||
for _, term := range normalizedTerms {
|
||||
if strings.Contains(lower, term) {
|
||||
snippets = append(snippets, AIDocSnippet{
|
||||
Path: relativePath,
|
||||
Line: lineNumber,
|
||||
Snippet: clipString(strings.TrimSpace(line), 280),
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(snippets) >= 2 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return snippets
|
||||
}
|
||||
|
||||
func buildDocSearchTerms(query string) []string {
|
||||
words := strings.Fields(strings.ToLower(query))
|
||||
stopWords := map[string]bool{
|
||||
"what": true, "when": true, "where": true, "which": true, "with": true, "from": true,
|
||||
"that": true, "this": true, "have": true, "about": true, "into": true, "show": true,
|
||||
"live": true, "help": true, "explain": true, "tell": true,
|
||||
}
|
||||
terms := []string{}
|
||||
for _, word := range words {
|
||||
word = strings.Trim(word, ".,:;!?()[]{}\"'")
|
||||
if len(word) < 4 || stopWords[word] {
|
||||
continue
|
||||
}
|
||||
terms = append(terms, word)
|
||||
}
|
||||
for _, match := range addressPattern.FindAllString(query, -1) {
|
||||
terms = append(terms, strings.ToLower(match))
|
||||
}
|
||||
for _, symbol := range []string{"cUSDT", "cUSDC", "cXAUC", "cEURT", "USDT", "USDC", "WETH", "WETH10", "Mainnet", "bridge", "liquidity", "pool"} {
|
||||
if strings.Contains(strings.ToLower(query), strings.ToLower(symbol)) {
|
||||
terms = append(terms, strings.ToLower(symbol))
|
||||
}
|
||||
}
|
||||
return uniqueStrings(terms)
|
||||
}
|
||||
112
backend/api/rest/ai_helpers.go
Normal file
112
backend/api/rest/ai_helpers.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package rest
|
||||
|
||||
import (
	"fmt"
	"os"
	"regexp"
	"sort"
	"strings"
	"unicode/utf8"
)
|
||||
|
||||
func firstRegexMatch(pattern *regexp.Regexp, value string) string {
|
||||
match := pattern.FindString(value)
|
||||
return strings.TrimSpace(match)
|
||||
}
|
||||
|
||||
// compactStringMap returns a copy of values with each entry whitespace-
// trimmed and blank entries dropped. When nothing survives (or the input is
// empty) it returns nil so JSON omitempty behavior applies.
func compactStringMap(values map[string]string) map[string]string {
	if len(values) == 0 {
		return nil
	}
	compacted := make(map[string]string, len(values))
	for key, raw := range values {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		compacted[key] = trimmed
	}
	if len(compacted) == 0 {
		return nil
	}
	return compacted
}
|
||||
|
||||
// compactAnyMap copies values, dropping entries that carry no information:
// nil, blank strings, and empty []string / []any slices. Other types
// (numbers, bools, maps, ...) are always kept.
func compactAnyMap(values map[string]any) map[string]any {
	compacted := map[string]any{}
	for key, value := range values {
		if isEmptyAIValue(value) {
			continue
		}
		compacted[key] = value
	}
	return compacted
}

// isEmptyAIValue reports whether a context value is nil, a blank string, or
// an empty slice of strings or any.
func isEmptyAIValue(value any) bool {
	switch typed := value.(type) {
	case nil:
		return true
	case string:
		return strings.TrimSpace(typed) == ""
	case []string:
		return len(typed) == 0
	case []any:
		return len(typed) == 0
	}
	return false
}
|
||||
|
||||
// stringValue renders an arbitrary context value as a string: strings pass
// through, fmt.Stringer implementations use String(), and anything else is
// formatted with %v.
func stringValue(value any) string {
	if s, ok := value.(string); ok {
		return s
	}
	if stringer, ok := value.(fmt.Stringer); ok {
		return stringer.String()
	}
	return fmt.Sprintf("%v", value)
}
|
||||
|
||||
func stringSliceValue(value any) []string {
|
||||
switch typed := value.(type) {
|
||||
case []string:
|
||||
return typed
|
||||
case []any:
|
||||
out := make([]string, 0, len(typed))
|
||||
for _, item := range typed {
|
||||
out = append(out, stringValue(item))
|
||||
}
|
||||
return out
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// uniqueStrings trims each value, drops blanks and duplicates, and returns
// the survivors sorted ascending — so callers get deterministic output
// regardless of input order.
func uniqueStrings(values []string) []string {
	seen := make(map[string]bool, len(values))
	deduped := []string{}
	for _, raw := range values {
		value := strings.TrimSpace(raw)
		if value == "" || seen[value] {
			continue
		}
		seen[value] = true
		deduped = append(deduped, value)
	}
	sort.Strings(deduped)
	return deduped
}
|
||||
|
||||
// clipString trims value and truncates it to at most limit bytes, appending
// "..." when anything was cut. The cut point is backed up to the nearest
// rune boundary so the result is always valid UTF-8 — the original byte
// slice could split a multi-byte rune in half. limit <= 0 disables clipping.
func clipString(value string, limit int) string {
	value = strings.TrimSpace(value)
	if limit <= 0 || len(value) <= limit {
		return value
	}
	cut := limit
	// Walk back over UTF-8 continuation bytes so we never cut mid-rune.
	for cut > 0 && !utf8.RuneStart(value[cut]) {
		cut--
	}
	return strings.TrimSpace(value[:cut]) + "..."
}
|
||||
|
||||
// fileExists reports whether path names an existing file or directory; an
// empty path is always reported as absent.
func fileExists(path string) bool {
	if path == "" {
		return false
	}
	// os.Stat returns a non-nil FileInfo whenever err is nil, so the
	// original "info != nil" check was redundant.
	_, err := os.Stat(path)
	return err == nil
}
|
||||
139
backend/api/rest/ai_routes.go
Normal file
139
backend/api/rest/ai_routes.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// queryAIRoutes pulls live Chain-138 routes from the token aggregation
// service for the AI context. It returns matched routes and "" on success,
// or nil plus a human-readable warning on any failure (unconfigured base
// URL, transport error, HTTP >= 400, undecodable JSON, or an empty route
// list).
func (s *Server) queryAIRoutes(ctx context.Context, query string) ([]map[string]any, string) {
	// The base URL can come from any of three env vars, first non-empty wins.
	baseURL := strings.TrimSpace(firstNonEmptyEnv(
		"TOKEN_AGGREGATION_API_BASE",
		"TOKEN_AGGREGATION_URL",
		"TOKEN_AGGREGATION_BASE_URL",
	))
	if baseURL == "" {
		return nil, "token aggregation api base url is not configured for ai route retrieval"
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, strings.TrimRight(baseURL, "/")+"/api/v1/routes/ingestion?fromChainId=138", nil)
	if err != nil {
		return nil, "unable to build token aggregation ai request"
	}

	// Per-call client with a hard 6s cap, in addition to any ctx deadline.
	client := &http.Client{Timeout: 6 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, "token aggregation live routes unavailable: " + err.Error()
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return nil, fmt.Sprintf("token aggregation live routes returned %d", resp.StatusCode)
	}

	var payload struct {
		Routes []map[string]any `json:"routes"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, "unable to decode token aggregation live routes"
	}
	if len(payload.Routes) == 0 {
		return nil, "token aggregation returned no live routes"
	}

	matches := filterAIRouteMatches(payload.Routes, query)
	return matches, ""
}
|
||||
|
||||
func filterAIRouteMatches(routes []map[string]any, query string) []map[string]any {
|
||||
query = strings.ToLower(strings.TrimSpace(query))
|
||||
matches := make([]map[string]any, 0, 6)
|
||||
for _, route := range routes {
|
||||
if query != "" && !routeMatchesQuery(route, query) {
|
||||
continue
|
||||
}
|
||||
trimmed := map[string]any{
|
||||
"routeId": route["routeId"],
|
||||
"status": route["status"],
|
||||
"routeType": route["routeType"],
|
||||
"fromChainId": route["fromChainId"],
|
||||
"toChainId": route["toChainId"],
|
||||
"tokenInSymbol": route["tokenInSymbol"],
|
||||
"tokenOutSymbol": route["tokenOutSymbol"],
|
||||
"assetSymbol": route["assetSymbol"],
|
||||
"label": route["label"],
|
||||
"aggregatorFamilies": route["aggregatorFamilies"],
|
||||
"hopCount": route["hopCount"],
|
||||
"bridgeType": route["bridgeType"],
|
||||
"tags": route["tags"],
|
||||
}
|
||||
matches = append(matches, compactAnyMap(trimmed))
|
||||
if len(matches) >= 6 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(matches) == 0 {
|
||||
for _, route := range routes {
|
||||
trimmed := map[string]any{
|
||||
"routeId": route["routeId"],
|
||||
"status": route["status"],
|
||||
"routeType": route["routeType"],
|
||||
"fromChainId": route["fromChainId"],
|
||||
"toChainId": route["toChainId"],
|
||||
"tokenInSymbol": route["tokenInSymbol"],
|
||||
"tokenOutSymbol": route["tokenOutSymbol"],
|
||||
"assetSymbol": route["assetSymbol"],
|
||||
"label": route["label"],
|
||||
"aggregatorFamilies": route["aggregatorFamilies"],
|
||||
}
|
||||
matches = append(matches, compactAnyMap(trimmed))
|
||||
if len(matches) >= 4 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
// normalizeHexString lower-cases a trimmed hex string and strips any "0x"
// prefix, producing the bare-hex form expected by decode(..., 'hex') queries.
func normalizeHexString(value string) string {
	return strings.TrimPrefix(strings.ToLower(strings.TrimSpace(value)), "0x")
}
|
||||
|
||||
func routeMatchesQuery(route map[string]any, query string) bool {
|
||||
fields := []string{
|
||||
stringValue(route["routeId"]),
|
||||
stringValue(route["routeType"]),
|
||||
stringValue(route["tokenInSymbol"]),
|
||||
stringValue(route["tokenOutSymbol"]),
|
||||
stringValue(route["assetSymbol"]),
|
||||
stringValue(route["label"]),
|
||||
}
|
||||
for _, field := range fields {
|
||||
if strings.Contains(strings.ToLower(field), query) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, value := range stringSliceValue(route["aggregatorFamilies"]) {
|
||||
if strings.Contains(strings.ToLower(value), query) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, value := range stringSliceValue(route["tags"]) {
|
||||
if strings.Contains(strings.ToLower(value), query) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, symbol := range []string{"cusdt", "cusdc", "cxauc", "ceurt", "usdt", "usdc", "weth"} {
|
||||
if strings.Contains(query, symbol) {
|
||||
if strings.Contains(strings.ToLower(strings.Join(fields, " ")), symbol) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
267
backend/api/rest/ai_xai.go
Normal file
267
backend/api/rest/ai_xai.go
Normal file
@@ -0,0 +1,267 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// xAIChatCompletionsRequest is the request body for the xAI
// (OpenAI-compatible) POST /chat/completions endpoint.
type xAIChatCompletionsRequest struct {
	Model    string              `json:"model"`
	Messages []xAIChatMessageReq `json:"messages"`
	Stream   bool                `json:"stream"`
}

// xAIChatMessageReq is one role/content message in the outbound request.
type xAIChatMessageReq struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// xAIChatCompletionsResponse covers the response shapes this client handles:
// classic chat-completions choices plus output_text / output fields
// (presumably Responses-API-style payloads — see the reply fallback chain in
// callXAIChatCompletions).
type xAIChatCompletionsResponse struct {
	Model      string             `json:"model"`
	Choices    []xAIChoice        `json:"choices"`
	OutputText string             `json:"output_text,omitempty"`
	Output     []openAIOutputItem `json:"output,omitempty"`
}

// xAIChoice is one completion choice; only its message is consumed.
type xAIChoice struct {
	Message xAIChoiceMessage `json:"message"`
}

// xAIChoiceMessage is the assistant message carried inside a choice.
type xAIChoiceMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// openAIOutputItem is one entry of an output-style response payload.
type openAIOutputItem struct {
	Type    string                `json:"type"`
	Content []openAIOutputContent `json:"content"`
}

// openAIOutputContent is a text fragment within an output item.
type openAIOutputContent struct {
	Type string `json:"type"`
	Text string `json:"text"`
}
|
||||
|
||||
func normalizeAIMessages(messages []AIChatMessage) []AIChatMessage {
|
||||
normalized := make([]AIChatMessage, 0, len(messages))
|
||||
for _, message := range messages {
|
||||
role := strings.ToLower(strings.TrimSpace(message.Role))
|
||||
if role != "assistant" && role != "user" && role != "system" {
|
||||
continue
|
||||
}
|
||||
content := clipString(strings.TrimSpace(message.Content), maxExplorerAIMessageChars)
|
||||
if content == "" {
|
||||
continue
|
||||
}
|
||||
normalized = append(normalized, AIChatMessage{
|
||||
Role: role,
|
||||
Content: content,
|
||||
})
|
||||
}
|
||||
if len(normalized) > maxExplorerAIMessages {
|
||||
normalized = normalized[len(normalized)-maxExplorerAIMessages:]
|
||||
}
|
||||
return normalized
|
||||
}
|
||||
|
||||
func latestUserMessage(messages []AIChatMessage) string {
|
||||
for i := len(messages) - 1; i >= 0; i-- {
|
||||
if messages[i].Role == "user" {
|
||||
return messages[i].Content
|
||||
}
|
||||
}
|
||||
if len(messages) == 0 {
|
||||
return ""
|
||||
}
|
||||
return messages[len(messages)-1].Content
|
||||
}
|
||||
|
||||
// callXAIChatCompletions sends the normalized transcript plus the retrieval
// context to the xAI chat-completions API and returns (reply, model, error).
// The returned model is the configured one unless the upstream response names
// another. Upstream failures are surfaced as *AIUpstreamError values carrying
// an HTTP status, machine-readable code, and details for the API layer.
func (s *Server) callXAIChatCompletions(ctx context.Context, messages []AIChatMessage, contextEnvelope AIContextEnvelope) (string, string, error) {
	apiKey := strings.TrimSpace(os.Getenv("XAI_API_KEY"))
	if apiKey == "" {
		return "", "", fmt.Errorf("XAI_API_KEY is not configured")
	}

	model := explorerAIModel()
	baseURL := strings.TrimRight(strings.TrimSpace(os.Getenv("XAI_BASE_URL")), "/")
	if baseURL == "" {
		baseURL = "https://api.x.ai/v1"
	}

	// Marshal error deliberately ignored: a failed marshal only degrades the
	// context text handed to the model, it does not abort the call.
	contextJSON, _ := json.MarshalIndent(contextEnvelope, "", " ")
	contextText := clipString(string(contextJSON), maxExplorerAIContextChars)

	baseSystem := "You are the SolaceScan ecosystem assistant for Chain 138. Answer using the supplied indexed explorer data, route inventory, and workspace documentation. Be concise, operationally useful, and explicit about uncertainty. Never claim a route, deployment, or production status is live unless the provided context says it is live. If data is missing, say exactly what is missing."
	// Extra guardrail text is appended whenever operator tooling is disabled.
	if !explorerAIOperatorToolsEnabled() {
		baseSystem += " Never instruct users to paste private keys or seed phrases. Do not direct users to run privileged mint, liquidity, or bridge execution from the public explorer UI. Operator changes belong on LAN-gated workflows and authenticated Track 4 APIs; PMM/MCP-style execution tools are disabled on this deployment unless EXPLORER_AI_OPERATOR_TOOLS_ENABLED=1."
	}

	// Two system messages (instructions, then retrieved context) precede the
	// user/assistant transcript.
	input := []xAIChatMessageReq{
		{
			Role:    "system",
			Content: baseSystem,
		},
		{
			Role:    "system",
			Content: "Retrieved ecosystem context:\n" + contextText,
		},
	}

	for _, message := range messages {
		input = append(input, xAIChatMessageReq{
			Role:    message.Role,
			Content: message.Content,
		})
	}

	payload := xAIChatCompletionsRequest{
		Model:    model,
		Messages: input,
		Stream:   false,
	}

	body, err := json.Marshal(payload)
	if err != nil {
		return "", model, err
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+"/chat/completions", bytes.NewReader(body))
	if err != nil {
		return "", model, err
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")

	// Hard 45s client cap in addition to whatever deadline ctx carries.
	client := &http.Client{Timeout: 45 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		// Distinguish ctx deadline expiry (504) from other transport
		// failures (502) so callers can report the right status.
		if errors.Is(err, context.DeadlineExceeded) {
			return "", model, &AIUpstreamError{
				StatusCode: http.StatusGatewayTimeout,
				Code:       "upstream_timeout",
				Message:    "explorer ai upstream timed out",
				Details:    "xAI request exceeded the configured timeout",
			}
		}
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code:       "upstream_transport_error",
			Message:    "explorer ai upstream transport failed",
			Details:    err.Error(),
		}
	}
	defer resp.Body.Close()

	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code:       "upstream_bad_response",
			Message:    "explorer ai upstream body could not be read",
			Details:    err.Error(),
		}
	}
	if resp.StatusCode >= 400 {
		return "", model, parseXAIError(resp.StatusCode, responseBody)
	}

	var response xAIChatCompletionsResponse
	if err := json.Unmarshal(responseBody, &response); err != nil {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code:       "upstream_bad_response",
			Message:    "explorer ai upstream returned invalid JSON",
			Details:    err.Error(),
		}
	}

	// Reply extraction falls back across the known response shapes:
	// choices[0].message.content, then output_text, then the output[] items
	// (flattened by extractOutputText, defined elsewhere in the package).
	reply := ""
	if len(response.Choices) > 0 {
		reply = strings.TrimSpace(response.Choices[0].Message.Content)
	}
	if reply == "" {
		reply = strings.TrimSpace(response.OutputText)
	}
	if reply == "" {
		reply = strings.TrimSpace(extractOutputText(response.Output))
	}
	if reply == "" {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code:       "upstream_bad_response",
			Message:    "explorer ai upstream returned no output text",
			Details:    "xAI response did not include choices[0].message.content or output text",
		}
	}
	// Prefer the model name the upstream actually used, when reported.
	if strings.TrimSpace(response.Model) != "" {
		model = response.Model
	}
	return reply, model, nil
}
|
||||
|
||||
func parseXAIError(statusCode int, responseBody []byte) error {
|
||||
var parsed struct {
|
||||
Error struct {
|
||||
Message string `json:"message"`
|
||||
Type string `json:"type"`
|
||||
Code string `json:"code"`
|
||||
} `json:"error"`
|
||||
}
|
||||
_ = json.Unmarshal(responseBody, &parsed)
|
||||
|
||||
details := clipString(strings.TrimSpace(parsed.Error.Message), 280)
|
||||
if details == "" {
|
||||
details = clipString(strings.TrimSpace(string(responseBody)), 280)
|
||||
}
|
||||
|
||||
switch statusCode {
|
||||
case http.StatusUnauthorized, http.StatusForbidden:
|
||||
return &AIUpstreamError{
|
||||
StatusCode: statusCode,
|
||||
Code: "upstream_auth_failed",
|
||||
Message: "explorer ai upstream authentication failed",
|
||||
Details: details,
|
||||
}
|
||||
case http.StatusTooManyRequests:
|
||||
return &AIUpstreamError{
|
||||
StatusCode: statusCode,
|
||||
Code: "upstream_quota_exhausted",
|
||||
Message: "explorer ai upstream quota exhausted",
|
||||
Details: details,
|
||||
}
|
||||
case http.StatusRequestTimeout, http.StatusGatewayTimeout:
|
||||
return &AIUpstreamError{
|
||||
StatusCode: statusCode,
|
||||
Code: "upstream_timeout",
|
||||
Message: "explorer ai upstream timed out",
|
||||
Details: details,
|
||||
}
|
||||
default:
|
||||
return &AIUpstreamError{
|
||||
StatusCode: statusCode,
|
||||
Code: "upstream_error",
|
||||
Message: "explorer ai upstream request failed",
|
||||
Details: details,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func extractOutputText(items []openAIOutputItem) string {
|
||||
parts := []string{}
|
||||
for _, item := range items {
|
||||
for _, content := range item.Content {
|
||||
if strings.TrimSpace(content.Text) != "" {
|
||||
parts = append(parts, strings.TrimSpace(content.Text))
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, "\n\n")
|
||||
}
|
||||
@@ -141,12 +141,49 @@ type internalValidateAPIKeyRequest struct {
|
||||
LastIP string `json:"last_ip"`
|
||||
}
|
||||
|
||||
// rpcAccessProducts returns the Chain 138 RPC access catalog. The source
|
||||
// of truth lives in config/rpc_products.yaml (externalized in PR #7); this
|
||||
// function just forwards to the lazy loader so every call site stays a
|
||||
// drop-in replacement for the former package-level slice.
|
||||
func rpcAccessProducts() []accessProduct {
|
||||
return rpcAccessProductCatalog()
|
||||
var rpcAccessProducts = []accessProduct{
|
||||
{
|
||||
Slug: "core-rpc",
|
||||
Name: "Core RPC",
|
||||
Provider: "besu-core",
|
||||
VMID: 2101,
|
||||
HTTPURL: "https://rpc-http-prv.d-bis.org",
|
||||
WSURL: "wss://rpc-ws-prv.d-bis.org",
|
||||
DefaultTier: "enterprise",
|
||||
RequiresApproval: true,
|
||||
BillingModel: "contract",
|
||||
Description: "Private Chain 138 Core RPC for operator-grade administration and sensitive workloads.",
|
||||
UseCases: []string{"core deployments", "operator automation", "private infrastructure integration"},
|
||||
ManagementFeatures: []string{"dedicated API key", "higher rate ceiling", "operator-oriented access controls"},
|
||||
},
|
||||
{
|
||||
Slug: "alltra-rpc",
|
||||
Name: "Alltra RPC",
|
||||
Provider: "alltra",
|
||||
VMID: 2102,
|
||||
HTTPURL: "http://192.168.11.212:8545",
|
||||
WSURL: "ws://192.168.11.212:8546",
|
||||
DefaultTier: "pro",
|
||||
RequiresApproval: false,
|
||||
BillingModel: "subscription",
|
||||
Description: "Dedicated Alltra-managed RPC lane for partner traffic, subscription access, and API-key-gated usage.",
|
||||
UseCases: []string{"tenant RPC access", "managed partner workloads", "metered commercial usage"},
|
||||
ManagementFeatures: []string{"subscription-ready key issuance", "rate governance", "partner-specific traffic lane"},
|
||||
},
|
||||
{
|
||||
Slug: "thirdweb-rpc",
|
||||
Name: "Thirdweb RPC",
|
||||
Provider: "thirdweb",
|
||||
VMID: 2103,
|
||||
HTTPURL: "http://192.168.11.217:8545",
|
||||
WSURL: "ws://192.168.11.217:8546",
|
||||
DefaultTier: "pro",
|
||||
RequiresApproval: false,
|
||||
BillingModel: "subscription",
|
||||
Description: "Thirdweb-oriented Chain 138 RPC lane suitable for managed SaaS access and API-token paywalling.",
|
||||
UseCases: []string{"thirdweb integrations", "commercial API access", "managed dApp traffic"},
|
||||
ManagementFeatures: []string{"API token issuance", "usage tiering", "future paywall/subscription hooks"},
|
||||
},
|
||||
}
|
||||
|
||||
func (s *Server) generateUserJWT(user *auth.User) (string, time.Time, error) {
|
||||
@@ -329,7 +366,7 @@ func (s *Server) handleAccessProducts(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(map[string]any{
|
||||
"products": rpcAccessProducts(),
|
||||
"products": rpcAccessProducts,
|
||||
"note": "Products are ready for auth, API key, and subscription gating. Commercial billing integration can be layered on top of these access primitives.",
|
||||
})
|
||||
}
|
||||
@@ -587,7 +624,7 @@ func firstNonEmpty(values ...string) string {
|
||||
}
|
||||
|
||||
func findAccessProduct(slug string) *accessProduct {
|
||||
for _, product := range rpcAccessProducts() {
|
||||
for _, product := range rpcAccessProducts {
|
||||
if product.Slug == slug {
|
||||
copy := product
|
||||
return ©
|
||||
|
||||
@@ -1,206 +0,0 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// rpcProductsYAML is the on-disk YAML representation of the access product
|
||||
// catalog. It matches config/rpc_products.yaml at the repo root.
|
||||
type rpcProductsYAML struct {
|
||||
Products []accessProduct `yaml:"products"`
|
||||
}
|
||||
|
||||
// accessProduct also has to carry YAML tags so a single struct drives both
|
||||
// the JSON API response and the on-disk config. (JSON tags are unchanged.)
|
||||
// These yaml tags mirror the json tags exactly to avoid drift.
|
||||
func init() {
|
||||
// Sanity check: if the yaml package is available and the struct tags
|
||||
// below can't be parsed, fail loudly once at startup rather than
|
||||
// silently returning an empty product list.
|
||||
var _ yaml.Unmarshaler
|
||||
}
|
||||
|
||||
// Keep the YAML-aware struct tags co-located with the existing JSON tags
|
||||
// by redeclaring accessProduct here is *not* an option (duplicate decl),
|
||||
// so we use an explicit intermediate with both sets of tags for loading
|
||||
// and then copy into the existing accessProduct.
|
||||
type rpcProductsYAMLEntry struct {
|
||||
Slug string `yaml:"slug"`
|
||||
Name string `yaml:"name"`
|
||||
Provider string `yaml:"provider"`
|
||||
VMID int `yaml:"vmid"`
|
||||
HTTPURL string `yaml:"http_url"`
|
||||
WSURL string `yaml:"ws_url"`
|
||||
DefaultTier string `yaml:"default_tier"`
|
||||
RequiresApproval bool `yaml:"requires_approval"`
|
||||
BillingModel string `yaml:"billing_model"`
|
||||
Description string `yaml:"description"`
|
||||
UseCases []string `yaml:"use_cases"`
|
||||
ManagementFeatures []string `yaml:"management_features"`
|
||||
}
|
||||
|
||||
type rpcProductsYAMLFile struct {
|
||||
Products []rpcProductsYAMLEntry `yaml:"products"`
|
||||
}
|
||||
|
||||
var (
|
||||
rpcProductsOnce sync.Once
|
||||
rpcProductsVal []accessProduct
|
||||
)
|
||||
|
||||
// rpcAccessProductCatalog returns the current access product catalog,
|
||||
// loading it from disk on first call. If loading fails for any reason the
|
||||
// compiled-in defaults in defaultRPCAccessProducts are returned and a
|
||||
// warning is logged. Callers should treat the returned slice as read-only.
|
||||
func rpcAccessProductCatalog() []accessProduct {
|
||||
rpcProductsOnce.Do(func() {
|
||||
loaded, path, err := loadRPCAccessProducts()
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Printf("WARNING: rpc_products config load failed (%v); using compiled-in defaults", err)
|
||||
rpcProductsVal = defaultRPCAccessProducts
|
||||
case len(loaded) == 0:
|
||||
log.Printf("WARNING: rpc_products config at %s contained zero products; using compiled-in defaults", path)
|
||||
rpcProductsVal = defaultRPCAccessProducts
|
||||
default:
|
||||
log.Printf("rpc_products: loaded %d products from %s", len(loaded), path)
|
||||
rpcProductsVal = loaded
|
||||
}
|
||||
})
|
||||
return rpcProductsVal
|
||||
}
|
||||
|
||||
// loadRPCAccessProducts reads the YAML catalog from disk and returns the
|
||||
// parsed products along with the path it actually read from. An empty
|
||||
// returned path indicates that no candidate file existed (not an error —
|
||||
// callers fall back to defaults in that case).
|
||||
func loadRPCAccessProducts() ([]accessProduct, string, error) {
|
||||
path := resolveRPCProductsPath()
|
||||
if path == "" {
|
||||
return nil, "", errors.New("no rpc_products.yaml found (set RPC_PRODUCTS_PATH or place config/rpc_products.yaml next to the binary)")
|
||||
}
|
||||
raw, err := os.ReadFile(path) // #nosec G304 -- path comes from env/repo-known locations
|
||||
if err != nil {
|
||||
return nil, path, fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
var decoded rpcProductsYAMLFile
|
||||
if err := yaml.Unmarshal(raw, &decoded); err != nil {
|
||||
return nil, path, fmt.Errorf("parse %s: %w", path, err)
|
||||
}
|
||||
products := make([]accessProduct, 0, len(decoded.Products))
|
||||
seen := make(map[string]struct{}, len(decoded.Products))
|
||||
for i, entry := range decoded.Products {
|
||||
if strings.TrimSpace(entry.Slug) == "" {
|
||||
return nil, path, fmt.Errorf("%s: product[%d] has empty slug", path, i)
|
||||
}
|
||||
if _, dup := seen[entry.Slug]; dup {
|
||||
return nil, path, fmt.Errorf("%s: duplicate product slug %q", path, entry.Slug)
|
||||
}
|
||||
seen[entry.Slug] = struct{}{}
|
||||
if strings.TrimSpace(entry.HTTPURL) == "" {
|
||||
return nil, path, fmt.Errorf("%s: product %q is missing http_url", path, entry.Slug)
|
||||
}
|
||||
products = append(products, accessProduct{
|
||||
Slug: entry.Slug,
|
||||
Name: entry.Name,
|
||||
Provider: entry.Provider,
|
||||
VMID: entry.VMID,
|
||||
HTTPURL: strings.TrimSpace(entry.HTTPURL),
|
||||
WSURL: strings.TrimSpace(entry.WSURL),
|
||||
DefaultTier: entry.DefaultTier,
|
||||
RequiresApproval: entry.RequiresApproval,
|
||||
BillingModel: entry.BillingModel,
|
||||
Description: strings.TrimSpace(entry.Description),
|
||||
UseCases: entry.UseCases,
|
||||
ManagementFeatures: entry.ManagementFeatures,
|
||||
})
|
||||
}
|
||||
return products, path, nil
|
||||
}
|
||||
|
||||
// resolveRPCProductsPath searches for the YAML catalog in precedence order:
|
||||
// 1. $RPC_PRODUCTS_PATH (absolute or relative to cwd)
|
||||
// 2. $EXPLORER_BACKEND_DIR/config/rpc_products.yaml
|
||||
// 3. <cwd>/backend/config/rpc_products.yaml
|
||||
// 4. <cwd>/config/rpc_products.yaml
|
||||
//
|
||||
// Returns "" when no candidate exists.
|
||||
func resolveRPCProductsPath() string {
|
||||
if explicit := strings.TrimSpace(os.Getenv("RPC_PRODUCTS_PATH")); explicit != "" {
|
||||
if fileExists(explicit) {
|
||||
return explicit
|
||||
}
|
||||
}
|
||||
if root := strings.TrimSpace(os.Getenv("EXPLORER_BACKEND_DIR")); root != "" {
|
||||
candidate := filepath.Join(root, "config", "rpc_products.yaml")
|
||||
if fileExists(candidate) {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
for _, candidate := range []string{
|
||||
filepath.Join("backend", "config", "rpc_products.yaml"),
|
||||
filepath.Join("config", "rpc_products.yaml"),
|
||||
} {
|
||||
if fileExists(candidate) {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// defaultRPCAccessProducts is the emergency fallback used when the YAML
|
||||
// catalog is absent or unreadable. Kept in sync with config/rpc_products.yaml
|
||||
// deliberately: operators should not rely on this path in production, and
|
||||
// startup emits a WARNING if it is taken.
|
||||
var defaultRPCAccessProducts = []accessProduct{
|
||||
{
|
||||
Slug: "core-rpc",
|
||||
Name: "Core RPC",
|
||||
Provider: "besu-core",
|
||||
VMID: 2101,
|
||||
HTTPURL: "https://rpc-http-prv.d-bis.org",
|
||||
WSURL: "wss://rpc-ws-prv.d-bis.org",
|
||||
DefaultTier: "enterprise",
|
||||
RequiresApproval: true,
|
||||
BillingModel: "contract",
|
||||
Description: "Private Chain 138 Core RPC for operator-grade administration and sensitive workloads.",
|
||||
UseCases: []string{"core deployments", "operator automation", "private infrastructure integration"},
|
||||
ManagementFeatures: []string{"dedicated API key", "higher rate ceiling", "operator-oriented access controls"},
|
||||
},
|
||||
{
|
||||
Slug: "alltra-rpc",
|
||||
Name: "Alltra RPC",
|
||||
Provider: "alltra",
|
||||
VMID: 2102,
|
||||
HTTPURL: "http://192.168.11.212:8545",
|
||||
WSURL: "ws://192.168.11.212:8546",
|
||||
DefaultTier: "pro",
|
||||
RequiresApproval: false,
|
||||
BillingModel: "subscription",
|
||||
Description: "Dedicated Alltra-managed RPC lane for partner traffic, subscription access, and API-key-gated usage.",
|
||||
UseCases: []string{"tenant RPC access", "managed partner workloads", "metered commercial usage"},
|
||||
ManagementFeatures: []string{"subscription-ready key issuance", "rate governance", "partner-specific traffic lane"},
|
||||
},
|
||||
{
|
||||
Slug: "thirdweb-rpc",
|
||||
Name: "Thirdweb RPC",
|
||||
Provider: "thirdweb",
|
||||
VMID: 2103,
|
||||
HTTPURL: "http://192.168.11.217:8545",
|
||||
WSURL: "ws://192.168.11.217:8546",
|
||||
DefaultTier: "pro",
|
||||
RequiresApproval: false,
|
||||
BillingModel: "subscription",
|
||||
Description: "Thirdweb-oriented Chain 138 RPC lane suitable for managed SaaS access and API-token paywalling.",
|
||||
UseCases: []string{"thirdweb integrations", "commercial API access", "managed dApp traffic"},
|
||||
ManagementFeatures: []string{"API token issuance", "usage tiering", "future paywall/subscription hooks"},
|
||||
},
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLoadRPCAccessProductsFromRepoDefault(t *testing.T) {
|
||||
// The repo ships config/rpc_products.yaml relative to backend/. When
|
||||
// running `go test ./...` from the repo root, the loader's relative
|
||||
// search path finds it there. Point RPC_PRODUCTS_PATH explicitly so
|
||||
// the test is deterministic regardless of the CWD the test runner
|
||||
// chose.
|
||||
repoRoot, err := findBackendRoot()
|
||||
if err != nil {
|
||||
t.Fatalf("locate backend root: %v", err)
|
||||
}
|
||||
t.Setenv("RPC_PRODUCTS_PATH", filepath.Join(repoRoot, "config", "rpc_products.yaml"))
|
||||
|
||||
products, path, err := loadRPCAccessProducts()
|
||||
if err != nil {
|
||||
t.Fatalf("loadRPCAccessProducts: %v", err)
|
||||
}
|
||||
if path == "" {
|
||||
t.Fatalf("loadRPCAccessProducts returned empty path")
|
||||
}
|
||||
if len(products) < 3 {
|
||||
t.Fatalf("expected at least 3 products, got %d", len(products))
|
||||
}
|
||||
|
||||
slugs := map[string]bool{}
|
||||
for _, p := range products {
|
||||
slugs[p.Slug] = true
|
||||
if p.HTTPURL == "" {
|
||||
t.Errorf("product %q has empty http_url", p.Slug)
|
||||
}
|
||||
}
|
||||
for _, required := range []string{"core-rpc", "alltra-rpc", "thirdweb-rpc"} {
|
||||
if !slugs[required] {
|
||||
t.Errorf("expected product slug %q in catalog", required)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadRPCAccessProductsRejectsDuplicateSlug(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "rpc_products.yaml")
|
||||
yaml := `products:
|
||||
- slug: a
|
||||
http_url: https://a.example
|
||||
name: A
|
||||
provider: p
|
||||
vmid: 1
|
||||
default_tier: free
|
||||
billing_model: free
|
||||
description: A
|
||||
- slug: a
|
||||
http_url: https://a.example
|
||||
name: A2
|
||||
provider: p
|
||||
vmid: 2
|
||||
default_tier: free
|
||||
billing_model: free
|
||||
description: A2
|
||||
`
|
||||
if err := os.WriteFile(path, []byte(yaml), 0o600); err != nil {
|
||||
t.Fatalf("write fixture: %v", err)
|
||||
}
|
||||
t.Setenv("RPC_PRODUCTS_PATH", path)
|
||||
|
||||
if _, _, err := loadRPCAccessProducts(); err == nil {
|
||||
t.Fatal("expected duplicate-slug error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadRPCAccessProductsRejectsMissingHTTPURL(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "rpc_products.yaml")
|
||||
if err := os.WriteFile(path, []byte("products:\n - slug: x\n name: X\n"), 0o600); err != nil {
|
||||
t.Fatalf("write fixture: %v", err)
|
||||
}
|
||||
t.Setenv("RPC_PRODUCTS_PATH", path)
|
||||
|
||||
if _, _, err := loadRPCAccessProducts(); err == nil {
|
||||
t.Fatal("expected missing-http_url error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// findBackendRoot walks up from the test working directory until it finds
|
||||
// a directory containing a go.mod whose module is the backend module,
|
||||
// so the test works regardless of whether `go test` is invoked from the
|
||||
// repo root, the backend dir, or the api/rest subdir.
|
||||
func findBackendRoot() (string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for {
|
||||
goMod := filepath.Join(cwd, "go.mod")
|
||||
if _, err := os.Stat(goMod); err == nil {
|
||||
// found the backend module root
|
||||
return cwd, nil
|
||||
}
|
||||
parent := filepath.Dir(cwd)
|
||||
if parent == cwd {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
cwd = parent
|
||||
}
|
||||
}
|
||||
@@ -1,97 +0,0 @@
|
||||
# Chain 138 RPC access product catalog.
|
||||
#
|
||||
# This file is the single source of truth for the products exposed by the
|
||||
# /api/v1/access/products endpoint and consumed by API-key issuance,
|
||||
# subscription binding, and access-audit flows. Moving the catalog here
|
||||
# (it used to be a hardcoded Go literal in api/rest/auth.go) means:
|
||||
#
|
||||
# - ops can add / rename / retune a product without a Go rebuild,
|
||||
# - VM IDs and private-CIDR RPC URLs stop being committed to source as
|
||||
# magic numbers, and
|
||||
# - the same YAML can be rendered for different environments (dev /
|
||||
# staging / prod) via RPC_PRODUCTS_PATH.
|
||||
#
|
||||
# Path resolution at startup:
|
||||
# 1. $RPC_PRODUCTS_PATH if set (absolute or relative to the working dir),
|
||||
# 2. $EXPLORER_BACKEND_DIR/config/rpc_products.yaml if that env var is set,
|
||||
# 3. the first of <cwd>/backend/config/rpc_products.yaml or
|
||||
# <cwd>/config/rpc_products.yaml that exists,
|
||||
# 4. the compiled-in fallback slice (legacy behaviour; logs a warning).
|
||||
#
|
||||
# Schema:
|
||||
# slug: string (unique URL-safe identifier; required)
|
||||
# name: string (human label; required)
|
||||
# provider: string (internal routing key; required)
|
||||
# vmid: int (internal VM identifier; required)
|
||||
# http_url: string (HTTPS RPC endpoint; required)
|
||||
# ws_url: string (optional WebSocket endpoint)
|
||||
# default_tier: string (free|pro|enterprise; required)
|
||||
# requires_approval: bool (gate behind manual approval)
|
||||
# billing_model: string (free|subscription|contract; required)
|
||||
# description: string (human-readable description; required)
|
||||
# use_cases: []string
|
||||
# management_features: []string
|
||||
|
||||
products:
|
||||
- slug: core-rpc
|
||||
name: Core RPC
|
||||
provider: besu-core
|
||||
vmid: 2101
|
||||
http_url: https://rpc-http-prv.d-bis.org
|
||||
ws_url: wss://rpc-ws-prv.d-bis.org
|
||||
default_tier: enterprise
|
||||
requires_approval: true
|
||||
billing_model: contract
|
||||
description: >-
|
||||
Private Chain 138 Core RPC for operator-grade administration and
|
||||
sensitive workloads.
|
||||
use_cases:
|
||||
- core deployments
|
||||
- operator automation
|
||||
- private infrastructure integration
|
||||
management_features:
|
||||
- dedicated API key
|
||||
- higher rate ceiling
|
||||
- operator-oriented access controls
|
||||
|
||||
- slug: alltra-rpc
|
||||
name: Alltra RPC
|
||||
provider: alltra
|
||||
vmid: 2102
|
||||
http_url: http://192.168.11.212:8545
|
||||
ws_url: ws://192.168.11.212:8546
|
||||
default_tier: pro
|
||||
requires_approval: false
|
||||
billing_model: subscription
|
||||
description: >-
|
||||
Dedicated Alltra-managed RPC lane for partner traffic, subscription
|
||||
access, and API-key-gated usage.
|
||||
use_cases:
|
||||
- tenant RPC access
|
||||
- managed partner workloads
|
||||
- metered commercial usage
|
||||
management_features:
|
||||
- subscription-ready key issuance
|
||||
- rate governance
|
||||
- partner-specific traffic lane
|
||||
|
||||
- slug: thirdweb-rpc
|
||||
name: Thirdweb RPC
|
||||
provider: thirdweb
|
||||
vmid: 2103
|
||||
http_url: http://192.168.11.217:8545
|
||||
ws_url: ws://192.168.11.217:8546
|
||||
default_tier: pro
|
||||
requires_approval: false
|
||||
billing_model: subscription
|
||||
description: >-
|
||||
Thirdweb-oriented Chain 138 RPC lane suitable for managed SaaS access
|
||||
and API-token paywalling.
|
||||
use_cases:
|
||||
- thirdweb integrations
|
||||
- commercial API access
|
||||
- managed dApp traffic
|
||||
management_features:
|
||||
- API token issuance
|
||||
- usage tiering
|
||||
- future paywall/subscription hooks
|
||||
@@ -13,7 +13,6 @@ require (
|
||||
github.com/redis/go-redis/v9 v9.17.2
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.36.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -52,5 +51,6 @@ require (
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user