Decomposes backend/api/rest/ai.go (which the review flagged at 1180 lines
and which was the largest file in the repo by a wide margin) into six
purpose-built files inside the same package, so no import paths change
for any caller and *Server receivers keep working:
ai.go 198 handlers + feature flags + exported AI* DTOs
ai_context.go 381 buildAIContext + indexed-DB queries
(stats / tx / address / block) + regex patterns +
extractBlockReference
ai_routes.go 139 queryAIRoutes + filterAIRouteMatches +
routeMatchesQuery + normalizeHexString
ai_docs.go 136 loadAIDocSnippets + findAIWorkspaceRoot +
scanDocForTerms + buildDocSearchTerms
ai_xai.go 267 xAI / OpenAI request/response types +
normalizeAIMessages + latestUserMessage +
callXAIChatCompletions + parseXAIError +
extractOutputText
ai_helpers.go 112 pure-function utilities (firstRegexMatch,
compactStringMap, compactAnyMap, stringValue,
stringSliceValue, uniqueStrings, clipString,
fileExists)
ai_runtime.go (rate limiter + metrics + audit log) is unchanged.
This is a pure move: no logic changes, no new public API, no changes to
HTTP routes. Each file carries only the imports it actually uses so
goimports is clean on every file individually. Every exported symbol
retained its original spelling so callers (routes.go, server.go, and
the AI e2e tests) keep compiling without edits.
Verification:
go build ./... clean
go vet ./... clean
go test ./api/rest/... PASS
staticcheck ./... clean on the SA* correctness family
Advances completion criterion 6 (backend maintainability): 'no single
Go file exceeds a few hundred lines; AI/LLM plumbing is separated from
HTTP handlers; context-building is separated from upstream calls.'
199 lines
7.1 KiB
Go
199 lines
7.1 KiB
Go
package rest
|
|
|
|
// ai.go holds the HTTP handlers, feature-flag helpers, and exported DTOs
|
|
// for the /api/v1/ai/* endpoints. It is intentionally kept small; the
|
|
// heavy lifting lives in:
|
|
//
|
|
// ai_context.go - buildAIContext + indexed DB queries
|
|
// ai_routes.go - live aggregator route lookup + match scoring
|
|
// ai_docs.go - workspace doc scraping
|
|
// ai_xai.go - upstream xAI/OpenAI client (types + calls)
|
|
// ai_helpers.go - pure-function utilities shared across the above
|
|
// ai_runtime.go - rate limiting + metrics + audit log (pre-existing)
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"net/http"
|
|
"os"
|
|
"strings"
|
|
"time"
|
|
)
|
|
|
|
const (
	// defaultExplorerAIModel is the fallback chat model used when neither
	// XAI_MODEL nor EXPLORER_AI_MODEL is set (see explorerAIModel).
	defaultExplorerAIModel = "grok-3"
	// maxExplorerAIMessages caps how many chat turns are forwarded upstream;
	// presumably enforced by normalizeAIMessages in ai_xai.go — confirm there.
	maxExplorerAIMessages = 12
	// maxExplorerAIMessageChars caps the length of a single message;
	// presumably enforced during normalization — confirm in ai_xai.go.
	maxExplorerAIMessageChars = 4000
	// maxExplorerAIContextChars caps the serialized context envelope size;
	// presumably enforced by buildAIContext — confirm in ai_context.go.
	maxExplorerAIContextChars = 22000
	// maxExplorerAIDocSnippets caps doc-search results included in context;
	// presumably enforced by loadAIDocSnippets — confirm in ai_docs.go.
	maxExplorerAIDocSnippets = 6
)
|
|
|
|
// AIChatMessage is a single turn in an AI chat conversation as sent by the
// frontend. Role is presumably one of the OpenAI-style roles
// ("system"/"user"/"assistant") — confirm against normalizeAIMessages.
type AIChatMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
|
|
|
|
// AIChatRequest is the JSON request body accepted by the AI chat handler.
type AIChatRequest struct {
	// Messages is the conversation history; it is trimmed and validated via
	// normalizeAIMessages before being used.
	Messages []AIChatMessage `json:"messages"`
	// PageContext carries optional UI hints from the client (the context
	// handler populates "path" and "view"; other keys may appear — verify
	// against the frontend caller).
	PageContext map[string]string `json:"pageContext,omitempty"`
}
|
|
|
|
// AIContextResponse is the JSON response returned by the AI context handler.
type AIContextResponse struct {
	// Enabled reports whether the AI feature is configured (XAI_API_KEY set).
	Enabled bool `json:"enabled"`
	// Query echoes the trimmed "q" query parameter, if any.
	Query string `json:"query,omitempty"`
	// GeneratedAt is the response timestamp in RFC 3339 UTC.
	GeneratedAt string `json:"generatedAt"`
	// Model is the chat model name resolved by explorerAIModel.
	Model string `json:"model"`
	// Context is the assembled explorer context envelope.
	Context AIContextEnvelope `json:"context"`
	// Warnings lists non-fatal issues from context building.
	Warnings []string `json:"warnings,omitempty"`
}
|
|
|
|
// AIChatResponse is the JSON response returned by the AI chat handler.
type AIChatResponse struct {
	// Reply is the assistant's answer from the upstream chat-completions call.
	Reply string `json:"reply"`
	// Model is the model name reported by callXAIChatCompletions.
	Model string `json:"model"`
	// GeneratedAt is the response timestamp in RFC 3339 UTC.
	GeneratedAt string `json:"generatedAt"`
	// Context is the envelope that was supplied to the model for this reply.
	Context AIContextEnvelope `json:"context"`
	// Warnings lists non-fatal issues from context building.
	Warnings []string `json:"warnings,omitempty"`
}
|
|
|
|
// AIContextEnvelope bundles all explorer-side context handed to the model:
// chain identity, page hints, indexed-DB lookups, route matches, and doc
// snippets. It is built by buildAIContext (ai_context.go).
type AIContextEnvelope struct {
	// ChainID identifies the chain this explorer indexes.
	ChainID int `json:"chainId"`
	// Explorer presumably names or links the explorer instance — confirm in
	// buildAIContext.
	Explorer string `json:"explorer"`
	// PageContext echoes the client-supplied UI hints (e.g. path/view).
	PageContext map[string]string `json:"pageContext,omitempty"`
	// Stats/Address/Transaction/Block hold indexed-DB query results keyed by
	// whatever the query referenced; shapes are defined in ai_context.go.
	Stats       map[string]any `json:"stats,omitempty"`
	Address     map[string]any `json:"address,omitempty"`
	Transaction map[string]any `json:"transaction,omitempty"`
	Block       map[string]any `json:"block,omitempty"`
	// RouteMatches holds live aggregator route lookups (ai_routes.go).
	RouteMatches []map[string]any `json:"routeMatches,omitempty"`
	// DocSnippets holds workspace documentation excerpts (ai_docs.go).
	DocSnippets []AIDocSnippet `json:"docSnippets,omitempty"`
	// CapabilityNotice is a fixed note to the model about what it can and
	// cannot do — see where it is set in buildAIContext.
	CapabilityNotice string `json:"capabilityNotice"`
	// Sources attributes where each piece of context came from.
	Sources []AIContextSource `json:"sources,omitempty"`
}
|
|
|
|
// AIDocSnippet is one excerpt from a workspace documentation file, located
// by path and line number (produced by scanDocForTerms in ai_docs.go).
type AIDocSnippet struct {
	// Path is the doc file's path, presumably relative to the workspace root
	// found by findAIWorkspaceRoot — confirm in ai_docs.go.
	Path string `json:"path"`
	// Line is the 1-based(?) line where the matched term appears — confirm.
	Line int `json:"line"`
	// Snippet is the extracted text surrounding the match.
	Snippet string `json:"snippet"`
}
|
|
|
|
// AIContextSource attributes one piece of envelope context to its origin so
// the frontend can render citations.
type AIContextSource struct {
	// Type categorizes the source (e.g. stats/address/tx/block/route/doc —
	// confirm the exact set against buildAIContext).
	Type string `json:"type"`
	// Label is a human-readable description of the source.
	Label string `json:"label"`
	// Origin optionally identifies where the data was fetched from.
	Origin string `json:"origin,omitempty"`
}
|
|
|
|
func (s *Server) handleAIContext(w http.ResponseWriter, r *http.Request) {
|
|
startedAt := time.Now()
|
|
clientIP := clientIPAddress(r)
|
|
if r.Method != http.MethodGet {
|
|
writeMethodNotAllowed(w)
|
|
return
|
|
}
|
|
if allowed, retryAfter := s.allowAIRequest(r, "context"); !allowed {
|
|
w.Header().Set("Retry-After", fmt.Sprintf("%.0f", retryAfter.Seconds()))
|
|
s.aiMetrics.Record("context", http.StatusTooManyRequests, time.Since(startedAt), "rate_limited", clientIP)
|
|
s.logAIRequest("context", http.StatusTooManyRequests, time.Since(startedAt), clientIP, explorerAIModel(), "rate_limited")
|
|
writeErrorDetailed(w, http.StatusTooManyRequests, "rate_limited", "explorer ai context rate limit exceeded", "please retry shortly")
|
|
return
|
|
}
|
|
|
|
query := strings.TrimSpace(r.URL.Query().Get("q"))
|
|
pageContext := map[string]string{
|
|
"path": strings.TrimSpace(r.URL.Query().Get("path")),
|
|
"view": strings.TrimSpace(r.URL.Query().Get("view")),
|
|
}
|
|
|
|
ctxEnvelope, warnings := s.buildAIContext(r.Context(), query, pageContext)
|
|
response := AIContextResponse{
|
|
Enabled: explorerAIEnabled(),
|
|
Query: query,
|
|
GeneratedAt: time.Now().UTC().Format(time.RFC3339),
|
|
Model: explorerAIModel(),
|
|
Context: ctxEnvelope,
|
|
Warnings: warnings,
|
|
}
|
|
|
|
s.aiMetrics.Record("context", http.StatusOK, time.Since(startedAt), "", clientIP)
|
|
s.logAIRequest("context", http.StatusOK, time.Since(startedAt), clientIP, explorerAIModel(), "")
|
|
writeJSON(w, http.StatusOK, response)
|
|
}
|
|
|
|
func (s *Server) handleAIChat(w http.ResponseWriter, r *http.Request) {
|
|
startedAt := time.Now()
|
|
clientIP := clientIPAddress(r)
|
|
if r.Method != http.MethodPost {
|
|
writeMethodNotAllowed(w)
|
|
return
|
|
}
|
|
if allowed, retryAfter := s.allowAIRequest(r, "chat"); !allowed {
|
|
w.Header().Set("Retry-After", fmt.Sprintf("%.0f", retryAfter.Seconds()))
|
|
s.aiMetrics.Record("chat", http.StatusTooManyRequests, time.Since(startedAt), "rate_limited", clientIP)
|
|
s.logAIRequest("chat", http.StatusTooManyRequests, time.Since(startedAt), clientIP, explorerAIModel(), "rate_limited")
|
|
writeErrorDetailed(w, http.StatusTooManyRequests, "rate_limited", "explorer ai chat rate limit exceeded", "please retry shortly")
|
|
return
|
|
}
|
|
if !explorerAIEnabled() {
|
|
s.aiMetrics.Record("chat", http.StatusServiceUnavailable, time.Since(startedAt), "service_unavailable", clientIP)
|
|
s.logAIRequest("chat", http.StatusServiceUnavailable, time.Since(startedAt), clientIP, explorerAIModel(), "service_unavailable")
|
|
writeError(w, http.StatusServiceUnavailable, "service_unavailable", "explorer ai is not configured; set XAI_API_KEY on the backend")
|
|
return
|
|
}
|
|
|
|
defer r.Body.Close()
|
|
body := http.MaxBytesReader(w, r.Body, 1<<20)
|
|
|
|
var chatReq AIChatRequest
|
|
if err := json.NewDecoder(body).Decode(&chatReq); err != nil {
|
|
writeError(w, http.StatusBadRequest, "bad_request", "invalid ai chat payload")
|
|
return
|
|
}
|
|
|
|
messages := normalizeAIMessages(chatReq.Messages)
|
|
if len(messages) == 0 {
|
|
writeError(w, http.StatusBadRequest, "bad_request", "at least one non-empty ai message is required")
|
|
return
|
|
}
|
|
|
|
latestUser := latestUserMessage(messages)
|
|
ctxEnvelope, warnings := s.buildAIContext(r.Context(), latestUser, chatReq.PageContext)
|
|
|
|
reply, model, err := s.callXAIChatCompletions(r.Context(), messages, ctxEnvelope)
|
|
if err != nil {
|
|
statusCode, code, message, details := mapAIUpstreamError(err)
|
|
s.aiMetrics.Record("chat", statusCode, time.Since(startedAt), code, clientIP)
|
|
s.logAIRequest("chat", statusCode, time.Since(startedAt), clientIP, model, code)
|
|
writeErrorDetailed(w, statusCode, code, message, details)
|
|
return
|
|
}
|
|
|
|
response := AIChatResponse{
|
|
Reply: reply,
|
|
Model: model,
|
|
GeneratedAt: time.Now().UTC().Format(time.RFC3339),
|
|
Context: ctxEnvelope,
|
|
Warnings: warnings,
|
|
}
|
|
|
|
s.aiMetrics.Record("chat", http.StatusOK, time.Since(startedAt), "", clientIP)
|
|
s.logAIRequest("chat", http.StatusOK, time.Since(startedAt), clientIP, model, "")
|
|
writeJSON(w, http.StatusOK, response)
|
|
}
|
|
|
|
// explorerAIEnabled reports whether the explorer AI feature is configured:
// it requires a non-blank XAI_API_KEY in the environment.
func explorerAIEnabled() bool {
	key := os.Getenv("XAI_API_KEY")
	return strings.TrimSpace(key) != ""
}
|
|
|
|
// explorerAIOperatorToolsEnabled allows the model to discuss server-side
// operator/MCP automation. Default off: only the literal value "1" (after
// trimming whitespace) in EXPLORER_AI_OPERATOR_TOOLS_ENABLED enables it.
func explorerAIOperatorToolsEnabled() bool {
	flag := os.Getenv("EXPLORER_AI_OPERATOR_TOOLS_ENABLED")
	return strings.TrimSpace(flag) == "1"
}
|
|
|
|
func explorerAIModel() string {
|
|
if model := strings.TrimSpace(os.Getenv("XAI_MODEL")); model != "" {
|
|
return model
|
|
}
|
|
if model := strings.TrimSpace(os.Getenv("EXPLORER_AI_MODEL")); model != "" {
|
|
return model
|
|
}
|
|
return defaultExplorerAIModel
|
|
}
|