Files
explorer-monorepo/backend/api/rest/ai_xai.go
Devin 945e637d1d refactor(ai): split the 1180-line ai.go into focused files
Decomposes backend/api/rest/ai.go (which the review flagged at 1180 lines
and which was the largest file in the repo by a wide margin) into six
purpose-built files inside the same package, so no import paths change
for any caller and *Server receivers keep working:

  ai.go           198  handlers + feature flags + exported AI* DTOs
  ai_context.go   381  buildAIContext + indexed-DB queries
                       (stats / tx / address / block) + regex patterns +
                       extractBlockReference
  ai_routes.go    139  queryAIRoutes + filterAIRouteMatches +
                       routeMatchesQuery + normalizeHexString
  ai_docs.go      136  loadAIDocSnippets + findAIWorkspaceRoot +
                       scanDocForTerms + buildDocSearchTerms
  ai_xai.go       267  xAI / OpenAI request/response types +
                       normalizeAIMessages + latestUserMessage +
                       callXAIChatCompletions + parseXAIError +
                       extractOutputText
  ai_helpers.go   112  pure-function utilities (firstRegexMatch,
                       compactStringMap, compactAnyMap, stringValue,
                       stringSliceValue, uniqueStrings, clipString,
                       fileExists)

ai_runtime.go (rate limiter + metrics + audit log) is unchanged.

This is a pure move: no logic changes, no new public API, no changes to
HTTP routes. Each file carries only the imports it actually uses so
goimports is clean on every file individually. Every exported symbol
retained its original spelling so callers (routes.go, server.go, and
the AI e2e tests) keep compiling without edits.

Verification:
  go build  ./...  clean
  go vet    ./...  clean
  go test   ./api/rest/...  PASS
  staticcheck ./...  clean on the SA* correctness family

Advances completion criterion 6 (backend maintainability): 'no single
Go file exceeds a few hundred lines; AI/LLM plumbing is separated from
HTTP handlers; context-building is separated from upstream calls.'
2026-04-18 19:13:38 +00:00

268 lines
7.5 KiB
Go

package rest
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
)
// xAIChatCompletionsRequest is the JSON payload sent to the xAI
// /chat/completions endpoint (OpenAI-compatible wire format).
type xAIChatCompletionsRequest struct {
	Model string `json:"model"`
	Messages []xAIChatMessageReq `json:"messages"`
	// Stream is always false here; callXAIChatCompletions reads one body.
	Stream bool `json:"stream"`
}

// xAIChatMessageReq is a single outbound chat message
// ("system", "user", or "assistant") in the request payload.
type xAIChatMessageReq struct {
	Role string `json:"role"`
	Content string `json:"content"`
}
// xAIChatCompletionsResponse models the upstream reply. In addition to the
// standard choices array it tolerates OpenAI "Responses"-style payloads
// (output_text / output), which callXAIChatCompletions tries in turn.
type xAIChatCompletionsResponse struct {
	Model string `json:"model"`
	Choices []xAIChoice `json:"choices"`
	OutputText string `json:"output_text,omitempty"`
	Output []openAIOutputItem `json:"output,omitempty"`
}

// xAIChoice is one completion candidate; only index 0 is consumed.
type xAIChoice struct {
	Message xAIChoiceMessage `json:"message"`
}

// xAIChoiceMessage is the assistant message inside a choice.
type xAIChoiceMessage struct {
	Role string `json:"role"`
	Content string `json:"content"`
}

// openAIOutputItem is one entry of an OpenAI Responses-style "output"
// array; its content parts are flattened by extractOutputText.
type openAIOutputItem struct {
	Type string `json:"type"`
	Content []openAIOutputContent `json:"content"`
}

// openAIOutputContent is a single text fragment within an output item.
type openAIOutputContent struct {
	Type string `json:"type"`
	Text string `json:"text"`
}
// normalizeAIMessages sanitizes a chat transcript before it is sent
// upstream: roles are trimmed and lowercased, messages with unknown roles
// or empty bodies are dropped, each body is clipped to
// maxExplorerAIMessageChars, and only the most recent
// maxExplorerAIMessages entries are retained.
func normalizeAIMessages(messages []AIChatMessage) []AIChatMessage {
	kept := make([]AIChatMessage, 0, len(messages))
	for _, m := range messages {
		role := strings.ToLower(strings.TrimSpace(m.Role))
		switch role {
		case "assistant", "user", "system":
			// accepted roles fall through
		default:
			continue
		}
		body := clipString(strings.TrimSpace(m.Content), maxExplorerAIMessageChars)
		if body == "" {
			continue
		}
		kept = append(kept, AIChatMessage{
			Role: role,
			Content: body,
		})
	}
	// Keep only the tail of the conversation when it exceeds the cap.
	if excess := len(kept) - maxExplorerAIMessages; excess > 0 {
		kept = kept[excess:]
	}
	return kept
}
// latestUserMessage returns the content of the most recent message with
// role "user". If the transcript contains no user message it falls back
// to the last message of any role, and returns "" for an empty slice.
func latestUserMessage(messages []AIChatMessage) string {
	for idx := len(messages); idx > 0; idx-- {
		if m := messages[idx-1]; m.Role == "user" {
			return m.Content
		}
	}
	if last := len(messages) - 1; last >= 0 {
		return messages[last].Content
	}
	return ""
}
// callXAIChatCompletions sends the normalized chat transcript plus the
// retrieved explorer context to the xAI /chat/completions endpoint and
// returns (reply, model, error). The returned model name is the one
// reported by the upstream response when present, otherwise the locally
// configured model.
//
// Configuration comes from the environment: XAI_API_KEY (required) and
// XAI_BASE_URL (optional; defaults to https://api.x.ai/v1). All upstream
// failures are surfaced as *AIUpstreamError carrying a stable error code
// and an HTTP status for the handler to relay.
func (s *Server) callXAIChatCompletions(ctx context.Context, messages []AIChatMessage, contextEnvelope AIContextEnvelope) (string, string, error) {
	apiKey := strings.TrimSpace(os.Getenv("XAI_API_KEY"))
	if apiKey == "" {
		return "", "", fmt.Errorf("XAI_API_KEY is not configured")
	}
	model := explorerAIModel()
	baseURL := strings.TrimRight(strings.TrimSpace(os.Getenv("XAI_BASE_URL")), "/")
	if baseURL == "" {
		baseURL = "https://api.x.ai/v1"
	}
	// Marshal error deliberately ignored: the envelope is built from plain
	// structs and an empty context block is an acceptable degraded input.
	contextJSON, _ := json.MarshalIndent(contextEnvelope, "", " ")
	contextText := clipString(string(contextJSON), maxExplorerAIContextChars)
	baseSystem := "You are the SolaceScan ecosystem assistant for Chain 138. Answer using the supplied indexed explorer data, route inventory, and workspace documentation. Be concise, operationally useful, and explicit about uncertainty. Never claim a route, deployment, or production status is live unless the provided context says it is live. If data is missing, say exactly what is missing."
	// When operator tools are disabled, append guardrails that forbid the
	// model from steering users toward privileged or key-handling flows.
	if !explorerAIOperatorToolsEnabled() {
		baseSystem += " Never instruct users to paste private keys or seed phrases. Do not direct users to run privileged mint, liquidity, or bridge execution from the public explorer UI. Operator changes belong on LAN-gated workflows and authenticated Track 4 APIs; PMM/MCP-style execution tools are disabled on this deployment unless EXPLORER_AI_OPERATOR_TOOLS_ENABLED=1."
	}
	// Two system messages (instructions, then retrieved context) precede
	// the user/assistant transcript.
	input := []xAIChatMessageReq{
		{
			Role: "system",
			Content: baseSystem,
		},
		{
			Role: "system",
			Content: "Retrieved ecosystem context:\n" + contextText,
		},
	}
	for _, message := range messages {
		input = append(input, xAIChatMessageReq{
			Role: message.Role,
			Content: message.Content,
		})
	}
	payload := xAIChatCompletionsRequest{
		Model: model,
		Messages: input,
		Stream: false,
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return "", model, err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+"/chat/completions", bytes.NewReader(body))
	if err != nil {
		return "", model, err
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): a fresh client per call forgoes connection reuse; a
	// shared client on *Server would be preferable — confirm call volume.
	client := &http.Client{Timeout: 45 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		// Deadline expiry maps to 504; NOTE(review): whether the
		// Client.Timeout error matches context.DeadlineExceeded via
		// errors.Is depends on the Go version — confirm against go.mod.
		if errors.Is(err, context.DeadlineExceeded) {
			return "", model, &AIUpstreamError{
				StatusCode: http.StatusGatewayTimeout,
				Code: "upstream_timeout",
				Message: "explorer ai upstream timed out",
				Details: "xAI request exceeded the configured timeout",
			}
		}
		// Any other transport failure (DNS, TLS, refused) maps to 502.
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code: "upstream_transport_error",
			Message: "explorer ai upstream transport failed",
			Details: err.Error(),
		}
	}
	defer resp.Body.Close()
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code: "upstream_bad_response",
			Message: "explorer ai upstream body could not be read",
			Details: err.Error(),
		}
	}
	// Non-2xx/3xx statuses are translated into coded upstream errors.
	if resp.StatusCode >= 400 {
		return "", model, parseXAIError(resp.StatusCode, responseBody)
	}
	var response xAIChatCompletionsResponse
	if err := json.Unmarshal(responseBody, &response); err != nil {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code: "upstream_bad_response",
			Message: "explorer ai upstream returned invalid JSON",
			Details: err.Error(),
		}
	}
	// Extract the reply, trying each known response shape in order:
	// choices[0].message.content, then output_text, then the output array.
	reply := ""
	if len(response.Choices) > 0 {
		reply = strings.TrimSpace(response.Choices[0].Message.Content)
	}
	if reply == "" {
		reply = strings.TrimSpace(response.OutputText)
	}
	if reply == "" {
		reply = strings.TrimSpace(extractOutputText(response.Output))
	}
	if reply == "" {
		return "", model, &AIUpstreamError{
			StatusCode: http.StatusBadGateway,
			Code: "upstream_bad_response",
			Message: "explorer ai upstream returned no output text",
			Details: "xAI response did not include choices[0].message.content or output text",
		}
	}
	// Prefer the model name the upstream actually served, when reported.
	if strings.TrimSpace(response.Model) != "" {
		model = response.Model
	}
	return reply, model, nil
}
// parseXAIError converts a non-success xAI response into an
// *AIUpstreamError. It extracts a human-readable detail from the standard
// {"error": {...}} envelope (falling back to the raw body, clipped to 280
// characters) and maps the upstream HTTP status onto one of the
// explorer's stable error codes.
func parseXAIError(statusCode int, responseBody []byte) error {
	var envelope struct {
		Error struct {
			Message string `json:"message"`
			Type string `json:"type"`
			Code string `json:"code"`
		} `json:"error"`
	}
	// Best effort: a non-JSON body just leaves the envelope empty.
	_ = json.Unmarshal(responseBody, &envelope)
	details := clipString(strings.TrimSpace(envelope.Error.Message), 280)
	if details == "" {
		details = clipString(strings.TrimSpace(string(responseBody)), 280)
	}
	code := "upstream_error"
	message := "explorer ai upstream request failed"
	switch statusCode {
	case http.StatusUnauthorized, http.StatusForbidden:
		code = "upstream_auth_failed"
		message = "explorer ai upstream authentication failed"
	case http.StatusTooManyRequests:
		code = "upstream_quota_exhausted"
		message = "explorer ai upstream quota exhausted"
	case http.StatusRequestTimeout, http.StatusGatewayTimeout:
		code = "upstream_timeout"
		message = "explorer ai upstream timed out"
	}
	return &AIUpstreamError{
		StatusCode: statusCode,
		Code: code,
		Message: message,
		Details: details,
	}
}
// extractOutputText flattens an OpenAI Responses-style "output" array
// into a single string, joining every non-blank trimmed text fragment
// with a blank line. It returns "" when no fragments carry text.
func extractOutputText(items []openAIOutputItem) string {
	var fragments []string
	for _, item := range items {
		for _, chunk := range item.Content {
			if text := strings.TrimSpace(chunk.Text); text != "" {
				fragments = append(fragments, text)
			}
		}
	}
	return strings.Join(fragments, "\n\n")
}