Initial commit: add .gitignore and README
Some checks failed
Tests / test (3.10) (push) Has been cancelled
Tests / test (3.11) (push) Has been cancelled
Tests / test (3.12) (push) Has been cancelled
Tests / lint (push) Has been cancelled
Tests / docker (push) Has been cancelled

This commit is contained in:
defiQUG
2026-02-09 21:51:42 -08:00
commit c052b07662
3146 changed files with 808305 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
"""FusionAGI API: FastAPI gateway for Dvādaśa sessions and prompts."""
from fusionagi.api.app import create_app
__all__ = ["create_app"]

63
fusionagi/api/app.py Normal file
View File

@@ -0,0 +1,63 @@
"""FastAPI application factory for FusionAGI Dvādaśa API."""
from typing import Any
from fusionagi.api.dependencies import SessionStore, default_orchestrator, set_app_state
from fusionagi.api.routes import router as api_router
def create_app(
    adapter: Any = None,
    cors_origins: list[str] | None = None,
) -> Any:
    """Create the FastAPI app exposing the Dvādaśa routes.

    Args:
        adapter: Optional LLMAdapter for head/Witness LLM calls. When None,
            native reasoning is used (see ``default_orchestrator``).
        cors_origins: Optional list of CORS allowed origins (e.g. ["*"] or
            ["https://example.com"]). If None, no CORS middleware is added.

    Returns:
        Configured FastAPI application instance.

    Raises:
        ImportError: If fastapi is not installed (install fusionagi[api]).
    """
    try:
        from fastapi import FastAPI
    except ImportError as e:
        raise ImportError("Install with: pip install fusionagi[api]") from e
    from contextlib import asynccontextmanager

    @asynccontextmanager
    async def lifespan(fastapi_app: Any):
        # Startup: build orchestrator and session store once per app instance.
        # @app.on_event("startup") is deprecated in FastAPI; the lifespan
        # context manager is the supported replacement.
        if not getattr(fastapi_app.state, "_dvadasa_ready", False):
            adapter_inner = getattr(fastapi_app.state, "llm_adapter", None)
            orch, bus = default_orchestrator(adapter_inner)
            store = SessionStore()
            set_app_state(orch, bus, store)
            fastapi_app.state._dvadasa_ready = True
        yield  # No shutdown work needed.

    app = FastAPI(
        title="FusionAGI Dvādaśa API",
        description="12-headed multi-agent orchestration API",
        version="0.1.0",
        lifespan=lifespan,
    )
    app.state.llm_adapter = adapter
    # Record the adapter for lazy (TestClient) initialization paths as well.
    from fusionagi.api.dependencies import set_default_adapter
    set_default_adapter(adapter)
    app.include_router(api_router, prefix="/v1", tags=["dvadasa"])
    if cors_origins is not None:
        try:
            from fastapi.middleware.cors import CORSMiddleware
            app.add_middleware(
                CORSMiddleware,
                allow_origins=cors_origins,
                allow_methods=["*"],
                allow_headers=["*"],
            )
        except ImportError:
            pass  # CORS middleware is optional; skip silently if unavailable.
    return app
# Default app instance for uvicorn/gunicorn, e.g. `uvicorn fusionagi.api.app:app`.
# Built with no adapter and no CORS middleware (see create_app defaults).
app = create_app()

View File

@@ -0,0 +1,183 @@
"""API dependencies: orchestrator, session store, guardrails."""
import os
from dataclasses import dataclass
from typing import Any
from fusionagi import Orchestrator, EventBus, StateManager
from fusionagi.agents import WitnessAgent
from fusionagi.agents.heads import create_all_content_heads
from fusionagi.adapters.base import LLMAdapter
from fusionagi.adapters.native_adapter import NativeAdapter
from fusionagi.schemas.head import HeadId
from fusionagi.governance import SafetyPipeline, AuditLog
def _get_reasoning_provider() -> Any:
    """Build the optional Super Big Brain reasoning provider.

    Controlled by the SUPER_BIG_BRAIN_ENABLED environment variable; returns
    None (callers then fall back to native reasoning) unless it is truthy.
    """
    flag = os.environ.get("SUPER_BIG_BRAIN_ENABLED", "false").lower()
    if flag not in ("true", "1", "yes"):
        return None
    # Imported lazily so the heavier provider only loads when enabled.
    from fusionagi.core.super_big_brain import SuperBigBrainReasoningProvider
    from fusionagi.memory import SemanticGraphMemory
    return SuperBigBrainReasoningProvider(semantic_graph=SemanticGraphMemory())
# App state populated by lifespan or lazy init. Module-level registry shared
# by the accessor functions below; known keys: "orchestrator", "event_bus",
# "session_store", "safety_pipeline", "telemetry_tracer",
# "openai_bridge_config" (see set_app_state / ensure_initialized).
_app_state: dict[str, Any] = {}
# Adapter recorded by create_app(); used by ensure_initialized() when no
# explicit adapter is supplied.
_default_adapter: Any = None
def set_default_adapter(adapter: Any) -> None:
    """Remember *adapter* as the fallback for lazy initialization."""
    global _default_adapter
    _default_adapter = adapter
def default_orchestrator(adapter: LLMAdapter | None = None) -> tuple[Orchestrator, Any]:
    """Build the default Orchestrator wired with all Dvādaśa heads plus Witness.

    With adapter=None everything runs natively (no external LLM calls): heads
    use native reasoning and the Witness synthesizes through a NativeAdapter.
    Returns the orchestrator together with its EventBus.
    """
    event_bus = EventBus()
    orchestrator = Orchestrator(event_bus=event_bus, state_manager=StateManager())
    # Optional Super Big Brain provider; heads fall back to native reasoning
    # when it is not enabled.
    provider = _get_reasoning_provider()
    content_heads = create_all_content_heads(
        adapter=adapter,
        reasoning_provider=provider,
        use_native_reasoning=provider is None,
    )
    for head_id, head_agent in content_heads.items():
        orchestrator.register_agent(head_id.value, head_agent)
    # The Witness always needs an adapter for synthesis; default to native.
    synthesis_adapter = NativeAdapter() if adapter is None else adapter
    orchestrator.register_agent(HeadId.WITNESS.value, WitnessAgent(adapter=synthesis_adapter))
    return orchestrator, event_bus
class SessionStore:
"""In-memory session store for API sessions."""
def __init__(self) -> None:
self._sessions: dict[str, dict[str, Any]] = {}
def create(self, session_id: str, user_id: str | None = None) -> dict[str, Any]:
sess = {"session_id": session_id, "user_id": user_id, "history": []}
self._sessions[session_id] = sess
return sess
def get(self, session_id: str) -> dict[str, Any] | None:
return self._sessions.get(session_id)
def append_history(self, session_id: str, entry: dict[str, Any]) -> None:
sess = self._sessions.get(session_id)
if sess:
sess.setdefault("history", []).append(entry)
def get_orchestrator() -> Any:
    """Return the shared Orchestrator, or None before initialization."""
    return _app_state.get("orchestrator")
def get_event_bus() -> Any:
    """Return the shared EventBus, or None before initialization."""
    return _app_state.get("event_bus")
def get_session_store() -> SessionStore | None:
    """Return the shared SessionStore, or None before initialization."""
    return _app_state.get("session_store")
def get_safety_pipeline() -> Any:
    """Return the SafetyPipeline, or None if not yet created."""
    return _app_state.get("safety_pipeline")
def get_telemetry_tracer() -> Any:
    """Return the TelemetryTracer, or None if telemetry is unavailable."""
    return _app_state.get("telemetry_tracer")
def set_app_state(orchestrator: Any, event_bus: Any, session_store: SessionStore) -> None:
    """Install shared services into the module-level _app_state registry.

    Also creates the SafetyPipeline on first call and, best-effort, wires a
    TelemetryTracer to *event_bus*.
    """
    _app_state["orchestrator"] = orchestrator
    _app_state["event_bus"] = event_bus
    _app_state["session_store"] = session_store
    # Keep an existing pipeline (and its audit log) across re-initialization.
    if "safety_pipeline" not in _app_state:
        _app_state["safety_pipeline"] = SafetyPipeline(audit_log=AuditLog())
    try:
        from fusionagi.telemetry import TelemetryTracer, set_tracer
        tracer = TelemetryTracer()
        tracer.subscribe(event_bus)
        set_tracer(tracer)
        _app_state["telemetry_tracer"] = tracer
    except Exception:
        # Telemetry is optional: ignore import/setup failures rather than
        # blocking app startup.
        pass
def ensure_initialized(adapter: Any = None) -> None:
    """Lazily build the orchestrator and session store if absent.

    Used by routes (and TestClient) so the API works even when the app's
    startup hook has not run. Idempotent once an orchestrator exists.
    """
    if _app_state.get("orchestrator") is None:
        chosen = _default_adapter if adapter is None else adapter
        orchestrator, bus = default_orchestrator(chosen)
        set_app_state(orchestrator, bus, SessionStore())
@dataclass
class OpenAIBridgeConfig:
"""Configuration for OpenAI-compatible API bridge."""
model_id: str
auth_enabled: bool
api_key: str | None
timeout_per_head: float
@classmethod
def from_env(cls) -> "OpenAIBridgeConfig":
"""Load config from environment variables."""
auth = os.environ.get("OPENAI_BRIDGE_AUTH", "disabled").lower()
auth_enabled = auth not in ("disabled", "false", "0", "no")
return cls(
model_id=os.environ.get("OPENAI_BRIDGE_MODEL_ID", "fusionagi-dvadasa"),
auth_enabled=auth_enabled,
api_key=os.environ.get("OPENAI_BRIDGE_API_KEY") if auth_enabled else None,
timeout_per_head=float(os.environ.get("OPENAI_BRIDGE_TIMEOUT_PER_HEAD", "60")),
)
def get_openai_bridge_config() -> OpenAIBridgeConfig:
    """Return the bridge config, preferring one stashed in app state.

    Falls back to reading the environment when nothing was registered.
    """
    stored = _app_state.get("openai_bridge_config")
    return stored if stored is not None else OpenAIBridgeConfig.from_env()
def verify_openai_bridge_auth(authorization: str | None) -> None:
    """
    Verify Bearer-token auth for the OpenAI bridge. Call from route dependencies.

    Raises HTTPException(401) when auth is enabled and the Authorization header
    is missing or invalid. No-op when auth is disabled, when no API key is
    configured (treated as a misconfiguration that should not lock the API
    out), or when fastapi itself is unavailable (no HTTP layer to protect).

    NOTE(review): the previous version swallowed *all* non-HTTPException
    errors, which silently disabled auth (fail-open) on any internal bug —
    e.g. a bad OPENAI_BRIDGE_TIMEOUT_PER_HEAD value raising in from_env().
    Only the optional fastapi import is guarded now; other errors propagate.
    """
    try:
        from fastapi import HTTPException
    except ImportError:
        # Without fastapi there is nothing to raise and no routes to guard.
        return
    cfg = get_openai_bridge_config()
    if not cfg.auth_enabled:
        return
    if not cfg.api_key:
        return  # Auth enabled but no key configured: allow (misconfig)
    if not authorization or not authorization.startswith("Bearer "):
        raise HTTPException(
            status_code=401,
            detail={"error": {"message": "Missing or invalid Authorization header", "type": "authentication_error"}},
        )
    # Strip the "Bearer " prefix (7 chars) and surrounding whitespace.
    token = authorization[7:].strip()
    if token != cfg.api_key:
        raise HTTPException(
            status_code=401,
            detail={"error": {"message": "Invalid API key", "type": "authentication_error"}},
        )

View File

@@ -0,0 +1,13 @@
"""OpenAI-compatible API bridge for Cursor Composer and other OpenAI API consumers."""
from fusionagi.api.openai_compat.translators import (
messages_to_prompt,
estimate_usage,
final_response_to_openai,
)
__all__ = [
"messages_to_prompt",
"estimate_usage",
"final_response_to_openai",
]

View File

@@ -0,0 +1,146 @@
"""Translators for OpenAI API request/response format to FusionAGI."""
import time
from typing import Any
from fusionagi.schemas.witness import FinalResponse
def _extract_content(msg: dict[str, Any]) -> str:
    """Pull the text out of one OpenAI-style message.

    Supports plain-string content, a list of content parts (dicts with
    type == "text", or bare strings), None (-> ""), and anything else
    via str().
    """
    content = msg.get("content")
    if content is None:
        return ""
    if isinstance(content, str):
        return content
    if not isinstance(content, list):
        return str(content)
    pieces: list[str] = []
    for item in content:
        if isinstance(item, dict):
            if item.get("type") == "text":
                pieces.append(item.get("text", "") or "")
        elif isinstance(item, str):
            pieces.append(item)
    return "\n".join(pieces)
def messages_to_prompt(messages: list[dict[str, Any]]) -> str:
    """
    Flatten an OpenAI messages array into one prompt string for Dvādaśa.

    Produces "[System]: ...", "[User]: ...", "[Assistant]: ..." turns joined
    by blank lines; the system message (if any) is emitted once, just before
    the first user/assistant turn. Tool results (role "tool") are rendered as
    "[Tool {name}] (id=...) returned: {content}". When no turns are rendered
    at all, falls back to the last user content, then the system content.

    Args:
        messages: List of message dicts with 'role' and 'content'.

    Returns:
        Single prompt string for orch.submit_task / run_dvadasa.
    """
    if not messages:
        return ""
    rendered: list[str] = []
    system_text = ""
    last_user = ""

    def _prepend_system_once() -> None:
        # Emit the system turn exactly once, ahead of the first dialogue turn.
        if system_text and not rendered:
            rendered.append(f"[System]: {system_text}")

    for message in messages:
        role = (message.get("role") or "user").lower()
        text = _extract_content(message)
        if role == "system":
            system_text = text
        elif role == "user":
            last_user = text
            _prepend_system_once()
            rendered.append(f"[User]: {text}")
        elif role == "assistant":
            _prepend_system_once()
            rendered.append(f"[Assistant]: {text}")
        elif role == "tool":
            tool_name = message.get("name", "unknown")
            call_id = message.get("tool_call_id", "")
            suffix = f" (id={call_id})" if call_id else ""
            rendered.append(f"[Tool {tool_name}]{suffix} returned: {text}")
    if not rendered:
        return last_user or system_text
    return "\n\n".join(rendered)
def estimate_usage(
    messages: list[dict[str, Any]],
    completion_text: str,
    chars_per_token: int = 4,
) -> dict[str, int]:
    """
    Rough token accounting from character counts (OpenAI-like heuristic).

    Args:
        messages: Request messages counted toward prompt_tokens.
        completion_text: Response text counted toward completion_tokens.
        chars_per_token: Approximate characters per token (default 4).

    Returns:
        Dict with prompt_tokens, completion_tokens and total_tokens; each
        individual count is clamped to at least 1.
    """
    total_prompt_chars = sum(len(_extract_content(message)) for message in messages)
    prompt_tokens = max(1, total_prompt_chars // chars_per_token)
    completion_tokens = max(1, len(completion_text) // chars_per_token)
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }
def final_response_to_openai(
    final: FinalResponse,
    task_id: str,
    request_model: str | None = None,
    messages: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """
    Map a FusionAGI FinalResponse to the OpenAI Chat Completion format.

    Args:
        final: FinalResponse from run_dvadasa.
        task_id: Task ID used to derive the response id (truncated to 24 chars).
        request_model: Model ID from the request, or the fusionagi-dvadasa default.
        messages: Original request messages, used for usage estimation.

    Returns:
        OpenAI-compatible chat completion dict (single choice, finish_reason
        "stop", usage estimated from character counts).
    """
    model = request_model or "fusionagi-dvadasa"
    usage = estimate_usage(messages or [], final.final_answer)
    return {
        # task_id[:24] is a no-op for shorter ids, so no length branch needed.
        "id": f"chatcmpl-{task_id[:24]}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": final.final_answer,
                    # The bridge does not emit tool calls.
                    "tool_calls": None,
                },
                "finish_reason": "stop",
            }
        ],
        "usage": usage,
    }

View File

@@ -0,0 +1,14 @@
"""API routes for Dvādaśa sessions and prompts."""
from fastapi import APIRouter
from fusionagi.api.routes.sessions import router as sessions_router
from fusionagi.api.routes.tts import router as tts_router
from fusionagi.api.routes.admin import router as admin_router
from fusionagi.api.routes.openai_compat import router as openai_compat_router
# Top-level v1 router aggregating all Dvādaśa API route modules.
router = APIRouter()
# Session lifecycle and prompt submission under /sessions.
router.include_router(sessions_router, prefix="/sessions", tags=["sessions"])
# TTS synthesis also lives under the /sessions prefix.
router.include_router(tts_router, prefix="/sessions", tags=["tts"])
router.include_router(admin_router, prefix="/admin", tags=["admin"])
# OpenAI-compatible bridge routes declare their own paths (/models, /chat/...).
router.include_router(openai_compat_router)

View File

@@ -0,0 +1,17 @@
"""Admin routes: telemetry, etc."""
from fastapi import APIRouter
from fusionagi.api.dependencies import get_telemetry_tracer
router = APIRouter()
@router.get("/telemetry")
def get_telemetry(task_id: str | None = None, limit: int = 100) -> dict:
    """Return telemetry traces (admin endpoint).

    Args:
        task_id: When given, only traces for that task are returned.
        limit: Maximum number of traces to return (default 100).
    """
    tracer = get_telemetry_tracer()
    if not tracer:
        # Telemetry is optional; report an empty list when no tracer is wired.
        return {"traces": []}
    return {"traces": tracer.get_traces(task_id=task_id, limit=limit)}

View File

@@ -0,0 +1,265 @@
"""OpenAI-compatible API routes for Cursor Composer and other consumers."""
import asyncio
import json
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any
from fastapi import APIRouter, Depends, Header, HTTPException, Request
from starlette.responses import StreamingResponse
from fusionagi.api.dependencies import (
ensure_initialized,
get_event_bus,
get_orchestrator,
get_safety_pipeline,
get_openai_bridge_config,
verify_openai_bridge_auth,
)
from fusionagi.api.openai_compat.translators import (
messages_to_prompt,
final_response_to_openai,
estimate_usage,
)
from fusionagi.core import run_dvadasa
from fusionagi.schemas.commands import parse_user_input
# Bridge router mounted without a prefix: exposes /models and /chat/completions.
router = APIRouter(tags=["openai-compat"])
# Chunk size for streaming responses (characters per SSE delta chunk).
_STREAM_CHUNK_SIZE = 50
def _openai_error(status_code: int, message: str, error_type: str) -> HTTPException:
    """Build (not raise) an HTTPException carrying an OpenAI-style error body."""
    detail = {"error": {"message": message, "type": error_type}}
    return HTTPException(status_code=status_code, detail=detail)
def _ensure_openai_init() -> None:
    """Ensure orchestrator and dependencies are initialized (lazy init)."""
    ensure_initialized()
async def _verify_auth_dep(authorization: str | None = Header(default=None)) -> None:
    """FastAPI dependency: enforce bridge auth via the Authorization header."""
    verify_openai_bridge_auth(authorization)
@router.get("/models", dependencies=[Depends(_verify_auth_dep)])
async def list_models() -> dict[str, Any]:
    """
    List available models (OpenAI-compatible endpoint).

    The bridge exposes exactly one model: the configured Dvādaśa model id.
    """
    cfg = get_openai_bridge_config()
    model_entry = {
        "id": cfg.model_id,
        "object": "model",
        # Fixed timestamp: 2024-01-01T00:00:00Z.
        "created": 1704067200,
        "owned_by": "fusionagi",
    }
    return {"object": "list", "data": [model_entry]}
@router.post(
    "/chat/completions",
    dependencies=[Depends(_verify_auth_dep)],
    response_model=None,
)
async def create_chat_completion(request: Request):
    """
    Create a chat completion (OpenAI-compatible).

    Supports both sync (stream=false) and streaming (stream=true) modes.
    Validates the messages array, runs input moderation, then executes the
    Dvādaśa flow. The blocking run_dvadasa call is offloaded to a worker
    thread so the event loop is not stalled for the request's duration.
    """
    _ensure_openai_init()
    try:
        body = await request.json()
    except Exception as e:
        raise _openai_error(400, f"Invalid JSON body: {e}", "invalid_request_error")
    messages = body.get("messages")
    if not messages or not isinstance(messages, list):
        raise _openai_error(
            400,
            "messages is required and must be a non-empty array",
            "invalid_request_error",
        )
    from fusionagi.api.openai_compat.translators import _extract_content
    # Reject requests whose messages carry no usable text at all.
    has_content = any(_extract_content(m).strip() for m in messages)
    if not has_content:
        raise _openai_error(
            400,
            "messages must contain at least one user or assistant message with content",
            "invalid_request_error",
        )
    prompt = messages_to_prompt(messages)
    if not prompt.strip():
        raise _openai_error(
            400,
            "messages must contain at least one user or assistant message with content",
            "invalid_request_error",
        )
    # Input moderation (pre-check) before any work is scheduled.
    pipeline = get_safety_pipeline()
    if pipeline:
        pre_result = pipeline.pre_check(prompt)
        if not pre_result.allowed:
            raise _openai_error(
                400,
                pre_result.reason or "Input moderation failed",
                "invalid_request_error",
            )
    orch = get_orchestrator()
    bus = get_event_bus()
    if not orch:
        raise _openai_error(503, "Service not initialized", "internal_error")
    cfg = get_openai_bridge_config()
    request_model = body.get("model") or cfg.model_id
    stream = body.get("stream", False) is True
    # Goal is truncated for task bookkeeping; the full prompt is passed below.
    task_id = orch.submit_task(goal=prompt[:200])
    parsed = parse_user_input(prompt)
    if stream:
        return StreamingResponse(
            _stream_chat_completion(
                orch=orch,
                bus=bus,
                task_id=task_id,
                prompt=prompt,
                parsed=parsed,
                request_model=request_model,
                messages=messages,
                pipeline=pipeline,
                cfg=cfg,
            ),
            media_type="text/event-stream",
        )
    # Sync path: run_dvadasa is blocking, so run it off the event loop.
    final = await asyncio.to_thread(
        run_dvadasa,
        orchestrator=orch,
        task_id=task_id,
        user_prompt=prompt,
        parsed=parsed,
        event_bus=bus,
        timeout_per_head=cfg.timeout_per_head,
    )
    if not final:
        raise _openai_error(500, "Dvādaśa failed to produce response", "internal_error")
    # Output moderation (post-check) on the synthesized answer.
    if pipeline:
        post_result = pipeline.post_check(final.final_answer)
        if not post_result.passed:
            raise _openai_error(
                400,
                f"Output scan failed: {', '.join(post_result.flags)}",
                "invalid_request_error",
            )
    return final_response_to_openai(
        final=final,
        task_id=task_id,
        request_model=request_model,
        messages=messages,
    )
async def _stream_chat_completion(
    orch: Any,
    bus: Any,
    task_id: str,
    prompt: str,
    parsed: Any,
    request_model: str,
    messages: list[dict[str, Any]],
    pipeline: Any,
    cfg: Any,
):
    """
    Async generator that runs Dvādaśa and streams the final answer as SSE.

    Emits OpenAI chat.completion.chunk events, then a final chunk carrying
    finish_reason="stop" and estimated usage, then "data: [DONE]". Errors are
    reported as SSE error payloads (the HTTP status is already 200 once
    streaming has started).
    """
    # asyncio.to_thread replaces the old get_event_loop()+ThreadPoolExecutor
    # combo: get_event_loop() is deprecated inside coroutines, and the
    # executor was never shut down (leaked a worker thread per request).
    try:
        final = await asyncio.to_thread(
            run_dvadasa,
            orchestrator=orch,
            task_id=task_id,
            user_prompt=prompt,
            parsed=parsed,
            event_bus=bus,
            timeout_per_head=cfg.timeout_per_head,
        )
    except Exception as e:
        yield f"data: {json.dumps({'error': {'message': str(e), 'type': 'internal_error'}})}\n\n"
        return
    if not final:
        yield f"data: {json.dumps({'error': {'message': 'Dvādaśa failed', 'type': 'internal_error'}})}\n\n"
        return
    # Output moderation (post-check) before any content is streamed.
    if pipeline:
        post_result = pipeline.post_check(final.final_answer)
        if not post_result.passed:
            yield f"data: {json.dumps({'error': {'message': 'Output scan failed', 'type': 'invalid_request_error'}})}\n\n"
            return
    # task_id[:24] is a no-op for shorter ids, so no length branch is needed.
    chat_id = f"chatcmpl-{task_id[:24]}"
    # Stream final_answer in fixed-size character chunks.
    text = final.final_answer
    for i in range(0, len(text), _STREAM_CHUNK_SIZE):
        chunk = text[i : i + _STREAM_CHUNK_SIZE]
        chunk_json = {
            "id": chat_id,
            "object": "chat.completion.chunk",
            "created": 0,
            "model": request_model,
            "choices": [
                {
                    "index": 0,
                    "delta": {"content": chunk},
                    "finish_reason": None,
                }
            ],
        }
        yield f"data: {json.dumps(chunk_json)}\n\n"
    # Final chunk carries finish_reason and estimated usage.
    usage = estimate_usage(messages, text)
    final_chunk = {
        "id": chat_id,
        "object": "chat.completion.chunk",
        "created": 0,
        "model": request_model,
        "choices": [
            {
                "index": 0,
                "delta": {},
                "finish_reason": "stop",
            }
        ],
        "usage": usage,
    }
    yield f"data: {json.dumps(final_chunk)}\n\n"
    yield "data: [DONE]\n\n"

View File

@@ -0,0 +1,147 @@
"""Session and prompt routes."""
import json
import uuid
from typing import Any
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
from fusionagi.api.dependencies import get_orchestrator, get_session_store, get_event_bus, get_safety_pipeline
from fusionagi.api.websocket import handle_stream
from fusionagi.core import run_dvadasa, select_heads_for_complexity, extract_sources_from_head_outputs
from fusionagi.schemas.commands import parse_user_input, UserIntent
# Router mounted under /v1/sessions by the parent API router.
router = APIRouter()
def _ensure_init():
    """Lazily initialize the orchestrator/session store (idempotent)."""
    from fusionagi.api.dependencies import ensure_initialized
    ensure_initialized()
@router.post("")
def create_session(user_id: str | None = None) -> dict[str, Any]:
    """Create a new session and return its generated session_id."""
    _ensure_init()
    store = get_session_store()
    if store is None:
        raise HTTPException(status_code=503, detail="Session store not initialized")
    new_id = str(uuid.uuid4())
    store.create(new_id, user_id)
    return {"session_id": new_id, "user_id": user_id}
@router.post("/{session_id}/prompt")
def submit_prompt(session_id: str, body: dict[str, Any]) -> dict[str, Any]:
    """Submit a prompt and receive FinalResponse (sync).

    Body: {"prompt": "...", "use_all_heads": bool?}. Meta-commands
    (show-dissent / rerun-risk / explain / sources) sent without a new prompt
    reuse the last prompt from the session history. Runs input moderation,
    dynamic head selection, the full Dvādaśa flow, output moderation, then
    records the exchange in session history.
    """
    _ensure_init()
    store = get_session_store()
    orch = get_orchestrator()
    bus = get_event_bus()
    if not store or not orch:
        raise HTTPException(status_code=503, detail="Service not initialized")
    sess = store.get(session_id)
    if not sess:
        raise HTTPException(status_code=404, detail="Session not found")
    prompt = body.get("prompt", "")
    parsed = parse_user_input(prompt)
    # Empty prompt (or a bare meta-command with nothing after cleaning):
    # fall back to the previous prompt for re-run style intents.
    if not prompt or not parsed.cleaned_prompt.strip():
        if parsed.intent in (UserIntent.SHOW_DISSENT, UserIntent.RERUN_RISK, UserIntent.EXPLAIN_REASONING, UserIntent.SOURCES):
            hist = sess.get("history", [])
            if hist:
                prompt = hist[-1].get("prompt", "")
            if not prompt:
                raise HTTPException(status_code=400, detail="No previous prompt; provide a prompt for this command")
        else:
            raise HTTPException(status_code=400, detail="prompt is required")
    # Prefer the command-stripped prompt; fall back to the raw/recovered one.
    effective_prompt = parsed.cleaned_prompt.strip() or prompt
    pipeline = get_safety_pipeline()
    if pipeline:
        pre_result = pipeline.pre_check(effective_prompt)
        if not pre_result.allowed:
            raise HTTPException(status_code=400, detail=pre_result.reason or "Input moderation failed")
    # Goal is truncated for task bookkeeping; full prompt passed to run_dvadasa.
    task_id = orch.submit_task(goal=effective_prompt[:200])
    # Dynamic head selection
    head_ids = select_heads_for_complexity(effective_prompt)
    if parsed.intent.value == "head_strategy" and parsed.head_id:
        head_ids = [parsed.head_id]
    force_second = parsed.intent == UserIntent.RERUN_RISK
    return_heads = parsed.intent == UserIntent.SOURCES
    # NOTE(review): for a plain prompt without use_all_heads, head_ids=None is
    # passed, so run_dvadasa presumably falls back to its default head set —
    # confirm the intended precedence of the `or` below.
    result = run_dvadasa(
        orchestrator=orch,
        task_id=task_id,
        user_prompt=effective_prompt,
        parsed=parsed,
        head_ids=head_ids if parsed.intent.value != "normal" or body.get("use_all_heads") else None,
        event_bus=bus,
        force_second_pass=force_second,
        return_head_outputs=return_heads,
    )
    # With return_head_outputs=True, run_dvadasa returns (final, head_outputs).
    if return_heads and isinstance(result, tuple):
        final, head_outputs = result
    else:
        final = result
        head_outputs = []
    if not final:
        raise HTTPException(status_code=500, detail="Failed to produce response")
    if pipeline:
        post_result = pipeline.post_check(final.final_answer)
        if not post_result.passed:
            raise HTTPException(
                status_code=400,
                detail=f"Output scan failed: {', '.join(post_result.flags)}",
            )
    entry = {
        "prompt": effective_prompt,
        "final_answer": final.final_answer,
        "confidence_score": final.confidence_score,
        "head_contributions": final.head_contributions,
    }
    store.append_history(session_id, entry)
    response: dict[str, Any] = {
        "task_id": task_id,
        "final_answer": final.final_answer,
        "transparency_report": final.transparency_report.model_dump(),
        "head_contributions": final.head_contributions,
        "confidence_score": final.confidence_score,
    }
    # Intent-specific extras on top of the standard response payload.
    if parsed.intent == UserIntent.SHOW_DISSENT:
        response["response_mode"] = "show_dissent"
        response["disputed_claims"] = final.transparency_report.agreement_map.disputed_claims
    elif parsed.intent == UserIntent.EXPLAIN_REASONING:
        response["response_mode"] = "explain"
    elif parsed.intent == UserIntent.SOURCES and head_outputs:
        response["sources"] = extract_sources_from_head_outputs(head_outputs)
    return response
@router.websocket("/{session_id}/stream")
async def stream_websocket(websocket: WebSocket, session_id: str) -> None:
    """WebSocket endpoint streaming a Dvādaśa response.

    The client opens the socket and sends {"prompt": "..."} to start; JSON
    events are streamed back until completion or error.
    """
    await websocket.accept()
    try:
        payload = await websocket.receive_json()
        user_prompt = payload.get("prompt", "")

        async def _emit(event: dict) -> None:
            await websocket.send_json(event)

        await handle_stream(session_id, user_prompt, _emit)
    except WebSocketDisconnect:
        # Client went away: nothing to clean up.
        pass
    except Exception as exc:
        # Best-effort error report; the socket may already be closed.
        try:
            await websocket.send_json({"type": "error", "message": str(exc)})
        except Exception:
            pass

View File

@@ -0,0 +1,49 @@
"""TTS synthesis routes for per-head voice output."""
from typing import Any
from fastapi import APIRouter, HTTPException
from fusionagi.api.dependencies import get_session_store
from fusionagi.config.head_voices import get_voice_id_for_head
from fusionagi.schemas.head import HeadId
router = APIRouter()
@router.post("/{session_id}/synthesize")
async def synthesize(
    session_id: str,
    body: dict[str, Any],
) -> dict[str, Any]:
    """
    Synthesize text to audio for a head's voice.

    Body: { "text": "...", "head_id": "logic" }
    Returns: { "audio_base64": "...", "voice_id": "..." }; audio_base64 is
    null until a TTS adapter is wired up.
    """
    store = get_session_store()
    if store is None:
        raise HTTPException(status_code=503, detail="Service not initialized")
    if store.get(session_id) is None:
        raise HTTPException(status_code=404, detail="Session not found")
    text = body.get("text", "")
    if not text:
        raise HTTPException(status_code=400, detail="text is required")
    raw_head_id = body.get("head_id", "")
    try:
        head = HeadId(raw_head_id)
    except ValueError:
        # Unknown or missing head ids fall back to the Logic head's voice.
        head = HeadId.LOGIC
    voice_id = get_voice_id_for_head(head)
    # TODO: Wire TTSAdapter (ElevenLabs, Azure, etc.) and synthesize:
    #   audio_bytes = await tts_adapter.synthesize(text, voice_id=voice_id)
    #   audio_base64 = base64.b64encode(audio_bytes).decode()
    return {"audio_base64": None, "voice_id": voice_id}

View File

@@ -0,0 +1,99 @@
"""WebSocket streaming for Dvādaśa responses."""
import asyncio
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Any
from fusionagi.api.dependencies import get_orchestrator, get_session_store, get_event_bus
from fusionagi.core import run_heads_parallel, run_witness, select_heads_for_complexity
from fusionagi.schemas.commands import parse_user_input
from fusionagi.schemas.head import HeadId, HeadOutput
async def handle_stream(
    session_id: str,
    prompt: str,
    send_fn: Any,
) -> None:
    """
    Run the Dvādaśa flow and stream progress events via *send_fn*.

    Args:
        session_id: Session to stream for; must exist in the session store.
        prompt: User prompt; required (empty prompt yields an error event).
        send_fn: Async callable receiving event dicts. Emitted event types:
            heads_running, head_complete, head_speak, witness_running,
            complete, error.
    """
    from fusionagi.api.dependencies import ensure_initialized
    ensure_initialized()
    store = get_session_store()
    orch = get_orchestrator()
    bus = get_event_bus()
    if not store or not orch:
        await send_fn({"type": "error", "message": "Service not initialized"})
        return
    sess = store.get(session_id)
    if not sess:
        await send_fn({"type": "error", "message": "Session not found"})
        return
    if not prompt:
        await send_fn({"type": "error", "message": "prompt is required"})
        return
    parsed = parse_user_input(prompt)
    # Goal is truncated for task bookkeeping; the full prompt is used below.
    task_id = orch.submit_task(goal=prompt[:200])
    head_ids = select_heads_for_complexity(prompt)
    if parsed.intent.value == "head_strategy" and parsed.head_id:
        head_ids = [parsed.head_id]
    await send_fn({"type": "heads_running", "message": "Heads running…"})
    # Blocking work is offloaded via asyncio.to_thread: the previous
    # get_event_loop()+ThreadPoolExecutor approach used a deprecated API and
    # leaked the executor (it was never shut down).
    try:
        head_outputs = await asyncio.to_thread(
            run_heads_parallel, orch, task_id, prompt, head_ids=head_ids
        )
    except Exception as e:
        await send_fn({"type": "error", "message": str(e)})
        return
    for ho in head_outputs:
        await send_fn({
            "type": "head_complete",
            "head_id": ho.head_id.value,
            "summary": ho.summary,
        })
        # head_speak mirrors head_complete for voice UIs; audio not wired yet.
        await send_fn({
            "type": "head_speak",
            "head_id": ho.head_id.value,
            "summary": ho.summary,
            "audio_base64": None,
        })
    await send_fn({"type": "witness_running", "message": "Witness composing…"})
    try:
        final = await asyncio.to_thread(run_witness, orch, task_id, head_outputs, prompt)
    except Exception as e:
        await send_fn({"type": "error", "message": str(e)})
        return
    if not final:
        await send_fn({"type": "error", "message": "Failed to produce response"})
        return
    await send_fn({
        "type": "complete",
        "final_answer": final.final_answer,
        "transparency_report": final.transparency_report.model_dump(),
        "head_contributions": final.head_contributions,
        "confidence_score": final.confidence_score,
    })
    store.append_history(session_id, {
        "prompt": prompt,
        "final_answer": final.final_answer,
        "confidence_score": final.confidence_score,
    })