# Source: FusionAGI/tests/test_openai_compat.py (269 lines, 8.8 KiB)
"""Tests for OpenAI-compatible API bridge."""
import json
import os
import pytest
from starlette.testclient import TestClient
from fusionagi.adapters import StubAdapter
from fusionagi.api.app import create_app
from fusionagi.api.openai_compat.translators import (
messages_to_prompt,
estimate_usage,
final_response_to_openai,
)
from fusionagi.schemas.witness import AgreementMap, FinalResponse, TransparencyReport
# Canned structured output the StubAdapter returns for every Dvādaśa head;
# the Witness composes the final answer from a separate stub response.
HEAD_OUTPUT = dict(
    head_id="logic",
    summary="Stub summary",
    claims=[],
    risks=[],
    questions=[],
    recommended_actions=[],
    tone_guidance="",
)
def test_messages_to_prompt_simple():
    """A lone user message renders as a single tagged line."""
    rendered = messages_to_prompt([{"role": "user", "content": "Hello"}])
    assert "[User]: Hello" in rendered
    assert rendered.strip() == "[User]: Hello"
def test_messages_to_prompt_system_user():
    """System and user messages each get their own tagged line."""
    messages = [
        {"role": "system", "content": "You are helpful."},
        {"role": "user", "content": "Hi"},
    ]
    rendered = messages_to_prompt(messages)
    assert "[System]: You are helpful." in rendered
    assert "[User]: Hi" in rendered
def test_messages_to_prompt_conversation():
    """Every turn of a multi-turn exchange appears in the prompt."""
    turns = [
        {"role": "user", "content": "What is X?"},
        {"role": "assistant", "content": "X is..."},
        {"role": "user", "content": "And Y?"},
    ]
    rendered = messages_to_prompt(turns)
    assert "[User]: What is X?" in rendered
    assert "[Assistant]: X is..." in rendered
    assert "[User]: And Y?" in rendered
def test_messages_to_prompt_tool_result():
    """Tool-role messages are rendered with the tool name and its output."""
    tool_msg = {"role": "tool", "content": "Result", "name": "read_file", "tool_call_id": "tc1"}
    rendered = messages_to_prompt([
        {"role": "user", "content": "Run tool"},
        {"role": "assistant", "content": "Calling..."},
        tool_msg,
    ])
    assert "Tool read_file" in rendered
    assert "returned: Result" in rendered
def test_messages_to_prompt_array_content():
    """Multimodal (list-of-parts) content still yields its text part."""
    parts = [{"type": "text", "text": "Hello"}]
    rendered = messages_to_prompt([{"role": "user", "content": parts}])
    assert "Hello" in rendered
def test_estimate_usage():
    """Usage counts are positive and the total is the sum of its parts."""
    usage = estimate_usage([{"role": "user", "content": "Hi"}], "Hello back")
    prompt_count = usage["prompt_tokens"]
    completion_count = usage["completion_tokens"]
    assert prompt_count >= 1
    assert completion_count >= 1
    assert usage["total_tokens"] == prompt_count + completion_count
def test_final_response_to_openai():
    """A FinalResponse maps onto the OpenAI chat.completion envelope."""
    agreement = AgreementMap(agreed_claims=[], disputed_claims=[], confidence_score=0.9)
    report = TransparencyReport(
        agreement_map=agreement,
        head_contributions=[],
        safety_report="",
        confidence_score=0.9,
    )
    response = FinalResponse(
        final_answer="Hello from FusionAGI",
        transparency_report=report,
        head_contributions=[],
        confidence_score=0.9,
    )
    payload = final_response_to_openai(
        response,
        task_id="task-abc-123",
        request_model="fusionagi-dvadasa",
        messages=[{"role": "user", "content": "Hi"}],
    )
    assert payload["object"] == "chat.completion"
    assert payload["model"] == "fusionagi-dvadasa"
    first_choice = payload["choices"][0]
    assert first_choice["message"]["content"] == "Hello from FusionAGI"
    assert first_choice["message"]["role"] == "assistant"
    assert first_choice["finish_reason"] == "stop"
    assert "usage" in payload
    assert payload["usage"]["prompt_tokens"] >= 1
    assert payload["usage"]["completion_tokens"] >= 1
@pytest.fixture
def openai_client():
    """Build a TestClient around an app wired to a StubAdapter."""
    adapter = StubAdapter(
        response="Final composed answer from Witness",
        structured_response=HEAD_OUTPUT,
    )
    return TestClient(create_app(adapter=adapter))
def test_models_endpoint(openai_client):
    """GET /v1/models lists the fusionagi-dvadasa model."""
    resp = openai_client.get("/v1/models")
    assert resp.status_code == 200
    body = resp.json()
    assert body["object"] == "list"
    models = body["data"]
    assert len(models) >= 1
    first_model = models[0]
    assert first_model["id"] == "fusionagi-dvadasa"
    assert first_model["owned_by"] == "fusionagi"
def test_models_endpoint_with_auth(openai_client):
    """With auth disabled, any bearer token is accepted."""
    headers = {"Authorization": "Bearer any"}
    assert openai_client.get("/v1/models", headers=headers).status_code == 200
def test_chat_completions_sync(openai_client):
    """Non-streaming completion returns a full chat.completion body."""
    request_body = {
        "model": "fusionagi-dvadasa",
        "messages": [{"role": "user", "content": "What is 2+2?"}],
        "stream": False,
    }
    resp = openai_client.post("/v1/chat/completions", json=request_body)
    assert resp.status_code == 200
    body = resp.json()
    assert body["object"] == "chat.completion"
    assert "choices" in body
    assert len(body["choices"]) >= 1
    message = body["choices"][0]["message"]
    assert message["role"] == "assistant"
    assert "content" in message
    assert body["choices"][0]["finish_reason"] == "stop"
    assert "usage" in body
def test_chat_completions_stream(openai_client):
    """Streaming completion yields SSE data chunks ending in [DONE]."""
    resp = openai_client.post(
        "/v1/chat/completions",
        json={
            "model": "fusionagi-dvadasa",
            "messages": [{"role": "user", "content": "Say hello"}],
            "stream": True,
        },
    )
    assert resp.status_code == 200
    assert "text/event-stream" in resp.headers.get("content-type", "")
    data_lines = [ln for ln in resp.text.split("\n") if ln.startswith("data: ")]
    assert len(data_lines) >= 2
    # The stream must be terminated by the [DONE] sentinel.
    assert "data: [DONE]" in resp.text

    def _carries_content(line):
        # True when the chunk's first choice has a non-empty delta content.
        try:
            chunk = json.loads(line[6:])
        except json.JSONDecodeError:
            return False
        choices = chunk.get("choices")
        if not choices:
            return False
        return bool(choices[0].get("delta", {}).get("content"))

    content_found = any(
        _carries_content(ln) for ln in data_lines if ln != "data: [DONE]"
    )
    assert content_found or "Final" in resp.text or "composed" in resp.text
def test_chat_completions_missing_messages(openai_client):
    """Omitting the messages field yields a 400 invalid_request_error."""
    resp = openai_client.post(
        "/v1/chat/completions",
        json={"model": "fusionagi-dvadasa"},
    )
    assert resp.status_code == 400
    # FastAPI wraps HTTPException detail under a "detail" key.
    assert "invalid_request_error" in str(resp.json())
def test_chat_completions_empty_messages(openai_client):
    """An empty messages list is rejected with 400."""
    request_body = {"model": "fusionagi-dvadasa", "messages": []}
    resp = openai_client.post("/v1/chat/completions", json=request_body)
    assert resp.status_code == 400
def test_chat_completions_empty_content(openai_client):
    """Messages whose contents are all empty are rejected with 400."""
    request_body = {
        "model": "fusionagi-dvadasa",
        "messages": [{"role": "user", "content": ""}],
    }
    resp = openai_client.post("/v1/chat/completions", json=request_body)
    assert resp.status_code == 400
def test_auth_when_enabled(openai_client):
    """With bearer auth enabled, only the configured key is accepted."""
    env_names = ("OPENAI_BRIDGE_AUTH", "OPENAI_BRIDGE_API_KEY")
    saved = {name: os.environ.get(name) for name in env_names}
    try:
        os.environ["OPENAI_BRIDGE_AUTH"] = "Bearer"
        os.environ["OPENAI_BRIDGE_API_KEY"] = "secret123"
        # Drop the cached bridge config so a fresh app re-reads the env.
        from fusionagi.api.dependencies import _app_state
        _app_state.pop("openai_bridge_config", None)
        stub = StubAdapter(response="x", structured_response=HEAD_OUTPUT)
        client = TestClient(create_app(adapter=stub))
        # No Authorization header -> rejected.
        assert client.get("/v1/models").status_code == 401
        # Wrong key -> rejected.
        wrong = {"Authorization": "Bearer wrongkey"}
        assert client.get("/v1/models", headers=wrong).status_code == 401
        # Correct key -> accepted.
        right = {"Authorization": "Bearer secret123"}
        assert client.get("/v1/models", headers=right).status_code == 200
    finally:
        # Restore both env vars exactly as they were before the test.
        for name, value in saved.items():
            if value is not None:
                os.environ[name] = value
            else:
                os.environ.pop(name, None)
        from fusionagi.api.dependencies import _app_state
        _app_state.pop("openai_bridge_config", None)