134 lines
4.3 KiB
Python
134 lines
4.3 KiB
Python
|
|
"""Tests for Dvādaśa 12-head FusionAGI components."""
|
||
|
|
|
||
|
|
import pytest
|
||
|
|
|
||
|
|
from fusionagi.schemas import (
|
||
|
|
HeadId,
|
||
|
|
HeadOutput,
|
||
|
|
HeadClaim,
|
||
|
|
AgreementMap,
|
||
|
|
FinalResponse,
|
||
|
|
parse_user_input,
|
||
|
|
UserIntent,
|
||
|
|
)
|
||
|
|
from fusionagi.agents import HeadAgent, WitnessAgent
|
||
|
|
from fusionagi.agents.heads import create_head_agent, create_all_content_heads
|
||
|
|
from fusionagi.multi_agent import run_consensus, collect_claims, CollectedClaim
|
||
|
|
from fusionagi.adapters import StubAdapter
|
||
|
|
from fusionagi import Orchestrator, EventBus, StateManager
|
||
|
|
from fusionagi.core import run_heads_parallel, run_witness, run_dvadasa, select_heads_for_complexity
|
||
|
|
|
||
|
|
|
||
|
|
def test_parse_user_input_normal():
    """A plain prompt parses to the NORMAL intent with its text untouched."""
    command = parse_user_input("What is the best approach?")
    assert command.intent == UserIntent.NORMAL
    assert command.cleaned_prompt == "What is the best approach?"
|
||
|
|
|
||
|
|
|
||
|
|
def test_parse_user_input_head_strategy():
    """A "/head strategy" prefix routes to the STRATEGY head and strips the directive."""
    command = parse_user_input("/head strategy What is the best approach?")
    assert command.intent == UserIntent.HEAD_STRATEGY
    assert command.head_id == HeadId.STRATEGY
    # The directive prefix is removed; the substantive prompt text survives.
    assert "best approach" in command.cleaned_prompt
|
||
|
|
|
||
|
|
|
||
|
|
def test_parse_user_input_show_dissent():
    """The "/show dissent" directive maps to the SHOW_DISSENT intent."""
    command = parse_user_input("/show dissent")
    assert command.intent == UserIntent.SHOW_DISSENT
|
||
|
|
|
||
|
|
|
||
|
|
def test_head_output_schema():
    """HeadOutput stores its head id and preserves claim confidence exactly."""
    claim = HeadClaim(claim_text="X is true", confidence=0.9, evidence=[], assumptions=[])
    output = HeadOutput(
        head_id=HeadId.LOGIC,
        summary="Test",
        claims=[claim],
        risks=[],
        questions=[],
        recommended_actions=[],
        tone_guidance="",
    )
    assert output.head_id == HeadId.LOGIC
    assert len(output.claims) == 1
    assert output.claims[0].confidence == 0.9
|
||
|
|
|
||
|
|
|
||
|
|
def test_consensus_engine():
    """run_consensus over a single head output yields a well-formed AgreementMap."""
    logic_output = HeadOutput(
        head_id=HeadId.LOGIC,
        summary="S1",
        claims=[
            HeadClaim(claim_text="X is true", confidence=0.8, evidence=[], assumptions=[]),
        ],
        risks=[],
        questions=[],
        recommended_actions=[],
        tone_guidance="",
    )
    agreement = run_consensus([logic_output])
    # Structural sanity: a non-negative score and list-typed claim buckets.
    assert agreement.confidence_score >= 0
    assert isinstance(agreement.agreed_claims, list)
    assert isinstance(agreement.disputed_claims, list)
|
||
|
|
|
||
|
|
|
||
|
|
def test_create_all_heads():
    """The factory produces the 11 content heads; the Witness is not among them."""
    content_heads = create_all_content_heads()
    assert len(content_heads) == 11
    assert HeadId.WITNESS not in content_heads
|
||
|
|
|
||
|
|
|
||
|
|
def test_run_heads_parallel():
    """Two stub-backed heads plus a Witness produce at least one HeadOutput in parallel."""
    # Canned structured reply every stubbed head returns verbatim.
    stub_payload = {
        "head_id": "logic",
        "summary": "Stub",
        "claims": [],
        "risks": [],
        "questions": [],
        "recommended_actions": [],
        "tone_guidance": "",
    }
    stub = StubAdapter(structured_response=stub_payload)

    bus = EventBus()
    state = StateManager()
    orch = Orchestrator(event_bus=bus, state_manager=state)

    # Register only the first two content heads to keep the run small.
    heads = create_all_content_heads(adapter=stub)
    for head_id, agent in list(heads.items())[:2]:
        orch.register_agent(head_id.value, agent)
    orch.register_agent(HeadId.WITNESS.value, WitnessAgent(adapter=stub))

    task_id = orch.submit_task(goal="Test")
    results = run_heads_parallel(orch, task_id, "Hello", head_ids=[HeadId.LOGIC, HeadId.RESEARCH])

    assert len(results) >= 1
    assert all(isinstance(result, HeadOutput) for result in results)
|
||
|
|
|
||
|
|
|
||
|
|
def test_select_heads_for_complexity():
    """Trivial prompts get a small head subset; complex prompts get all 11 heads."""
    trivial = select_heads_for_complexity("What is 2+2?")
    assert len(trivial) <= 5

    demanding = select_heads_for_complexity(
        "We need to design a secure architecture for production with compliance requirements"
    )
    assert len(demanding) == 11
|
||
|
|
|
||
|
|
|
||
|
|
def test_run_dvadasa_native_reasoning():
    """Dvādaśa produces a substantive final answer using native reasoning (no external LLM)."""
    bus = EventBus()
    state = StateManager()
    orch = Orchestrator(event_bus=bus, state_manager=state)

    # adapter=None => uses NativeReasoningProvider for heads, NativeAdapter for Witness
    heads = create_all_content_heads(adapter=None)
    for head_id, agent in list(heads.items())[:3]:  # Just Logic, Research, Systems
        orch.register_agent(head_id.value, agent)
    orch.register_agent(HeadId.WITNESS.value, WitnessAgent(adapter=None))

    task_id = orch.submit_task(goal="What is the best approach for secure authentication?")
    final = run_dvadasa(
        orch, task_id, "What is the best approach for secure authentication?", event_bus=bus
    )

    assert final is not None
    assert final.final_answer
    # A real synthesized answer, not a stub fragment.
    assert len(final.final_answer) > 20
    assert final.confidence_score >= 0
|