"""
Native adapter: implements LLMAdapter using FusionAGI's internal reasoning.

No external API calls. Used for synthesis (e.g. Witness compose) when operating
in fully native AGI mode.
"""

from typing import Any

from fusionagi.adapters.base import LLMAdapter


def _synthesize_from_messages(messages: list[dict[str, Any]]) -> str:
|
|
"""
|
|
Synthesize narrative from message content using native logic only.
|
|
Extracts head summaries and agreed claims, produces coherent narrative.
|
|
"""
|
|
if not messages:
|
|
return ""
|
|
|
|
content_parts: list[str] = []
|
|
for msg in messages:
|
|
content = msg.get("content", "")
|
|
if isinstance(content, str) and content.strip():
|
|
content_parts.append(content)
|
|
|
|
if not content_parts:
|
|
return ""
|
|
|
|
full_content = "\n".join(content_parts)
|
|
|
|
# Extract "User asked:" for context
|
|
user_prompt = ""
|
|
if "User asked:" in full_content:
|
|
idx = full_content.index("User asked:") + len("User asked:")
|
|
end = full_content.find("\n\n", idx)
|
|
user_prompt = full_content[idx:end if end > 0 else None].strip()
|
|
|
|
narrative_parts: list[str] = []
|
|
|
|
if user_prompt:
|
|
truncated = user_prompt[:120] + ("..." if len(user_prompt) > 120 else "")
|
|
narrative_parts.append(f"Regarding your question: {truncated}")
|
|
|
|
# Extract head summaries
|
|
if "Head summaries:" in full_content:
|
|
start = full_content.index("Head summaries:") + len("Head summaries:")
|
|
end = full_content.find("\n\nAgreed claims:", start)
|
|
if end < 0:
|
|
end = full_content.find("Agreed claims:", start)
|
|
if end < 0:
|
|
end = len(full_content)
|
|
summaries = full_content[start:end].strip()
|
|
for line in summaries.split("\n"):
|
|
line = line.strip()
|
|
if line.startswith("-") and ":" in line:
|
|
narrative_parts.append(line[1:].strip())
|
|
|
|
# Extract agreed claims as key points
|
|
if "Agreed claims:" in full_content:
|
|
start = full_content.index("Agreed claims:") + len("Agreed claims:")
|
|
rest = full_content[start:].strip()
|
|
claims_section = rest.split("\n\nDisputed:")[0].split("\n\n")[0]
|
|
claim_lines = [ln.strip()[1:].strip() for ln in claims_section.split("\n") if ln.strip().startswith("-")]
|
|
for c in claim_lines[:5]:
|
|
if " (confidence:" in c:
|
|
c = c.split(" (confidence:")[0].strip()
|
|
if c:
|
|
narrative_parts.append(c)
|
|
|
|
if not narrative_parts:
|
|
paragraphs = [p.strip() for p in full_content.split("\n\n") if len(p.strip()) > 20]
|
|
narrative_parts = paragraphs[:5] if paragraphs else [full_content[:500]]
|
|
|
|
return "\n\n".join(narrative_parts)
|
|
|
|
|
|
class NativeAdapter(LLMAdapter):
|
|
"""
|
|
Adapter that uses FusionAGI's native synthesis—no external LLM calls.
|
|
|
|
For complete(): synthesizes narrative from message content.
|
|
For complete_structured(): returns None (use NativeReasoningProvider for heads).
|
|
"""
|
|
|
|
def complete(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
**kwargs: Any,
|
|
) -> str:
|
|
"""Synthesize response from message content using native logic."""
|
|
return _synthesize_from_messages(messages)
|
|
|
|
def complete_structured(
|
|
self,
|
|
messages: list[dict[str, str]],
|
|
schema: dict[str, Any] | None = None,
|
|
**kwargs: Any,
|
|
) -> Any:
|
|
"""Not supported; use NativeReasoningProvider for structured HeadOutput."""
|
|
return None
|