44 lines
1.5 KiB
Python
44 lines
1.5 KiB
Python
"""Chain-of-thought: prompt structure and trace storage."""
|
|
|
|
from typing import Any
|
|
|
|
from fusionagi.adapters.base import LLMAdapter
|
|
|
|
COT_SYSTEM = """You reason step by step. For each step, state your thought clearly.
|
|
Output your final conclusion or recommendation after your reasoning."""
|
|
|
|
|
|
def build_cot_messages(
|
|
query: str,
|
|
context: str | None = None,
|
|
trace_so_far: list[str] | None = None,
|
|
) -> list[dict[str, str]]:
|
|
"""Build message list for chain-of-thought: system + optional context + user query."""
|
|
messages: list[dict[str, str]] = [{"role": "system", "content": COT_SYSTEM}]
|
|
if context:
|
|
messages.append({"role": "user", "content": f"Context:\n{context}\n\nQuery: {query}"})
|
|
else:
|
|
messages.append({"role": "user", "content": query})
|
|
if trace_so_far:
|
|
assistant_content = "\n".join(trace_so_far)
|
|
messages.append({"role": "assistant", "content": assistant_content})
|
|
messages.append({"role": "user", "content": "Continue."})
|
|
return messages
|
|
|
|
|
|
def run_chain_of_thought(
    adapter: LLMAdapter,
    query: str,
    context: str | None = None,
    trace_so_far: list[str] | None = None,
    **kwargs: Any,
) -> tuple[str, list[str]]:
    """
    Execute a single chain-of-thought step against *adapter*.

    Builds the CoT prompt, requests a completion, and returns the raw
    response together with the updated trace: the prior entries plus the
    new response, as a fresh list (the caller's ``trace_so_far`` is not
    mutated). Feed the returned trace back in as ``trace_so_far`` to
    continue a multi-step chain. Extra ``kwargs`` are forwarded to
    ``adapter.complete``.
    """
    prompt = build_cot_messages(query, context=context, trace_so_far=trace_so_far)
    reply = adapter.complete(prompt, **kwargs)
    updated_trace = list(trace_so_far or [])
    updated_trace.append(reply)
    return reply, updated_trace
|