FusionAGI/examples/multimodal_ui_example.py

"""Example: Using the Multi-Modal User Interface."""
import asyncio
from fusionagi import Orchestrator, EventBus, StateManager
from fusionagi.interfaces.multimodal_ui import MultiModalUI
from fusionagi.interfaces.voice import VoiceInterface, VoiceLibrary, VoiceProfile
from fusionagi.interfaces.conversation import ConversationManager, ConversationTuner, ConversationStyle
from fusionagi.interfaces.base import ModalityType


async def main() -> None:
    """Demonstrate multi-modal UI usage."""
    # Initialize core FusionAGI components
    bus = EventBus()
    state = StateManager()
    orch = Orchestrator(event_bus=bus, state_manager=state)

    # Set up voice interface
    voice_library = VoiceLibrary()
    voice_library.add_voice(VoiceProfile(
        name="Assistant Voice",
        language="en-US",
        style="friendly",
    ))
    voice_interface = VoiceInterface(
        voice_library=voice_library,
        stt_provider="whisper",
        tts_provider="system",
    )
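
    # Illustrative aside (not part of the original example): the library
    # presumably tracks its registered profiles. Assuming a hypothetical
    # list_voices() accessor, guarded with hasattr so the demo still runs
    # if this snapshot of the API lacks it:
    if hasattr(voice_library, "list_voices"):
        for profile in voice_library.list_voices():
            print(f"Registered voice: {profile.name} ({profile.language})")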

    # Set up conversation manager
    tuner = ConversationTuner()
    tuner.register_style("default", ConversationStyle(
        formality="neutral",
        verbosity="balanced",
        empathy_level=0.7,
    ))
    conv_manager = ConversationManager(tuner=tuner)

    # Create multi-modal UI
    ui = MultiModalUI(
        orchestrator=orch,
        conversation_manager=conv_manager,
        voice_interface=voice_interface,
    )

    print("=== FusionAGI Multi-Modal User Interface ===\n")

    # ========== Session Creation ==========
    print("1. Creating User Session...")
    session_id = ui.create_session(
        user_id="demo_user",
        preferred_modalities=[ModalityType.TEXT, ModalityType.VOICE],
        accessibility_settings={
            "high_contrast": False,
            "screen_reader": False,
            "large_text": False,
        },
    )
    print(f" ✓ Session created: {session_id[:16]}...")
    print(" ✓ Active modalities: TEXT, VOICE\n")
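
    # Illustrative aside (not part of the original example): accessibility
    # settings could plausibly change mid-session. Assuming a hypothetical
    # update_accessibility() method, guarded so the demo runs either way:
    if hasattr(ui, "update_accessibility"):
        ui.update_accessibility(session_id, {"large_text": True})
        print(" ✓ Accessibility settings updated (large_text=True)")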

    # ========== Multi-Modal Output ==========
    print("2. Sending Multi-Modal Messages...")
    await ui.send_to_user(
        session_id,
        "Welcome to FusionAGI! I can interact with you through text and voice.",
        modalities=[ModalityType.TEXT],
    )
    print(" ✓ Sent welcome message (TEXT)")
    await ui.send_to_user(
        session_id,
        "How can I assist you today?",
        modalities=[ModalityType.TEXT, ModalityType.VOICE],
    )
    print(" ✓ Sent greeting (TEXT + VOICE)\n")

    # ========== Conversation ==========
    print("3. Conversational Interaction...")
    user_inputs = [
        "What can you do?",
        "Tell me about your capabilities",
        "Can you help me with a task?",
    ]
    for user_input in user_inputs:
        print(f" User: {user_input}")
        response = await ui.converse(session_id, user_input)
        print(f" Agent: {response}\n")
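
    # Illustrative aside (not part of the original example): a voice-first
    # turn would route the utterance through the STT provider configured
    # above ("whisper"). Assuming a hypothetical transcribe() helper and a
    # placeholder audio path, guarded so the demo runs either way:
    if hasattr(voice_interface, "transcribe"):
        heard = voice_interface.transcribe("path/to/recording.wav")
        response = await ui.converse(session_id, heard)
        print(f" Agent (via voice): {response}\n")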

    # ========== Task Submission ==========
    print("4. Interactive Task Submission...")
    task_id = await ui.submit_task_interactive(
        session_id,
        goal="Analyze system performance and generate report",
        constraints={"format": "markdown", "max_length": 1000},
    )
    print(f" ✓ Task submitted: {task_id[:8]}...")
    print(" ✓ User will receive real-time updates\n")
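
    # Illustrative aside (not part of the original example): after
    # submission a caller would typically poll or subscribe for progress.
    # Assuming a hypothetical get_task_status() accessor, guarded so the
    # demo runs either way:
    if hasattr(ui, "get_task_status"):
        print(f" Task status: {ui.get_task_status(task_id)}")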

    # ========== Modality Management ==========
    print("5. Managing Modalities...")
    # Enable haptic feedback (if available)
    success = ui.enable_modality(session_id, ModalityType.HAPTIC)
    if success:
        print(" ✓ Haptic modality enabled")
    else:
        print(" Haptic modality not available")

    # Check available modalities
    available = ui.get_available_modalities()
    print(f" Available modalities: {[m.value for m in available]}\n")
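
    # Illustrative aside (not part of the original example): enable_modality()
    # suggests a symmetric disable. Assuming a hypothetical disable_modality()
    # counterpart, guarded so the demo runs either way:
    if hasattr(ui, "disable_modality") and success:
        ui.disable_modality(session_id, ModalityType.HAPTIC)
        print(" Haptic modality disabled again")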

    # ========== Session Statistics ==========
    print("6. Session Statistics...")
    stats = ui.get_session_statistics(session_id)
    print(f" Session ID: {stats['session_id'][:16]}...")
    print(f" User ID: {stats['user_id']}")
    print(f" Active modalities: {stats['active_modalities']}")
    print(f" Conversation turns: {stats['conversation_turns']}")
    print(f" Started at: {stats['started_at']}")
    print()

    # ========== Session Cleanup ==========
    print("7. Ending Session...")
    ui.end_session(session_id)
    print(" ✓ Session ended\n")

    print("=== Multi-Modal UI Demo Complete ===")


if __name__ == "__main__":
    asyncio.run(main())