
Implement custom console for MagenticOne CLI with enhanced message re… #4812

Draft · wants to merge 8 commits into main
@@ -1,10 +1,10 @@
 import argparse
 import asyncio
+import fcntl
+import os
+import sys

-from autogen_agentchat.ui import Console
 from autogen_ext.models.openai import OpenAIChatCompletionClient
 from autogen_ext.teams.magentic_one import MagenticOne
+from autogen_ext.teams.rich_console import RichConsole

def main() -> None:
@@ -36,11 +36,15 @@ def main() -> None:
     async def run_task(task: str, hil_mode: bool) -> None:
         client = OpenAIChatCompletionClient(model="gpt-4o")
         m1 = MagenticOne(client=client, hil_mode=hil_mode)
-        await Console(m1.run_stream(task=task))
+        await RichConsole(m1.run_stream(task=task))

     task = args.task[0]
-    asyncio.run(run_task(task, not args.no_hil))
+    asyncio.run(asyncio.wait_for(run_task(task, not args.no_hil), timeout=300))


 if __name__ == "__main__":
+    fd = sys.stdout.fileno()
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+
     main()
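
Note on the __main__ block: if a parent process leaves stdout in non-blocking mode, large Rich writes can fail with BlockingIOError, so the fcntl calls force the descriptor back to blocking before main() runs; the asyncio.wait_for wrapper additionally caps a run at 300 seconds, raising asyncio.TimeoutError beyond that. A standalone sketch of the same stdout fix (the helper name is illustrative, not part of the PR):

import fcntl
import os
import sys


def ensure_blocking_stdout() -> None:
    """Clear O_NONBLOCK on stdout so large console writes cannot hit BlockingIOError."""
    fd = sys.stdout.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    if flags & os.O_NONBLOCK:
        fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)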
python/packages/autogen-ext/src/autogen_ext/teams/rich_console.py (new file: 117 additions, 0 deletions)
Review comment from a collaborator:
Perhaps put it inside autogen_ext.ui.rich to match the module layout in agentchat.
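
If the module is relocated as suggested, the CLI import would presumably change along these lines (a hypothetical path mirroring autogen_agentchat.ui, not something this PR defines):

# Hypothetical import if rich_console.py moves under autogen_ext.ui:
from autogen_ext.ui import RichConsole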

@@ -0,0 +1,117 @@
import os
import sys
import time
from typing import AsyncGenerator, List, Optional, TypeVar, cast

from autogen_agentchat.base import Response, TaskResult
from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage
from autogen_core import Image
from autogen_core.models import RequestUsage
from rich.console import Console
from rich.panel import Panel
from rich.text import Text

T = TypeVar("T", bound=TaskResult | Response)


def _is_running_in_iterm() -> bool:
    return os.getenv("TERM_PROGRAM") == "iTerm.app"


def _is_output_a_tty() -> bool:
    return sys.stdout.isatty()


def _image_to_iterm(image: Image) -> str:
    # iTerm2 inline-image escape sequence: OSC 1337 ; File=inline=1 : <base64> BEL.
    image_data = image.to_base64()
    return f"\033]1337;File=inline=1:{image_data}\a\n"


def _message_to_str(message: AgentEvent | ChatMessage, *, render_image_iterm: bool = False) -> str:
    if isinstance(message, MultiModalMessage):
        result: List[str] = []
        for c in message.content:
            if isinstance(c, str):
                result.append(c)
            elif render_image_iterm:
                result.append(_image_to_iterm(c))
            else:
                result.append("<image>")
        return "\n".join(result)
    return f"{message.content}"
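
For reference, _image_to_iterm above emits iTerm2's proprietary inline-image escape (often referred to as OSC 1337). A self-contained sketch of the same sequence outside autogen (the demo.png path is illustrative):

import base64
import sys

# ESC ] 1337 ; File=inline=1 : <base64 data> BEL renders the image inline in iTerm2.
with open("demo.png", "rb") as f:
    payload = base64.b64encode(f.read()).decode("ascii")
sys.stdout.write(f"\033]1337;File=inline=1:{payload}\a\n")
sys.stdout.flush()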


async def RichConsole(
    stream: AsyncGenerator[AgentEvent | ChatMessage | T, None],
    *,
    no_inline_images: bool = False,
    primary_color: str = "magenta",
) -> T:
    """Consume a message stream, rendering each message in a Rich panel.

    Returns the last TaskResult or Response processed.
    """
    # Inline images require iTerm2, a real TTY, and the feature not being disabled.
    render_image_iterm = _is_running_in_iterm() and _is_output_a_tty() and not no_inline_images
    start_time = time.time()
    total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0)

    last_processed: Optional[T] = None
    console = Console()

    async for message in stream:
        if isinstance(message, TaskResult):
            duration = time.time() - start_time
            output = (
                f"Number of messages: {len(message.messages)}\n"
                f"Finish reason: {message.stop_reason}\n"
                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
                f"Total completion tokens: {total_usage.completion_tokens}\n"
                f"Duration: {duration:.2f} seconds\n"
            )
            console.print(Panel(output, title="Summary"))
            last_processed = message  # type: ignore

        elif isinstance(message, Response):
            duration = time.time() - start_time

            output = Text.from_markup(f"{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}")
            if message.chat_message.models_usage:
                output.append(
                    f"\n[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]"
                )
                total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens
                total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens
            console.print(
                Panel(output, title=f"[bold {primary_color}]{message.chat_message.source}[/bold {primary_color}]")
            )

            if message.inner_messages is not None:
                num_inner_messages = len(message.inner_messages)
            else:
                num_inner_messages = 0
            output = (
                f"Number of inner messages: {num_inner_messages}\n"
                f"Total prompt tokens: {total_usage.prompt_tokens}\n"
                f"Total completion tokens: {total_usage.completion_tokens}\n"
                f"Duration: {duration:.2f} seconds\n"
            )
            console.print(Panel(output, title="Summary"))
            last_processed = message  # type: ignore

        else:
            message = cast(AgentEvent | ChatMessage, message)  # type: ignore
            output = Text.from_markup(f"{_message_to_str(message, render_image_iterm=render_image_iterm)}")
            if message.models_usage:
                output.append(
                    f"\n[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]"
                )
                total_usage.completion_tokens += message.models_usage.completion_tokens
                total_usage.prompt_tokens += message.models_usage.prompt_tokens
            console.print(Panel(output, title=f"[bold {primary_color}]{message.source}[/bold {primary_color}]"))
            # iTerm escape sequences must bypass Rich and go straight to stdout.
            if render_image_iterm and isinstance(message, MultiModalMessage):
                for c in message.content:
                    if isinstance(c, Image):
                        print(_image_to_iterm(c))

    if last_processed is None:
        raise ValueError("No TaskResult or Response was processed.")

    return last_processed
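
A minimal usage sketch of RichConsole outside the MagenticOne CLI; the fake stream stands in for m1.run_stream(task=...), and the TextMessage/TaskResult constructor arguments are assumptions about the autogen_agentchat API rather than something this PR defines:

import asyncio

from autogen_agentchat.base import TaskResult
from autogen_agentchat.messages import TextMessage
from autogen_ext.teams.rich_console import RichConsole


async def fake_stream():
    # Mimic run_stream(): yield chat messages, then a terminal TaskResult.
    msg = TextMessage(content="Hello from a fake agent.", source="assistant")
    yield msg
    yield TaskResult(messages=[msg], stop_reason="demo complete")


async def main() -> None:
    result = await RichConsole(fake_stream())
    print(f"Stop reason: {result.stop_reason}")


asyncio.run(main())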