feat:统一replyer在是否多模态下的表现,提高一致性和通用性,新增模型visual参数

This commit is contained in:
SengokuCola
2026-04-11 16:41:00 +08:00
parent 3ea2bf1059
commit c0230fc313
20 changed files with 323 additions and 1802 deletions

View File

@@ -32,7 +32,7 @@ from src.services.llm_service import LLMServiceClient
from .builtin_tool import get_builtin_tools
from .context_messages import AssistantMessage, LLMContextMessage, ToolResultMessage
from .history_utils import drop_orphan_tool_results
from .prompt_cli_renderer import PromptCLIVisualizer
from .display.prompt_cli_renderer import PromptCLIVisualizer
TIMING_GATE_TOOL_NAMES = {"continue", "no_reply", "wait"}

View File

@@ -0,0 +1,33 @@
"""Maisaka 展示模块。"""
from .display_utils import (
build_tool_call_summary_lines,
format_token_count,
format_tool_call_for_display,
get_request_panel_style,
get_role_badge_label,
get_role_badge_style,
)
from .prompt_cli_renderer import PromptCLIVisualizer
from .prompt_preview_logger import PromptPreviewLogger
from .stage_status_board import (
disable_stage_status_board,
enable_stage_status_board,
remove_stage_status,
update_stage_status,
)
__all__ = [
"PromptCLIVisualizer",
"PromptPreviewLogger",
"build_tool_call_summary_lines",
"disable_stage_status_board",
"enable_stage_status_board",
"format_token_count",
"format_tool_call_for_display",
"get_request_panel_style",
"get_role_badge_label",
"get_role_badge_style",
"remove_stage_status",
"update_stage_status",
]

View File

@@ -4,15 +4,15 @@ from typing import Any
_REQUEST_PANEL_STYLE_MAP: dict[str, tuple[str, str]] = {
"planner": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - \u5bf9\u8bdd\u5355\u6b65", "green"),
"timing_gate": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - Timing Gate \u5b50\u4ee3\u7406", "bright_magenta"),
"replyer": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u56de\u590d\u5668 Prompt", "bright_yellow"),
"planner": ("MaiSaka 大模型请求 - 对话单步", "green"),
"timing_gate": ("MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"),
"replyer": ("MaiSaka 回复器 Prompt", "bright_yellow"),
"emotion": ("MaiSaka Emotion Tool Prompt", "bright_cyan"),
"sub_agent": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - \u5b50\u4ee3\u7406", "bright_blue"),
"sub_agent": ("MaiSaka 大模型请求 - 子代理", "bright_blue"),
}
_DEFAULT_REQUEST_PANEL_STYLE: tuple[str, str] = (
"\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - \u5bf9\u8bdd\u5355\u6b65",
"MaiSaka 大模型请求 - 对话单步",
"cyan",
)
@@ -24,10 +24,10 @@ _ROLE_BADGE_STYLE_MAP: dict[str, str] = {
}
_ROLE_BADGE_LABEL_MAP: dict[str, str] = {
"system": "\u7cfb\u7edf",
"user": "\u7528\u6237",
"assistant": "\u52a9\u624b",
"tool": "\u5de5\u5177",
"system": "系统",
"user": "用户",
"assistant": "助手",
"tool": "工具",
}
@@ -55,7 +55,7 @@ def get_role_badge_style(role: str) -> str:
def get_role_badge_label(role: str) -> str:
"""返回角色标签对应的展示文案。"""
return _ROLE_BADGE_LABEL_MAP.get(role, "\u672a\u77e5")
return _ROLE_BADGE_LABEL_MAP.get(role, "未知")
def format_tool_call_for_display(tool_call: Any) -> dict[str, Any]:

View File

@@ -181,6 +181,16 @@ class PromptCLIVisualizer:
padding=(0, 1),
)
@staticmethod
def _extract_image_pair(item: Any) -> tuple[str, str] | None:
    """Return (image_format, image_base64) when *item* is a 2-element
    tuple or list of strings; otherwise return None.

    Accepts both tuple and list forms because image fragments may have
    been round-tripped through serialization (which turns tuples into
    lists) — TODO confirm against the serializer used upstream.
    """
    if isinstance(item, (tuple, list)) and len(item) == 2:
        image_format, image_base64 = item
        # Both members must be strings for this to count as an image pair.
        if isinstance(image_format, str) and isinstance(image_base64, str):
            return image_format, image_base64
    return None
@classmethod
def _render_message_content(cls, content: Any, settings: PromptImageDisplaySettings) -> RenderableType:
if isinstance(content, str):
@@ -192,11 +202,11 @@ class PromptCLIVisualizer:
if isinstance(item, str):
parts.append(Text(item))
continue
if isinstance(item, tuple) and len(item) == 2:
image_format, image_base64 = item
if isinstance(image_format, str) and isinstance(image_base64, str):
parts.append(cls._render_image_item(image_format, image_base64, settings))
continue
image_pair = cls._extract_image_pair(item)
if image_pair is not None:
image_format, image_base64 = image_pair
parts.append(cls._render_image_item(image_format, image_base64, settings))
continue
if isinstance(item, dict) and item.get("type") == "text" and isinstance(item.get("text"), str):
parts.append(Text(item["text"]))
else:
@@ -218,8 +228,9 @@ class PromptCLIVisualizer:
if isinstance(item, str):
parts.append(item)
continue
if isinstance(item, tuple) and len(item) == 2:
image_format, image_base64 = item
image_pair = cls._extract_image_pair(item)
if image_pair is not None:
image_format, image_base64 = image_pair
approx_size = max(0, len(str(image_base64)) * 3 // 4)
parts.append(f"[图片 image/{image_format} {approx_size} B]")
continue
@@ -395,8 +406,9 @@ class PromptCLIVisualizer:
if isinstance(item, str):
parts.append(f"<pre>{html.escape(item)}</pre>")
continue
if isinstance(item, tuple) and len(item) == 2:
image_format, image_base64 = item
image_pair = cls._extract_image_pair(item)
if image_pair is not None:
image_format, image_base64 = image_pair
image_html = cls._render_image_item_html(str(image_format), str(image_base64))
parts.append(image_html)
continue

View File

@@ -8,6 +8,7 @@ from pathlib import Path
from typing import Dict
from uuid import uuid4
class PromptPreviewLogger:
"""负责保存 Maisaka Prompt 预览文件并控制目录容量。"""

View File

@@ -8,8 +8,8 @@ from typing import Any
import json
import os
import sys
import traceback
import time
import traceback
def _clear_screen() -> None:

View File

@@ -34,10 +34,10 @@ from src.plugin_runtime.hook_payloads import deserialize_prompt_messages
from .chat_loop_service import ChatResponse, MaisakaChatLoopService
from .context_messages import LLMContextMessage
from .display_utils import build_tool_call_summary_lines, format_token_count
from .prompt_cli_renderer import PromptCLIVisualizer
from .display.display_utils import build_tool_call_summary_lines, format_token_count
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .display.stage_status_board import remove_stage_status, update_stage_status
from .reasoning_engine import MaisakaReasoningEngine
from .stage_status_board import remove_stage_status, update_stage_status
from .tool_provider import MaisakaBuiltinToolProvider
logger = get_logger("maisaka_runtime")