From faae3edadf8c3cc26744cd436832de8bf4001e43 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Sat, 4 Apr 2026 15:53:31 +0800
Subject: [PATCH] feat: persist MaiMai prompt previews to a log folder; want
 me to catch the replyer's log for you, nice and steady?
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add a PromptPreviewLogger that writes each Maisaka prompt preview (an HTML
view plus a plain-text dump) under logs/maisaka_prompt/<category>/<chat_id>/
and trims the oldest previews once a per-chat cap is exceeded. Route the
replyer and planner prompt/result output through shared Rich panels instead
of raw logger.info dumps, and drop the noisy forward-node debug logging in
message_utils.
---
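
Notes (this section sits between the "---" marker and the diffstat, so
git-am ignores it): a minimal usage sketch of the new PromptPreviewLogger,
assuming src.config.config.global_config is importable and using a
hypothetical chat id; paths and trim behavior follow save_preview_files()
and _trim_overflow() in the diff below.

    from src.maisaka.prompt_preview_logger import PromptPreviewLogger

    # Both files share one "<ms-timestamp>_<uuid8>" stem, so _trim_overflow()
    # groups them by stem and deletes the oldest groups together once the
    # per-chat cap (chat.plan_reply_log_max_per_chat, default 1000) is hit.
    saved = PromptPreviewLogger.save_preview_files(
        chat_id="qq_123456",  # hypothetical id; sanitized to [A-Za-z0-9._-]
        category="replyer",   # first-level folder under logs/maisaka_prompt/
        files={".html": "<p>preview</p>", ".txt": "raw prompt text"},
    )
    print(saved[".txt"])  # .../logs/maisaka_prompt/replyer/qq_123456/<stem>.txt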
 src/chat/replyer/maisaka_generator_multi.py |  69 +++++-
 src/common/logger_color_and_mapping.py      |   2 +
 src/maisaka/chat_loop_service.py            |  39 +--
 src/maisaka/prompt_cli_renderer.py          | 254 +++++++++++++++++---
 src/maisaka/prompt_preview_logger.py        |  83 +++++++
 src/maisaka/reasoning_engine.py             |   3 +
 src/maisaka/runtime.py                      |  35 ++-
 src/plugin_runtime/host/message_utils.py    |  15 --
 8 files changed, 413 insertions(+), 87 deletions(-)
 create mode 100644 src/maisaka/prompt_preview_logger.py

diff --git a/src/chat/replyer/maisaka_generator_multi.py b/src/chat/replyer/maisaka_generator_multi.py
index a3befb72..626dac5a 100644
--- a/src/chat/replyer/maisaka_generator_multi.py
+++ b/src/chat/replyer/maisaka_generator_multi.py
@@ -1,15 +1,20 @@
+import random
+import time
 from dataclasses import dataclass, field
 from datetime import datetime
 from typing import Dict, List, Optional, Tuple
-import random
-import time

+from rich.console import Group, RenderableType
+from rich.panel import Panel
+from rich.text import Text
 from sqlmodel import select

 from src.chat.message_receive.chat_manager import BotChatSession
+from src.chat.message_receive.message import SessionMessage
+from src.cli.console import console
 from src.common.database.database import get_db_session
-from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
 from src.common.database.database_model import Expression
+from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
 from src.common.data_models.reply_generation_data_models import (
     GenerationMetrics,
     LLMCompletionResult,
@@ -23,9 +28,15 @@
 from src.core.types import ActionInfo
 from src.llm_models.payload_content.message import ImageMessagePart, Message, MessageBuilder, RoleType, TextMessagePart
 from src.services.llm_service import LLMServiceClient
-from src.chat.message_receive.message import SessionMessage
-from src.maisaka.context_messages import AssistantMessage, LLMContextMessage, ReferenceMessage, SessionBackedMessage, ToolResultMessage
+from src.maisaka.context_messages import (
+    AssistantMessage,
+    LLMContextMessage,
+    ReferenceMessage,
+    SessionBackedMessage,
+    ToolResultMessage,
+)
 from src.maisaka.message_adapter import clone_message_sequence, parse_speaker_content
+from src.maisaka.prompt_cli_renderer import PromptCLIVisualizer

 logger = get_logger("replyer")

@@ -509,9 +520,17 @@ class MaisakaReplyGenerator:
             return request_messages

         result.completion.request_prompt = prompt_preview
-
+        preview_chat_id = self._resolve_session_id(stream_id)
+        replyer_prompt_section: RenderableType | None = None
         if global_config.debug.show_replyer_prompt:
-            logger.info(f"\nMaisaka 回复器提示词:\n{prompt_preview}\n")
+            replyer_prompt_section = PromptCLIVisualizer.build_text_section(
+                prompt_preview,
+                category="replyer",
+                chat_id=preview_chat_id,
+                request_kind="replyer",
+                subtitle=f"会话流标识:{preview_chat_id}",
+                folded=global_config.debug.fold_maisaka_thinking,
+            )

         started_at = time.perf_counter()
         try:
@@ -550,5 +569,41 @@ class MaisakaReplyGenerator:
             f"总耗时毫秒={result.metrics.overall_ms} "
             f"已选表达编号={result.selected_expression_ids!r}"
         )
+        if global_config.debug.show_replyer_prompt or global_config.debug.show_replyer_reasoning:
+            summary_lines = [
+                f"会话流标识: {preview_chat_id or 'unknown'}",
+                f"总耗时: {result.metrics.overall_ms} ms",
+            ]
+            if result.selected_expression_ids:
+                summary_lines.append(f"表达习惯编号: {result.selected_expression_ids!r}")
+
+            renderables: List[RenderableType] = [Text("\n".join(summary_lines))]
+            if replyer_prompt_section is not None:
+                renderables.append(replyer_prompt_section)
+            if global_config.debug.show_replyer_reasoning and result.completion.reasoning_text:
+                renderables.append(
+                    Panel(
+                        Text(result.completion.reasoning_text),
+                        title="回复器思考",
+                        border_style="magenta",
+                        padding=(0, 1),
+                    )
+                )
+            renderables.append(
+                Panel(
+                    Text(response_text),
+                    title="回复结果",
+                    border_style="green",
+                    padding=(0, 1),
+                )
+            )
+            console.print(
+                Panel(
+                    Group(*renderables),
+                    title="MaiSaka 回复器结果",
+                    border_style="bright_yellow",
+                    padding=(0, 1),
+                )
+            )

         result.text_fragments = [response_text]
         return True, result
diff --git a/src/common/logger_color_and_mapping.py b/src/common/logger_color_and_mapping.py
index 863c9c1e..dc4bdbb2 100644
--- a/src/common/logger_color_and_mapping.py
+++ b/src/common/logger_color_and_mapping.py
@@ -31,6 +31,8 @@ MODULE_COLORS: Dict[str, Tuple[str, Optional[str], bool]] = {
     "llm_models": ("#008080", None, False),
     "remote": ("#6c6c6c", None, False),  # 深灰色,更不显眼
     "planner": ("#008080", None, False),
+    "maisaka_reasoning_engine": ("#008080", None, False),
+    "maisaka_runtime": ("#ff5fff", None, False),
     "relation": ("#af87af", None, False),  # 柔和的紫色,不刺眼
     # 聊天相关模块
     "hfc": ("#d787af", None, False),  # 柔和的粉色,不显眼但保持粉色系
diff --git a/src/maisaka/chat_loop_service.py b/src/maisaka/chat_loop_service.py
index dabf0091..f7c4ad6f 100644
--- a/src/maisaka/chat_loop_service.py
+++ b/src/maisaka/chat_loop_service.py
@@ -10,10 +10,8 @@
 import json
 import random
 from pydantic import BaseModel, Field as PydanticField
-from rich.console import Group
+from rich.console import RenderableType
 from rich.panel import Panel
-
-from src.cli.console import console
 from src.common.data_models.llm_service_data_models import LLMGenerationOptions
 from src.common.logger import get_logger
 from src.common.prompt_i18n import load_prompt
@@ -53,6 +51,7 @@ class ChatResponse:
     built_message_count: int
     completion_tokens: int
     total_tokens: int
+    prompt_section: Optional[RenderableType] = None


 class ToolFilterSelection(BaseModel):
@@ -765,30 +764,17 @@ class MaisakaChatLoopService:
         if isinstance(raw_tool_definitions, list):
            all_tools = [item for item in raw_tool_definitions if isinstance(item, dict)]

+        prompt_section: RenderableType | None = None
         if global_config.debug.show_maisaka_thinking:
-            panel_title, panel_border_style = PromptCLIVisualizer.get_request_panel_style(request_kind)
             image_display_mode: str = "path_link" if global_config.maisaka.show_image_path else "legacy"
-            if global_config.debug.fold_maisaka_thinking:
-                prompt_renderable = PromptCLIVisualizer.build_prompt_access_panel(
-                    built_messages,
-                    request_kind=request_kind,
-                    selection_reason=selection_reason,
-                    image_display_mode=image_display_mode,
-                )
-            else:
-                ordered_panels = PromptCLIVisualizer.build_prompt_panels(
-                    built_messages,
-                    image_display_mode=image_display_mode,
-                )
-                prompt_renderable = Group(*ordered_panels)
-            console.print(
-                Panel(
-                    prompt_renderable,
-                    title=panel_title,
-                    subtitle=selection_reason,
-                    border_style=panel_border_style,
-                    padding=(0, 1),
-                )
-            )
+            prompt_section = PromptCLIVisualizer.build_prompt_section(
+                built_messages,
+                category="planner" if request_kind != "timing_gate" else "timing_gate",
+                chat_id=self._session_id,
+                request_kind=request_kind,
+                selection_reason=selection_reason,
+                image_display_mode=image_display_mode,
+                folded=global_config.debug.fold_maisaka_thinking,
+            )

         request_started_at = perf_counter()
@@ -809,8 +795,6 @@ class MaisakaChatLoopService:
                 interrupt_flag=self._interrupt_flag,
             ),
         )
-        request_elapsed = perf_counter() - request_started_at
-        logger.info(f"规划器请求完成,耗时={request_elapsed:.3f} 秒")

         prompt_stats_text = PromptCLIVisualizer.build_prompt_stats_text(
             selected_history_count=len(selected_history),
@@ -865,6 +849,7 @@ class MaisakaChatLoopService:
             built_message_count=len(built_messages),
             completion_tokens=completion_tokens,
             total_tokens=total_tokens,
+            prompt_section=prompt_section,
         )

     @staticmethod
diff --git a/src/maisaka/prompt_cli_renderer.py b/src/maisaka/prompt_cli_renderer.py
index c60cbafd..9652eb7b 100644
--- a/src/maisaka/prompt_cli_renderer.py
+++ b/src/maisaka/prompt_cli_renderer.py
@@ -2,24 +2,26 @@

 from __future__ import annotations

-import hashlib
-import html
-import json
 from base64 import b64decode
 from dataclasses import dataclass
 from enum import Enum
 from pathlib import Path
-from urllib.parse import quote
 from typing import Any, Dict, List, Literal
+from urllib.parse import quote
+import hashlib
+import html
+import json
 import tempfile

 from pydantic import BaseModel, Field as PydanticField
 from rich.console import Group, RenderableType
-from rich.pretty import Pretty
 from rich.panel import Panel
+from rich.pretty import Pretty
 from rich.text import Text

+from .prompt_preview_logger import PromptPreviewLogger
+
 PROJECT_ROOT = Path(__file__).parent.parent.parent.absolute().resolve()
 DATA_IMAGE_DIR = PROJECT_ROOT / "data" / "images"
@@ -53,8 +55,6 @@ class _MessageRenderResult:
 class PromptCLIVisualizer:
     """负责构建 CLI 下 prompt 展示所需的所有可视化组件。"""

-    PROMPT_DUMP_DIR = Path(tempfile.gettempdir()) / "maisaka_prompt_dumps"
-
     @staticmethod
     def get_request_panel_style(request_kind: str) -> tuple[str, str]:
         """返回不同请求类型对应的标题与边框颜色。"""
@@ -62,6 +62,8 @@ class PromptCLIVisualizer:
         normalized_kind = str(request_kind or "planner").strip().lower()
         if normalized_kind == "timing_gate":
             return "MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"
+        if normalized_kind == "replyer":
+            return "MaiSaka 回复器 Prompt", "bright_yellow"
         if normalized_kind == "sub_agent":
             return "MaiSaka 大模型请求 - 子代理", "bright_blue"
         return "MaiSaka 大模型请求 - 对话单步", "cyan"
@@ -133,19 +135,9 @@ class PromptCLIVisualizer:

     @staticmethod
     def _build_file_uri(file_path: Path) -> str:
-        normalized = file_path.as_posix()
+        normalized = file_path.resolve().as_posix()
         return f"file:///{quote(normalized, safe='/:')}"

-    @classmethod
-    def _build_prompt_dump_base_path(cls, messages: list[Any]) -> Path:
-        cls.PROMPT_DUMP_DIR.mkdir(parents=True, exist_ok=True)
-        try:
-            payload = json.dumps(messages, ensure_ascii=False, default=str)
-        except Exception:
-            payload = repr(messages)
-        digest = hashlib.sha256(payload.encode("utf-8")).hexdigest()
-        return cls.PROMPT_DUMP_DIR / digest
-
     @staticmethod
     def _build_official_image_path(image_format: str, image_base64: str) -> Path | None:
         normalized_format = PromptCLIVisualizer._normalize_image_format(image_format)
@@ -611,15 +603,14 @@ class PromptCLIVisualizer:
         cls,
         messages: list[Any],
         *,
+        category: str,
+        chat_id: str,
         request_kind: str,
         selection_reason: str,
         image_display_mode: Literal["legacy", "path_link"],
-    ) -> Panel:
-        """构建用于查看完整 prompt 的入口面板。"""
+    ) -> RenderableType:
+        """Build the folded entry content for viewing the full prompt."""

-        base_path = cls._build_prompt_dump_base_path(messages)
-        prompt_dump_path = base_path.with_suffix(".txt")
-        prompt_dump_path.write_text(cls._build_prompt_dump_text(messages), encoding="utf-8")
         viewer_messages: list[dict[str, Any]] = []
         for message in messages:
             if isinstance(message, dict):
@@ -641,15 +632,22 @@
                 ]
             viewer_messages.append(normalized_message)

-        viewer_html_path = base_path.with_suffix(".html")
-        viewer_html_path.write_text(
-            cls._build_prompt_viewer_html(
-                viewer_messages,
-                request_kind=request_kind,
-                selection_reason=selection_reason,
-            ),
-            encoding="utf-8",
+        prompt_dump_text = cls._build_prompt_dump_text(messages)
+        viewer_html_text = cls._build_prompt_viewer_html(
+            viewer_messages,
+            request_kind=request_kind,
+            selection_reason=selection_reason,
         )
+        saved_paths = PromptPreviewLogger.save_preview_files(
+            chat_id,
+            category,
+            {
+                ".html": viewer_html_text,
+                ".txt": prompt_dump_text,
+            },
+        )
+        viewer_html_path = saved_paths[".html"]
+        prompt_dump_path = saved_paths[".txt"]

         viewer_uri = cls._build_file_uri(viewer_html_path)
         dump_uri = cls._build_file_uri(prompt_dump_path)
@@ -659,10 +657,198 @@ class PromptCLIVisualizer:
             Text.from_markup(f"[link={viewer_uri}]点击在浏览器打开富文本 Prompt 视图[/link]", style="bold green"),
             Text.from_markup(f"[link={dump_uri}]点击直接打开 Prompt 文本[/link]", style="cyan"),
         )
+        return body
+
+    @classmethod
+    def build_prompt_section(
+        cls,
+        messages: list[Any],
+        *,
+        category: str,
+        chat_id: str,
+        request_kind: str,
+        selection_reason: str,
+        image_display_mode: Literal["legacy", "path_link"],
+        folded: bool,
+    ) -> Panel:
+        """Build the prompt block embedded in the result panel."""
+
+        panel_title, panel_border_style = cls.get_request_panel_style(request_kind)
+        if folded:
+            prompt_renderable = cls.build_prompt_access_panel(
+                messages,
+                category=category,
+                chat_id=chat_id,
+                request_kind=request_kind,
+                selection_reason=selection_reason,
+                image_display_mode=image_display_mode,
+            )
+        else:
+            ordered_panels = cls.build_prompt_panels(
+                messages,
+                image_display_mode=image_display_mode,
+            )
+            prompt_renderable = Group(*ordered_panels)
+
         return Panel(
-            body,
-            title=Text(" Prompt 查看入口 ", style="bold white on blue"),
-            border_style="blue",
+            prompt_renderable,
+            title=panel_title,
+            subtitle=selection_reason,
+            border_style=panel_border_style,
             padding=(0, 1),
         )
+
+    @classmethod
+    def _build_text_preview_html(
+        cls,
+        content: str,
+        *,
+        request_kind: str,
+        subtitle: str,
+    ) -> str:
+        panel_title, _ = cls.get_request_panel_style(request_kind)
+        subtitle_html = f"<div class='subtitle'>{html.escape(subtitle)}</div>" if subtitle.strip() else ""
+        return f"""<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>{html.escape(panel_title)}</title>
+</head>
+<body>
+<div class="container">
+<div class="header">
+<div class="title">{html.escape(panel_title)}</div>
+{subtitle_html}
+</div>
+<div class="content">
+<pre>{html.escape(content)}</pre>
+</div>
+</div>
+</body>
+</html>
+ +""" + + @classmethod + def build_text_access_panel( + cls, + content: str, + *, + category: str, + chat_id: str, + request_kind: str, + subtitle: str, + ) -> RenderableType: + """构建文本型 Prompt 的折叠入口内容。""" + + html_content = cls._build_text_preview_html(content, request_kind=request_kind, subtitle=subtitle) + saved_paths = PromptPreviewLogger.save_preview_files( + chat_id, + category, + { + ".html": html_content, + ".txt": content, + }, + ) + viewer_html_path = saved_paths[".html"] + text_dump_path = saved_paths[".txt"] + viewer_uri = cls._build_file_uri(viewer_html_path) + dump_uri = cls._build_file_uri(text_dump_path) + + body = Group( + Text(f"富文本预览:{viewer_html_path}", style="bold green"), + Text(f"原始文本备份:{text_dump_path}", style="magenta"), + Text.from_markup(f"[link={viewer_uri}]点击在浏览器打开富文本 Prompt 视图[/link]", style="bold green"), + Text.from_markup(f"[link={dump_uri}]点击直接打开 Prompt 文本[/link]", style="cyan"), + ) + return body + + @classmethod + def build_text_section( + cls, + content: str, + *, + category: str, + chat_id: str, + request_kind: str, + subtitle: str, + folded: bool, + ) -> Panel: + """构建文本型 Prompt 的嵌入区块。""" + + panel_title, panel_border_style = cls.get_request_panel_style(request_kind) + if folded: + prompt_renderable = cls.build_text_access_panel( + content, + category=category, + chat_id=chat_id, + request_kind=request_kind, + subtitle=subtitle, + ) + else: + prompt_renderable = Text(content) + + return Panel( + prompt_renderable, + title=panel_title, + subtitle=subtitle, + border_style=panel_border_style, padding=(0, 1), ) diff --git a/src/maisaka/prompt_preview_logger.py b/src/maisaka/prompt_preview_logger.py new file mode 100644 index 00000000..35917156 --- /dev/null +++ b/src/maisaka/prompt_preview_logger.py @@ -0,0 +1,83 @@ +"""Maisaka Prompt 预览落盘器。""" + +from __future__ import annotations + +import re +import time +from pathlib import Path +from typing import Dict +from uuid import uuid4 + +from src.config.config import global_config + + +class PromptPreviewLogger: + """负责保存 Maisaka Prompt 预览文件并控制目录容量。""" + + _BASE_DIR = Path("logs") / "maisaka_prompt" + _TRIM_COUNT = 100 + _SAFE_NAME_PATTERN = re.compile(r"[^A-Za-z0-9._-]+") + + @classmethod + def _get_max_per_chat(cls) -> int: + """从配置中获取每个聊天流最大保存的预览数量。""" + + return getattr(global_config.chat, "plan_reply_log_max_per_chat", 1000) + + @classmethod + def _normalize_chat_id(cls, chat_id: str) -> str: + normalized_chat_id = cls._SAFE_NAME_PATTERN.sub("_", str(chat_id or "").strip()).strip("._") + if normalized_chat_id: + return normalized_chat_id + return "unknown_chat" + + @classmethod + def save_preview_files( + cls, + chat_id: str, + category: str, + files: Dict[str, str], + ) -> Dict[str, Path]: + """保存同一份 Prompt 预览的多个文件并执行超量清理。""" + + normalized_category = cls._normalize_chat_id(category) + chat_dir = (cls._BASE_DIR / normalized_category / cls._normalize_chat_id(chat_id)).resolve() + chat_dir.mkdir(parents=True, exist_ok=True) + stem = f"{int(time.time() * 1000)}_{uuid4().hex[:8]}" + saved_paths: Dict[str, Path] = {} + try: + for suffix, content in files.items(): + normalized_suffix = suffix if suffix.startswith(".") else f".{suffix}" + file_path = chat_dir / f"{stem}{normalized_suffix}" + file_path.write_text(content, encoding="utf-8") + saved_paths[normalized_suffix] = file_path + finally: + cls._trim_overflow(chat_dir) + return saved_paths + + @classmethod + def _trim_overflow(cls, chat_dir: Path) -> None: + """超过阈值时按批次删除最老的若干组预览文件。""" + + grouped_files: dict[str, list[Path]] = {} + for file_path in 
+            if not file_path.is_file():
+                continue
+            grouped_files.setdefault(file_path.stem, []).append(file_path)
+
+        max_per_chat = cls._get_max_per_chat()
+        if len(grouped_files) <= max_per_chat:
+            return
+
+        sorted_groups = sorted(
+            grouped_files.items(),
+            key=lambda item: min(path.stat().st_mtime for path in item[1]),
+        )
+        overflow_count = len(grouped_files) - max_per_chat
+        trim_count = min(len(sorted_groups), max(cls._TRIM_COUNT, overflow_count))
+        for _, file_group in sorted_groups[:trim_count]:
+            for old_file in file_group:
+                try:
+                    old_file.unlink()
+                except FileNotFoundError:
+                    continue
diff --git a/src/maisaka/reasoning_engine.py b/src/maisaka/reasoning_engine.py
index 73639d79..c1cd25e4 100644
--- a/src/maisaka/reasoning_engine.py
+++ b/src/maisaka/reasoning_engine.py
@@ -290,6 +290,7 @@ class MaisakaReasoningEngine:
                 planner_response=timing_response.content or "",
                 tool_calls=timing_response.tool_calls,
                 tool_results=timing_tool_results,
+                prompt_section=timing_response.prompt_section,
             )
             if timing_action != "continue":
                 logger.info(
@@ -340,6 +341,7 @@ class MaisakaReasoningEngine:
                 planner_response=response.content or "",
                 tool_calls=response.tool_calls,
                 tool_results=tool_result_summaries,
+                prompt_section=response.prompt_section,
             )
             if should_pause:
                 break
@@ -349,6 +351,7 @@ class MaisakaReasoningEngine:
             selected_history_count=response.selected_history_count,
             prompt_tokens=response.prompt_tokens,
             planner_response=response.content or "",
+            prompt_section=response.prompt_section,
         )
         if not response.content:
             break
diff --git a/src/maisaka/runtime.py b/src/maisaka/runtime.py
index 5e2341eb..c68998e8 100644
--- a/src/maisaka/runtime.py
+++ b/src/maisaka/runtime.py
@@ -5,6 +5,7 @@
 from typing import Any, Literal, Optional, Sequence
 import asyncio
 import time

+from rich.console import Group, RenderableType
 from rich.panel import Panel
 from rich.text import Text
@@ -493,6 +494,7 @@ class MaisakaHeartFlowChatting:
         planner_response: str = "",
         tool_calls: Optional[list[Any]] = None,
         tool_results: Optional[list[str]] = None,
+        prompt_section: Optional[RenderableType] = None,
     ) -> None:
         """在终端展示当前聊天流的上下文占用、规划结果与工具摘要。"""
         if not global_config.debug.show_maisaka_thinking: return
@@ -505,21 +507,46 @@ class MaisakaHeartFlowChatting:
             f"上下文占用: {selected_history_count}条 / {self._format_token_count(prompt_tokens)}",
         ]

+        renderables: list[RenderableType] = [Text("\n".join(body_lines))]
+        if prompt_section is not None:
+            renderables.append(prompt_section)
+
         normalized_response = planner_response.strip()
         if normalized_response:
-            body_lines.extend(["", "Maisaka 返回:", normalized_response])
+            renderables.append(
+                Panel(
+                    Text(normalized_response),
+                    title="Maisaka 返回",
+                    border_style="green",
+                    padding=(0, 1),
+                )
+            )

         normalized_tool_calls = self._build_tool_call_summary_lines(tool_calls or [])
         if normalized_tool_calls:
-            body_lines.extend(["", "工具调用:", *normalized_tool_calls])
+            renderables.append(
+                Panel(
+                    Text("\n".join(normalized_tool_calls)),
+                    title="工具调用",
+                    border_style="magenta",
+                    padding=(0, 1),
+                )
+            )

         normalized_tool_results = [result.strip() for result in tool_results or [] if isinstance(result, str) and result.strip()]
         if normalized_tool_results:
-            body_lines.extend(["", "工具结果:", *normalized_tool_results])
+            renderables.append(
+                Panel(
+                    Text("\n".join(normalized_tool_results)),
+                    title="工具结果",
+                    border_style="yellow",
+                    padding=(0, 1),
+                )
+            )

         console.print(
             Panel(
-                Text("\n".join(body_lines)),
+                Group(*renderables),
                 title="MaiSaka 上下文与结果",
                 border_style="bright_blue",
                 padding=(0, 1),
diff --git a/src/plugin_runtime/host/message_utils.py b/src/plugin_runtime/host/message_utils.py
index 28d72779..cc31824b 100644
--- a/src/plugin_runtime/host/message_utils.py
+++ b/src/plugin_runtime/host/message_utils.py
@@ -238,15 +238,6 @@ class PluginMessageUtils:
                 logger.info(f"解析转发节点时跳过非字典节点: {node!r}")
                 continue
             raw_content = node.get("content", [])
-            logger.info(
-                "开始解析转发节点: "
-                f"message_id={node.get('message_id')!r} "
-                f"user_id={node.get('user_id')!r} "
-                f"user_nickname={node.get('user_nickname')!r} "
-                f"user_cardname={node.get('user_cardname')!r} "
-                f"raw_content_type={type(raw_content).__name__} "
-                f"raw_content={raw_content!r}"
-            )
             node_components: List[StandardMessageComponents] = []
             if isinstance(raw_content, list):
                 node_components = [
@@ -254,12 +245,6 @@ class PluginMessageUtils:
                     for content in raw_content
                     if isinstance(content, dict)
                 ]
-            logger.info(
-                "转发节点解析结果: "
-                f"message_id={node.get('message_id')!r} "
-                f"component_types={[component.__class__.__name__ for component in node_components]!r} "
-                f"component_values={[getattr(component, 'text', None) for component in node_components]!r}"
-            )
             if not node_components:
                 logger.warning(
                     "转发节点内容为空,使用占位文本回退: "