feat:落库麦麦文件夹,需要我帮你稳稳的接住replyer的log吗?

feat2
This commit is contained in:
SengokuCola
2026-04-04 15:53:31 +08:00
parent 40e774ed39
commit faae3edadf
8 changed files with 413 additions and 87 deletions

View File

@@ -1,15 +1,20 @@
import random
import time
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import random
import time
from rich.console import Group, RenderableType
from rich.panel import Panel
from rich.text import Text
from sqlmodel import select
from src.chat.message_receive.chat_manager import BotChatSession
from src.chat.message_receive.message import SessionMessage
from src.cli.console import console
from src.common.database.database import get_db_session
from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
from src.common.database.database_model import Expression
from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
from src.common.data_models.reply_generation_data_models import (
GenerationMetrics,
LLMCompletionResult,
@@ -23,9 +28,15 @@ from src.core.types import ActionInfo
from src.llm_models.payload_content.message import ImageMessagePart, Message, MessageBuilder, RoleType, TextMessagePart
from src.services.llm_service import LLMServiceClient
from src.chat.message_receive.message import SessionMessage
from src.maisaka.context_messages import AssistantMessage, LLMContextMessage, ReferenceMessage, SessionBackedMessage, ToolResultMessage
from src.maisaka.context_messages import (
AssistantMessage,
LLMContextMessage,
ReferenceMessage,
SessionBackedMessage,
ToolResultMessage,
)
from src.maisaka.message_adapter import clone_message_sequence, parse_speaker_content
from src.maisaka.prompt_cli_renderer import PromptCLIVisualizer
logger = get_logger("replyer")
@@ -509,9 +520,17 @@ class MaisakaReplyGenerator:
return request_messages
result.completion.request_prompt = prompt_preview
preview_chat_id = self._resolve_session_id(stream_id)
replyer_prompt_section: RenderableType | None = None
if global_config.debug.show_replyer_prompt:
logger.info(f"\nMaisaka 回复器提示词:\n{prompt_preview}\n")
replyer_prompt_section = PromptCLIVisualizer.build_text_section(
prompt_preview,
category="replyer",
chat_id=preview_chat_id,
request_kind="replyer",
subtitle=f"会话流标识:{preview_chat_id}",
folded=global_config.debug.fold_maisaka_thinking,
)
started_at = time.perf_counter()
try:
@@ -550,5 +569,41 @@ class MaisakaReplyGenerator:
f"总耗时毫秒={result.metrics.overall_ms} "
f"已选表达编号={result.selected_expression_ids!r}"
)
if global_config.debug.show_replyer_prompt or global_config.debug.show_replyer_reasoning:
summary_lines = [
f"会话流标识: {preview_chat_id or 'unknown'}",
f"总耗时: {result.metrics.overall_ms} ms",
]
if result.selected_expression_ids:
summary_lines.append(f"表达习惯编号: {result.selected_expression_ids!r}")
renderables: List[RenderableType] = [Text("\n".join(summary_lines))]
if replyer_prompt_section is not None:
renderables.append(replyer_prompt_section)
if global_config.debug.show_replyer_reasoning and result.completion.reasoning_text:
renderables.append(
Panel(
Text(result.completion.reasoning_text),
title="回复器思考",
border_style="magenta",
padding=(0, 1),
)
)
renderables.append(
Panel(
Text(response_text),
title="回复结果",
border_style="green",
padding=(0, 1),
)
)
console.print(
Panel(
Group(*renderables),
title="MaiSaka 回复器结果",
border_style="bright_yellow",
padding=(0, 1),
)
)
result.text_fragments = [response_text]
return True, result

View File

@@ -31,6 +31,8 @@ MODULE_COLORS: Dict[str, Tuple[str, Optional[str], bool]] = {
"llm_models": ("#008080", None, False),
"remote": ("#6c6c6c", None, False), # 深灰色,更不显眼
"planner": ("#008080", None, False),
"maisaka_reasoning_engine": ("#008080", None, False),
"maisaka_runtime": ("#ff5fff", None, False),
"relation": ("#af87af", None, False), # 柔和的紫色,不刺眼
# 聊天相关模块
"hfc": ("#d787af", None, False), # 柔和的粉色,不显眼但保持粉色系

View File

@@ -10,10 +10,8 @@ import json
import random
from pydantic import BaseModel, Field as PydanticField
from rich.console import Group
from rich.console import RenderableType
from rich.panel import Panel
from src.cli.console import console
from src.common.data_models.llm_service_data_models import LLMGenerationOptions
from src.common.logger import get_logger
from src.common.prompt_i18n import load_prompt
@@ -53,6 +51,7 @@ class ChatResponse:
built_message_count: int
completion_tokens: int
total_tokens: int
prompt_section: Optional[RenderableType] = None
class ToolFilterSelection(BaseModel):
@@ -765,30 +764,17 @@ class MaisakaChatLoopService:
if isinstance(raw_tool_definitions, list):
all_tools = [item for item in raw_tool_definitions if isinstance(item, dict)]
prompt_section: RenderableType | None = None
if global_config.debug.show_maisaka_thinking:
panel_title, panel_border_style = PromptCLIVisualizer.get_request_panel_style(request_kind)
image_display_mode: str = "path_link" if global_config.maisaka.show_image_path else "legacy"
if global_config.debug.fold_maisaka_thinking:
prompt_renderable = PromptCLIVisualizer.build_prompt_access_panel(
built_messages,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
)
else:
ordered_panels = PromptCLIVisualizer.build_prompt_panels(
built_messages,
image_display_mode=image_display_mode,
)
prompt_renderable = Group(*ordered_panels)
console.print(
Panel(
prompt_renderable,
title=panel_title,
subtitle=selection_reason,
border_style=panel_border_style,
padding=(0, 1),
)
prompt_section = PromptCLIVisualizer.build_prompt_section(
built_messages,
category="planner" if request_kind != "timing_gate" else "timing_gate",
chat_id=self._session_id,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
folded=global_config.debug.fold_maisaka_thinking,
)
request_started_at = perf_counter()
@@ -809,8 +795,6 @@ class MaisakaChatLoopService:
interrupt_flag=self._interrupt_flag,
),
)
request_elapsed = perf_counter() - request_started_at
logger.info(f"规划器请求完成,耗时={request_elapsed:.3f}")
prompt_stats_text = PromptCLIVisualizer.build_prompt_stats_text(
selected_history_count=len(selected_history),
@@ -865,6 +849,7 @@ class MaisakaChatLoopService:
built_message_count=len(built_messages),
completion_tokens=completion_tokens,
total_tokens=total_tokens,
prompt_section=prompt_section,
)
@staticmethod

View File

@@ -2,24 +2,26 @@
from __future__ import annotations
import hashlib
import html
import json
from base64 import b64decode
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from urllib.parse import quote
from typing import Any, Dict, List, Literal
from urllib.parse import quote
import hashlib
import html
import json
import tempfile
from pydantic import BaseModel, Field as PydanticField
from rich.console import Group, RenderableType
from rich.pretty import Pretty
from rich.panel import Panel
from rich.pretty import Pretty
from rich.text import Text
from .prompt_preview_logger import PromptPreviewLogger
PROJECT_ROOT = Path(__file__).parent.parent.parent.absolute().resolve()
DATA_IMAGE_DIR = PROJECT_ROOT / "data" / "images"
@@ -53,8 +55,6 @@ class _MessageRenderResult:
class PromptCLIVisualizer:
"""负责构建 CLI 下 prompt 展示所需的所有可视化组件。"""
PROMPT_DUMP_DIR = Path(tempfile.gettempdir()) / "maisaka_prompt_dumps"
@staticmethod
def get_request_panel_style(request_kind: str) -> tuple[str, str]:
"""返回不同请求类型对应的标题与边框颜色。"""
@@ -62,6 +62,8 @@ class PromptCLIVisualizer:
normalized_kind = str(request_kind or "planner").strip().lower()
if normalized_kind == "timing_gate":
return "MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"
if normalized_kind == "replyer":
return "MaiSaka 回复器 Prompt", "bright_yellow"
if normalized_kind == "sub_agent":
return "MaiSaka 大模型请求 - 子代理", "bright_blue"
return "MaiSaka 大模型请求 - 对话单步", "cyan"
@@ -133,19 +135,9 @@ class PromptCLIVisualizer:
@staticmethod
def _build_file_uri(file_path: Path) -> str:
normalized = file_path.as_posix()
normalized = file_path.resolve().as_posix()
return f"file:///{quote(normalized, safe='/:')}"
@classmethod
def _build_prompt_dump_base_path(cls, messages: list[Any]) -> Path:
cls.PROMPT_DUMP_DIR.mkdir(parents=True, exist_ok=True)
try:
payload = json.dumps(messages, ensure_ascii=False, default=str)
except Exception:
payload = repr(messages)
digest = hashlib.sha256(payload.encode("utf-8")).hexdigest()
return cls.PROMPT_DUMP_DIR / digest
@staticmethod
def _build_official_image_path(image_format: str, image_base64: str) -> Path | None:
normalized_format = PromptCLIVisualizer._normalize_image_format(image_format)
@@ -611,15 +603,14 @@ class PromptCLIVisualizer:
cls,
messages: list[Any],
*,
category: str,
chat_id: str,
request_kind: str,
selection_reason: str,
image_display_mode: Literal["legacy", "path_link"],
) -> Panel:
"""构建用于查看完整 prompt 的入口面板"""
) -> RenderableType:
"""构建用于查看完整 prompt 的折叠入口内容"""
base_path = cls._build_prompt_dump_base_path(messages)
prompt_dump_path = base_path.with_suffix(".txt")
prompt_dump_path.write_text(cls._build_prompt_dump_text(messages), encoding="utf-8")
viewer_messages: list[dict[str, Any]] = []
for message in messages:
if isinstance(message, dict):
@@ -641,15 +632,22 @@ class PromptCLIVisualizer:
]
viewer_messages.append(normalized_message)
viewer_html_path = base_path.with_suffix(".html")
viewer_html_path.write_text(
cls._build_prompt_viewer_html(
viewer_messages,
request_kind=request_kind,
selection_reason=selection_reason,
),
encoding="utf-8",
prompt_dump_text = cls._build_prompt_dump_text(messages)
viewer_html_text = cls._build_prompt_viewer_html(
viewer_messages,
request_kind=request_kind,
selection_reason=selection_reason,
)
saved_paths = PromptPreviewLogger.save_preview_files(
chat_id,
category,
{
".html": viewer_html_text,
".txt": prompt_dump_text,
},
)
viewer_html_path = saved_paths[".html"]
prompt_dump_path = saved_paths[".txt"]
viewer_uri = cls._build_file_uri(viewer_html_path)
dump_uri = cls._build_file_uri(prompt_dump_path)
@@ -659,10 +657,198 @@ class PromptCLIVisualizer:
Text.from_markup(f"[link={viewer_uri}]点击在浏览器打开富文本 Prompt 视图[/link]", style="bold green"),
Text.from_markup(f"[link={dump_uri}]点击直接打开 Prompt 文本[/link]", style="cyan"),
)
return body
@classmethod
def build_prompt_section(
    cls,
    messages: list[Any],
    *,
    category: str,
    chat_id: str,
    request_kind: str,
    selection_reason: str,
    image_display_mode: Literal["legacy", "path_link"],
    folded: bool,
) -> Panel:
    """Build the prompt block that is embedded into a result panel.

    Args:
        messages: Built LLM request messages to visualize.
        category: Storage category forwarded to the preview logger.
        chat_id: Chat stream identifier forwarded to the preview logger.
        request_kind: Drives the panel title/border via get_request_panel_style.
        selection_reason: Shown as the panel subtitle.
        image_display_mode: How image parts are rendered ("legacy" or "path_link").
        folded: When True, show only the on-disk access links instead of the
            full per-message panels.

    Returns:
        A rich Panel wrapping either the folded access links or the full
        ordered prompt panels.
    """
    panel_title, panel_border_style = cls.get_request_panel_style(request_kind)
    if folded:
        # Folded view: persist the prompt to disk and render only entry links.
        prompt_renderable = cls.build_prompt_access_panel(
            messages,
            category=category,
            chat_id=chat_id,
            request_kind=request_kind,
            selection_reason=selection_reason,
            image_display_mode=image_display_mode,
        )
    else:
        # Expanded view: one panel per message, preserving order.
        ordered_panels = cls.build_prompt_panels(
            messages,
            image_display_mode=image_display_mode,
        )
        prompt_renderable = Group(*ordered_panels)
    # NOTE(review): the diff rendering had stale removed lines (`body,`,
    # `title=Text(" Prompt 查看入口 " ...)`, `border_style="blue",`) interleaved
    # here; this is the reconstructed post-change call.
    return Panel(
        prompt_renderable,
        title=panel_title,
        subtitle=selection_reason,
        border_style=panel_border_style,
        padding=(0, 1),
    )
@classmethod
def _build_text_preview_html(
    cls,
    content: str,
    *,
    request_kind: str,
    subtitle: str,
) -> str:
    """Render a standalone HTML page previewing a plain-text prompt.

    Args:
        content: Raw prompt text; HTML-escaped and displayed in a <pre> block.
        request_kind: Used only to derive the page/panel title via
            get_request_panel_style.
        subtitle: Optional sub-heading; omitted entirely when blank.

    Returns:
        A complete self-contained HTML document as a string.
    """
    # Only the title is needed here; the border style is for the CLI panel.
    panel_title, _ = cls.get_request_panel_style(request_kind)
    # Skip the subtitle <div> entirely when the subtitle is blank/whitespace.
    subtitle_html = f"<div class='subtitle'>{html.escape(subtitle)}</div>" if subtitle.strip() else ""
    # Double braces ({{ }}) below escape literal CSS braces inside the f-string.
    return f"""<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{html.escape(panel_title)}</title>
<style>
:root {{
--bg: #f6f7fb;
--card: #ffffff;
--border: #d7dfeb;
--text: #18212f;
--muted: #5b6878;
--shadow: 0 18px 40px rgba(15, 23, 42, 0.08);
}}
body {{
margin: 0;
background:
radial-gradient(circle at top left, rgba(202, 138, 4, 0.12), transparent 24%),
radial-gradient(circle at top right, rgba(29, 78, 216, 0.10), transparent 24%),
var(--bg);
color: var(--text);
font-family: "Segoe UI", "PingFang SC", "Microsoft YaHei", sans-serif;
}}
.page {{
width: min(1200px, calc(100vw - 40px));
margin: 24px auto 40px;
}}
.hero {{
background: linear-gradient(135deg, #ffffff 0%, #fff8eb 100%);
border: 1px solid var(--border);
border-radius: 20px;
box-shadow: var(--shadow);
padding: 20px 24px;
margin-bottom: 18px;
}}
.title {{
font-size: 26px;
font-weight: 700;
letter-spacing: 0.02em;
}}
.subtitle {{
margin-top: 10px;
color: var(--muted);
white-space: pre-wrap;
}}
.content-card {{
background: var(--card);
border: 1px solid var(--border);
border-radius: 18px;
box-shadow: var(--shadow);
padding: 18px 20px;
}}
pre {{
margin: 0;
white-space: pre-wrap;
word-break: break-word;
font-family: "Cascadia Mono", "JetBrains Mono", "Consolas", monospace;
font-size: 13px;
line-height: 1.6;
color: #1e293b;
}}
</style>
</head>
<body>
<main class="page">
<header class="hero">
<div class="title">{html.escape(panel_title)}</div>
{subtitle_html}
</header>
<section class="content-card">
<pre>{html.escape(content)}</pre>
</section>
</main>
</body>
</html>"""
@classmethod
def build_text_access_panel(
    cls,
    content: str,
    *,
    category: str,
    chat_id: str,
    request_kind: str,
    subtitle: str,
) -> RenderableType:
    """Persist a text prompt preview and build its folded access links.

    Writes an HTML view plus a raw-text dump through PromptPreviewLogger,
    then returns a Group containing the saved paths and clickable file://
    links for both files.
    """
    preview_html = cls._build_text_preview_html(content, request_kind=request_kind, subtitle=subtitle)
    stored = PromptPreviewLogger.save_preview_files(
        chat_id,
        category,
        {".html": preview_html, ".txt": content},
    )
    html_path = stored[".html"]
    txt_path = stored[".txt"]
    html_uri = cls._build_file_uri(html_path)
    txt_uri = cls._build_file_uri(txt_path)
    # Plain paths first for copy/paste, then terminal hyperlinks.
    return Group(
        Text(f"富文本预览:{html_path}", style="bold green"),
        Text(f"原始文本备份:{txt_path}", style="magenta"),
        Text.from_markup(f"[link={html_uri}]点击在浏览器打开富文本 Prompt 视图[/link]", style="bold green"),
        Text.from_markup(f"[link={txt_uri}]点击直接打开 Prompt 文本[/link]", style="cyan"),
    )
@classmethod
def build_text_section(
    cls,
    content: str,
    *,
    category: str,
    chat_id: str,
    request_kind: str,
    subtitle: str,
    folded: bool,
) -> Panel:
    """Wrap a text prompt into an embeddable styled panel.

    When *folded* is True the content is dumped to disk and only the access
    links are rendered; otherwise the raw text is shown inline.
    """
    title, border = cls.get_request_panel_style(request_kind)
    if folded:
        inner: RenderableType = cls.build_text_access_panel(
            content,
            category=category,
            chat_id=chat_id,
            request_kind=request_kind,
            subtitle=subtitle,
        )
    else:
        inner = Text(content)
    return Panel(
        inner,
        title=title,
        subtitle=subtitle,
        border_style=border,
        padding=(0, 1),
    )

View File

@@ -0,0 +1,83 @@
"""Maisaka Prompt 预览落盘器。"""
from __future__ import annotations
import re
import time
from pathlib import Path
from typing import Dict
from uuid import uuid4
from src.config.config import global_config
class PromptPreviewLogger:
    """Persists Maisaka prompt preview files and keeps each directory bounded."""

    # Root directory for all preview dumps, grouped by category and chat id.
    _BASE_DIR = Path("logs") / "maisaka_prompt"
    # Minimum number of preview groups removed per cleanup pass.
    _TRIM_COUNT = 100
    # Runs of characters outside this safe set are collapsed into "_".
    _SAFE_NAME_PATTERN = re.compile(r"[^A-Za-z0-9._-]+")

    @classmethod
    def _get_max_per_chat(cls) -> int:
        """Read the per-chat preview cap from config, defaulting to 1000."""
        return getattr(global_config.chat, "plan_reply_log_max_per_chat", 1000)

    @classmethod
    def _normalize_chat_id(cls, chat_id: str) -> str:
        """Turn an arbitrary chat id into a filesystem-safe directory name."""
        safe = cls._SAFE_NAME_PATTERN.sub("_", str(chat_id or "").strip()).strip("._")
        return safe or "unknown_chat"

    @classmethod
    def save_preview_files(
        cls,
        chat_id: str,
        category: str,
        files: Dict[str, str],
    ) -> Dict[str, Path]:
        """Save one prompt preview as several sibling files, then trim overflow.

        Args:
            chat_id: Chat stream identifier (sanitized into a directory name).
            category: Preview category (sanitized into a directory name).
            files: Mapping of suffix (with or without leading dot) to content.

        Returns:
            Mapping of normalized suffix to the written file path.
        """
        target_dir = (cls._BASE_DIR / cls._normalize_chat_id(category) / cls._normalize_chat_id(chat_id)).resolve()
        target_dir.mkdir(parents=True, exist_ok=True)
        # Millisecond timestamp + short uuid keeps sibling files grouped by stem.
        stem = f"{int(time.time() * 1000)}_{uuid4().hex[:8]}"
        written: Dict[str, Path] = {}
        try:
            for raw_suffix, body in files.items():
                suffix = raw_suffix if raw_suffix.startswith(".") else f".{raw_suffix}"
                destination = target_dir / f"{stem}{suffix}"
                destination.write_text(body, encoding="utf-8")
                written[suffix] = destination
        finally:
            # Trim even after a failed write so the directory never grows unbounded.
            cls._trim_overflow(target_dir)
        return written

    @classmethod
    def _trim_overflow(cls, chat_dir: Path) -> None:
        """Delete the oldest preview groups once the per-chat cap is exceeded."""
        groups: dict[str, list[Path]] = {}
        for entry in chat_dir.iterdir():
            if entry.is_file():
                groups.setdefault(entry.stem, []).append(entry)
        limit = cls._get_max_per_chat()
        if len(groups) <= limit:
            return
        # Oldest group first, judged by the earliest mtime among its members.
        ordered = sorted(
            groups.items(),
            key=lambda kv: min(member.stat().st_mtime for member in kv[1]),
        )
        overflow = len(groups) - limit
        batch = min(len(ordered), max(cls._TRIM_COUNT, overflow))
        for _, members in ordered[:batch]:
            for stale in members:
                try:
                    stale.unlink()
                except FileNotFoundError:
                    # Already gone (e.g. concurrent cleanup) — nothing to do.
                    continue

View File

@@ -290,6 +290,7 @@ class MaisakaReasoningEngine:
planner_response=timing_response.content or "",
tool_calls=timing_response.tool_calls,
tool_results=timing_tool_results,
prompt_section=timing_response.prompt_section,
)
if timing_action != "continue":
logger.info(
@@ -340,6 +341,7 @@ class MaisakaReasoningEngine:
planner_response=response.content or "",
tool_calls=response.tool_calls,
tool_results=tool_result_summaries,
prompt_section=response.prompt_section,
)
if should_pause:
break
@@ -349,6 +351,7 @@ class MaisakaReasoningEngine:
selected_history_count=response.selected_history_count,
prompt_tokens=response.prompt_tokens,
planner_response=response.content or "",
prompt_section=response.prompt_section,
)
if not response.content:
break

View File

@@ -5,6 +5,7 @@ from typing import Any, Literal, Optional, Sequence
import asyncio
import time
from rich.console import Group, RenderableType
from rich.panel import Panel
from rich.text import Text
@@ -493,6 +494,7 @@ class MaisakaHeartFlowChatting:
planner_response: str = "",
tool_calls: Optional[list[Any]] = None,
tool_results: Optional[list[str]] = None,
prompt_section: Optional[RenderableType] = None,
) -> None:
"""在终端展示当前聊天流的上下文占用、规划结果与工具摘要。"""
if not global_config.debug.show_maisaka_thinking:
@@ -505,21 +507,46 @@ class MaisakaHeartFlowChatting:
f"上下文占用: {selected_history_count}条 / {self._format_token_count(prompt_tokens)}",
]
renderables: list[RenderableType] = [Text("\n".join(body_lines))]
if prompt_section is not None:
renderables.append(prompt_section)
normalized_response = planner_response.strip()
if normalized_response:
body_lines.extend(["", "Maisaka 返回:", normalized_response])
renderables.append(
Panel(
Text(normalized_response),
title="Maisaka 返回",
border_style="green",
padding=(0, 1),
)
)
normalized_tool_calls = self._build_tool_call_summary_lines(tool_calls or [])
if normalized_tool_calls:
body_lines.extend(["", "工具调用:", *normalized_tool_calls])
renderables.append(
Panel(
Text("\n".join(normalized_tool_calls)),
title="工具调用",
border_style="magenta",
padding=(0, 1),
)
)
normalized_tool_results = [result.strip() for result in tool_results or [] if isinstance(result, str) and result.strip()]
if normalized_tool_results:
body_lines.extend(["", "工具结果:", *normalized_tool_results])
renderables.append(
Panel(
Text("\n".join(normalized_tool_results)),
title="工具结果",
border_style="yellow",
padding=(0, 1),
)
)
console.print(
Panel(
Text("\n".join(body_lines)),
Group(*renderables),
title="MaiSaka 上下文与结果",
border_style="bright_blue",
padding=(0, 1),

View File

@@ -238,15 +238,6 @@ class PluginMessageUtils:
logger.info(f"解析转发节点时跳过非字典节点: {node!r}")
continue
raw_content = node.get("content", [])
logger.info(
"开始解析转发节点: "
f"message_id={node.get('message_id')!r} "
f"user_id={node.get('user_id')!r} "
f"user_nickname={node.get('user_nickname')!r} "
f"user_cardname={node.get('user_cardname')!r} "
f"raw_content_type={type(raw_content).__name__} "
f"raw_content={raw_content!r}"
)
node_components: List[StandardMessageComponents] = []
if isinstance(raw_content, list):
node_components = [
@@ -254,12 +245,6 @@ class PluginMessageUtils:
for content in raw_content
if isinstance(content, dict)
]
logger.info(
"转发节点解析结果: "
f"message_id={node.get('message_id')!r} "
f"component_types={[component.__class__.__name__ for component in node_components]!r} "
f"component_values={[getattr(component, 'text', None) for component in node_components]!r}"
)
if not node_components:
logger.warning(
"转发节点内容为空,使用占位文本回退: "