feat: add a unified result to the planner's final display

SengokuCola
2026-04-07 18:16:12 +08:00
parent 6968879a04
commit 2233ee1af4
12 changed files with 651 additions and 235 deletions

View File

@@ -1,138 +0,0 @@
-import json
-import time
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-from uuid import uuid4
-
-from src.config.config import global_config
-
-
-class PlanReplyLogger:
-    """Standalone Plan/Reply logger; handles persisting records to disk and capping directory size."""
-
-    _BASE_DIR = Path("logs")
-    _PLAN_DIR = _BASE_DIR / "plan"
-    _REPLY_DIR = _BASE_DIR / "reply"
-    _TRIM_COUNT = 100
-
-    @classmethod
-    def _get_max_per_chat(cls) -> int:
-        """Read the maximum number of log entries kept per chat stream from the config."""
-        return getattr(global_config.chat, "plan_reply_log_max_per_chat", 1000)
-
-    @classmethod
-    def log_plan(
-        cls,
-        chat_id: str,
-        prompt: str,
-        reasoning: str,
-        raw_output: Optional[str],
-        raw_reasoning: Optional[str],
-        actions: List[Any],
-        timing: Optional[Dict[str, Any]] = None,
-        extra: Optional[Dict[str, Any]] = None,
-    ) -> None:
-        payload = {
-            "type": "plan",
-            "chat_id": chat_id,
-            "timestamp": time.time(),
-            "prompt": prompt,
-            "reasoning": reasoning,
-            "raw_output": raw_output,
-            "raw_reasoning": raw_reasoning,
-            "actions": [cls._serialize_action(action) for action in actions],
-            "timing": timing or {},
-            "extra": cls._safe_data(extra),
-        }
-        cls._write_json(cls._PLAN_DIR, chat_id, payload)
-
-    @classmethod
-    def log_reply(
-        cls,
-        chat_id: str,
-        prompt: str,
-        output: Optional[str],
-        processed_output: Optional[List[Any]],
-        model: Optional[str],
-        timing: Optional[Dict[str, Any]] = None,
-        reasoning: Optional[str] = None,
-        think_level: Optional[int] = None,
-        error: Optional[str] = None,
-        success: bool = True,
-    ) -> None:
-        payload = {
-            "type": "reply",
-            "chat_id": chat_id,
-            "timestamp": time.time(),
-            "prompt": prompt,
-            "output": output,
-            "processed_output": cls._safe_data(processed_output),
-            "model": model,
-            "reasoning": reasoning,
-            "think_level": think_level,
-            "timing": timing or {},
-            "error": error if not success else None,
-            "success": success,
-        }
-        cls._write_json(cls._REPLY_DIR, chat_id, payload)
-
-    @classmethod
-    def _write_json(cls, base_dir: Path, chat_id: str, payload: Dict[str, Any]) -> None:
-        chat_dir = base_dir / chat_id
-        chat_dir.mkdir(parents=True, exist_ok=True)
-        file_path = chat_dir / f"{int(time.time() * 1000)}_{uuid4().hex[:8]}.json"
-        try:
-            with file_path.open("w", encoding="utf-8") as f:
-                json.dump(cls._safe_data(payload), f, ensure_ascii=False, indent=2)
-        finally:
-            cls._trim_overflow(chat_dir)
-
-    @classmethod
-    def _trim_overflow(cls, chat_dir: Path) -> None:
-        """Delete the oldest files once the per-chat threshold is exceeded, so the directory cannot grow without bound."""
-        files = sorted(chat_dir.glob("*.json"), key=lambda p: p.stat().st_mtime)
-        max_per_chat = cls._get_max_per_chat()
-        if len(files) <= max_per_chat:
-            return
-        # Drop the oldest _TRIM_COUNT entries
-        for old_file in files[: cls._TRIM_COUNT]:
-            try:
-                old_file.unlink()
-            except FileNotFoundError:
-                continue
-
-    @classmethod
-    def _serialize_action(cls, action: Any) -> Dict[str, Any]:
-        # Lightweight serialization of the ActionPlannerInfo structure; avoids holding references to complex objects
-        message_info = None
-        action_message = getattr(action, "action_message", None)
-        if action_message:
-            user_info = getattr(action_message, "user_info", None)
-            message_info = {
-                "message_id": getattr(action_message, "message_id", None),
-                "user_id": getattr(user_info, "user_id", None) if user_info else None,
-                "platform": getattr(user_info, "platform", None) if user_info else None,
-                "text": getattr(action_message, "processed_plain_text", None),
-            }
-        return {
-            "action_type": getattr(action, "action_type", None),
-            "reasoning": getattr(action, "reasoning", None),
-            "action_data": cls._safe_data(getattr(action, "action_data", None)),
-            "action_message": message_info,
-            "available_actions": cls._safe_data(getattr(action, "available_actions", None)),
-            "action_reasoning": getattr(action, "action_reasoning", None),
-        }
-
-    @classmethod
-    def _safe_data(cls, value: Any) -> Any:
-        if isinstance(value, (str, int, float, bool)) or value is None:
-            return value
-        if isinstance(value, dict):
-            return {str(k): cls._safe_data(v) for k, v in value.items()}
-        if isinstance(value, (list, tuple, set)):
-            return [cls._safe_data(v) for v in value]
-        if isinstance(value, Path):
-            return str(value)
-        # Fall back to str() for any other complex type
-        return str(value)
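
For orientation, a minimal sketch of how the deleted logger was driven, based only on the signatures above; the chat id, model name, and timing values are illustrative, not taken from the codebase:

    PlanReplyLogger.log_reply(
        chat_id="chat-123",               # illustrative chat stream id
        prompt="<rendered prompt text>",
        output="<raw LLM output>",
        processed_output=None,
        model="example-model",            # illustrative model name
        timing={"overall_ms": 12.3},
        success=True,
    )
    # Each call writes logs/reply/chat-123/<epoch_ms>_<uuid8>.json. Once the chat
    # directory exceeds plan_reply_log_max_per_chat (default 1000) files, the
    # oldest _TRIM_COUNT (100) files are unlinked.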

View File

@@ -33,7 +33,6 @@ from src.person_info.person_info import Person
 from src.core.types import ActionInfo, EventType
 from src.services import llm_service as llm_api
-from src.chat.logger.plan_reply_logger import PlanReplyLogger
 from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
 from src.learners.jargon_explainer_old import explain_jargon_in_context
 from src.chat.utils.common_utils import TempMethodsExpression
@@ -131,22 +130,6 @@ class DefaultReplyer:
             llm_response.timing["overall_ms"] = round((time.perf_counter() - overall_start) * 1000, 2)
             llm_response.timing["almost_zero"] = almost_zero_str
             llm_response.timing["timing_logs"] = timing_logs
-            if log_reply:
-                try:
-                    PlanReplyLogger.log_reply(
-                        chat_id=self.chat_stream.session_id,
-                        prompt="",
-                        output=None,
-                        processed_output=None,
-                        model=None,
-                        timing=llm_response.timing,
-                        reasoning=None,
-                        think_level=think_level,
-                        error="build_prompt_failed",
-                        success=False,
-                    )
-                except Exception:
-                    logger.exception("Failed to write reply log")
             return False, llm_response

         from src.core.event_bus import event_bus
         from src.chat.event_helpers import build_event_message
@@ -201,21 +184,6 @@
             llm_response.timing_logs = timing_logs
             llm_response.timing["timing_logs"] = timing_logs
             llm_response.timing["almost_zero"] = almost_zero_str
-            try:
-                if log_reply:
-                    PlanReplyLogger.log_reply(
-                        chat_id=self.chat_stream.session_id,
-                        prompt=prompt,
-                        output=content,
-                        processed_output=None,
-                        model=model_name,
-                        timing=llm_response.timing,
-                        reasoning=reasoning_content,
-                        think_level=think_level,
-                        success=True,
-                    )
-            except Exception:
-                logger.exception("Failed to write reply log")
             _event_msg = build_event_message(
                 EventType.AFTER_LLM, llm_prompt=prompt, llm_response=llm_response, stream_id=stream_id
             )
@@ -259,22 +227,6 @@
             llm_response.timing_logs = timing_logs
             llm_response.timing["timing_logs"] = timing_logs
             llm_response.timing["almost_zero"] = almost_zero_str
-            if log_reply:
-                try:
-                    PlanReplyLogger.log_reply(
-                        chat_id=self.chat_stream.session_id,
-                        prompt=prompt or "",
-                        output=None,
-                        processed_output=None,
-                        model=model_name,
-                        timing=llm_response.timing,
-                        reasoning=None,
-                        think_level=think_level,
-                        error=str(llm_e),
-                        success=False,
-                    )
-                except Exception:
-                    logger.exception("Failed to write reply log")
             return False, llm_response  # no reply can be generated if the LLM call fails

         return True, llm_response
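
Per the payload construction in the deleted logger, the first removed block above (the build_prompt_failed path) wrote a record of roughly this shape; the concrete values here are illustrative:

    {
      "type": "reply",
      "chat_id": "chat-123",
      "timestamp": 1712484972.0,
      "prompt": "",
      "output": null,
      "processed_output": null,
      "model": null,
      "reasoning": null,
      "think_level": 1,
      "timing": {"overall_ms": 12.3},
      "error": "build_prompt_failed",
      "success": false
    }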

View File

@@ -5,8 +5,11 @@ from typing import Awaitable, Callable, Dict, List, Optional, Tuple
 import random
 import time
+from rich.panel import Panel
 from src.chat.message_receive.chat_manager import BotChatSession
 from src.chat.message_receive.message import SessionMessage
+from src.cli.console import console
 from src.common.data_models.reply_generation_data_models import (
     GenerationMetrics,
     LLMCompletionResult,
@@ -27,6 +30,7 @@ from src.maisaka.context_messages import (
     ToolResultMessage,
 )
 from src.maisaka.message_adapter import parse_speaker_content
+from src.maisaka.prompt_cli_renderer import PromptCLIVisualizer
 from .maisaka_expression_selector import maisaka_expression_selector
@@ -365,9 +369,23 @@
         result.completion.request_prompt = prompt
         show_replyer_prompt = bool(getattr(global_config.debug, "show_replyer_prompt", False))
         show_replyer_reasoning = bool(getattr(global_config.debug, "show_replyer_reasoning", False))
+        preview_chat_id = self._resolve_session_id(stream_id) or "unknown"
         if show_replyer_prompt:
-            logger.info(f"\nMaisaka replyer prompt:\n{prompt}\n")
+            console.print(
+                Panel(
+                    PromptCLIVisualizer.build_text_access_panel(
+                        prompt,
+                        category="replyer",
+                        chat_id=preview_chat_id,
+                        request_kind="replyer",
+                        subtitle=f"Stream ID: {preview_chat_id}",
+                    ),
+                    title="Maisaka Replyer Prompt",
+                    border_style="bright_yellow",
+                    padding=(0, 1),
+                )
+            )
         llm_started_at = time.perf_counter()
         try:
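
The new display pattern in isolation: a runnable sketch using only rich, with plain text standing in for the PromptCLIVisualizer renderable (its implementation is not part of this diff):

    from rich.console import Console
    from rich.panel import Panel

    console = Console()
    console.print(
        Panel(
            "prompt preview text",  # stand-in for PromptCLIVisualizer.build_text_access_panel(...)
            title="Maisaka Replyer Prompt",
            border_style="bright_yellow",
            padding=(0, 1),  # no vertical padding, one cell of horizontal padding
        )
    )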

View File

@@ -444,13 +444,17 @@
         preview_chat_id = self._resolve_session_id(stream_id)
         replyer_prompt_section: RenderableType | None = None
         if show_replyer_prompt:
-            replyer_prompt_section = PromptCLIVisualizer.build_text_section(
-                prompt_preview,
-                category="replyer",
-                chat_id=preview_chat_id,
-                request_kind="replyer",
-                subtitle=f"Stream ID: {preview_chat_id}",
-                folded=global_config.debug.fold_maisaka_thinking,
-            )
+            replyer_prompt_section = Panel(
+                PromptCLIVisualizer.build_text_access_panel(
+                    prompt_preview,
+                    category="replyer",
+                    chat_id=preview_chat_id,
+                    request_kind="replyer",
+                    subtitle=f"Stream ID: {preview_chat_id}",
+                ),
+                title="Reply Prompt",
+                border_style="bright_yellow",
+                padding=(0, 1),
+            )
         llm_started_at = time.perf_counter()
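
Since replyer_prompt_section is typed RenderableType | None, the caller presumably renders only the sections whose debug flags are on. A small sketch of that optional-section pattern; the section list and flag are assumptions for illustration, not code from this repository:

    from rich.console import Console, Group, RenderableType
    from rich.panel import Panel

    show_prompt = True  # stands in for global_config.debug.show_replyer_prompt
    sections: list[RenderableType | None] = [
        Panel("prompt preview", title="Reply Prompt", border_style="bright_yellow", padding=(0, 1))
        if show_prompt
        else None,
    ]
    # Render only the sections that were actually built.
    Console().print(Group(*(s for s in sections if s is not None)))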