feat: add a unified result to the planner's final display
@@ -2,6 +2,7 @@ from types import SimpleNamespace
from typing import Any, Callable

import pytest
from rich.panel import Panel

from src.chat.replyer import maisaka_generator as legacy_replyer_module
from src.chat.replyer import maisaka_generator_multi as multimodal_replyer_module
@@ -13,8 +14,10 @@ from src.common.data_models.reply_generation_data_models import (
from src.core.tooling import ToolExecutionResult, ToolInvocation
from src.maisaka.builtin_tool.context import BuiltinToolRuntimeContext
from src.maisaka.builtin_tool import reply as reply_tool_module
from src.maisaka.builtin_tool import send_emoji as send_emoji_tool_module
from src.maisaka.monitor_events import emit_planner_finalized
from src.maisaka.reasoning_engine import MaisakaReasoningEngine
from src.maisaka.runtime import MaisakaHeartFlowChatting


class _FakeLLMResult:
@@ -132,6 +135,72 @@ async def test_reply_tool_puts_monitor_detail_into_metadata(monkeypatch: pytest.
    assert result.metadata["monitor_detail"] == fake_monitor_detail


@pytest.mark.asyncio
async def test_send_emoji_tool_puts_monitor_detail_into_metadata(monkeypatch: pytest.MonkeyPatch) -> None:
    async def _fake_build_emoji_candidate_message(emojis: list[Any]) -> object:
        assert emojis
        return SimpleNamespace()

    async def _fake_send_emoji_for_maisaka(**kwargs: Any) -> Any:
        selected_emoji, matched_emotion = await kwargs["emoji_selector"](
            kwargs["requested_emotion"],
            kwargs["reasoning"],
            kwargs["context_texts"],
            2,
        )
        assert selected_emoji is not None
        return SimpleNamespace(
            success=True,
            message="已发送表情包:开心",
            emoji_base64="ZW1vamk=",
            description="开心",
            emotions=["开心", "可爱"],
            matched_emotion=matched_emotion or "开心",
            sent_message=None,
        )

    monkeypatch.setattr(send_emoji_tool_module, "_build_emoji_candidate_message", _fake_build_emoji_candidate_message)
    monkeypatch.setattr(send_emoji_tool_module, "send_emoji_for_maisaka", _fake_send_emoji_for_maisaka)
    monkeypatch.setattr(
        send_emoji_tool_module.emoji_manager,
        "emojis",
        [
            SimpleNamespace(description="开心,可爱", emotion=["开心", "可爱"]),
            SimpleNamespace(description="难过", emotion=["难过"]),
        ],
    )

    async def _fake_run_sub_agent(**kwargs: Any) -> Any:
        del kwargs
        return SimpleNamespace(
            content='{"emoji_index": 1, "reason": "更贴合当前语气"}',
            prompt_tokens=9,
            completion_tokens=6,
            total_tokens=15,
        )

    runtime = SimpleNamespace(
        _chat_history=[],
        log_prefix="[test]",
        session_id="session-emoji",
        run_sub_agent=_fake_run_sub_agent,
    )
    engine = SimpleNamespace(last_reasoning_content="用户刚刚表达了开心情绪")
    tool_ctx = BuiltinToolRuntimeContext(engine=engine, runtime=runtime)
    invocation = ToolInvocation(tool_name="send_emoji", arguments={"emotion": "开心"})

    result = await send_emoji_tool_module.handle_tool(tool_ctx, invocation)

    assert result.success is True
    assert result.metadata["monitor_detail"]["prompt_text"]
    assert result.metadata["monitor_detail"]["reasoning_text"] == "更贴合当前语气"
    assert result.metadata["monitor_detail"]["metrics"]["total_tokens"] == 15
    assert any(
        section["title"] == "表情发送结果"
        for section in result.metadata["monitor_detail"]["extra_sections"]
    )


@pytest.mark.asyncio
async def test_emit_planner_finalized_broadcasts_new_protocol(monkeypatch: pytest.MonkeyPatch) -> None:
    captured: dict[str, Any] = {}
@@ -190,3 +259,130 @@ def test_reasoning_engine_build_tool_monitor_result_keeps_non_reply_tool_without
    assert tool_result["tool_name"] == "query_memory"
    assert tool_result["tool_args"] == {"query": "Alice"}
    assert tool_result["detail"] is None


def test_runtime_build_tool_detail_panels_renders_reply_monitor_detail() -> None:
    runtime = object.__new__(MaisakaHeartFlowChatting)
    runtime.session_id = "session-1"
    panels = runtime._build_tool_detail_panels(
        [
            {
                "tool_call_id": "call-reply-1",
                "tool_name": "reply",
                "tool_args": {"msg_id": "m1"},
                "success": True,
                "duration_ms": 20.5,
                "summary": "- reply [成功]: 已回复",
                "detail": {
                    "prompt_text": "reply prompt",
                    "reasoning_text": "reply reasoning",
                    "output_text": "reply output",
                    "metrics": {
                        "model_name": "fake-model",
                        "prompt_tokens": 10,
                        "completion_tokens": 5,
                        "total_tokens": 15,
                        "prompt_ms": 2.1,
                        "llm_ms": 18.4,
                        "overall_ms": 20.5,
                    },
                },
            }
        ]
    )

    assert len(panels) == 1
    assert isinstance(panels[0], Panel)


def test_runtime_filter_redundant_tool_results_keeps_only_non_detailed_summary() -> None:
    filtered_results = MaisakaHeartFlowChatting._filter_redundant_tool_results(
        tool_results=[
            "- reply [成功]: 已回复",
            "- query_memory [成功]: 查询到 2 条记录",
        ],
        tool_detail_results=[
            {
                "summary": "- reply [成功]: 已回复",
                "detail": {"output_text": "测试回复"},
            }
        ],
    )

    assert filtered_results == ["- query_memory [成功]: 查询到 2 条记录"]


def test_runtime_build_tool_detail_panels_uses_prompt_access_panel(monkeypatch: pytest.MonkeyPatch) -> None:
    runtime = object.__new__(MaisakaHeartFlowChatting)
    runtime.session_id = "session-link"
    captured: dict[str, Any] = {}

    def _fake_build_text_access_panel(content: str, **kwargs: Any) -> str:
        captured["content"] = content
        captured["kwargs"] = kwargs
        return "PROMPT_LINK"

    monkeypatch.setattr(
        "src.maisaka.runtime.PromptCLIVisualizer.build_text_access_panel",
        _fake_build_text_access_panel,
    )

    panels = runtime._build_tool_detail_panels(
        [
            {
                "tool_call_id": "call-reply-2",
                "tool_name": "reply",
                "tool_args": {"msg_id": "m2"},
                "success": True,
                "duration_ms": 12.0,
                "summary": "- reply [成功]: 已回复",
                "detail": {
                    "prompt_text": "reply prompt link",
                    "output_text": "reply output",
                },
            }
        ]
    )

    assert len(panels) == 1
    assert captured["content"] == "reply prompt link"
    assert captured["kwargs"]["chat_id"] == "session-link"
    assert captured["kwargs"]["request_kind"] == "replyer"


def test_runtime_build_tool_detail_panels_uses_emotion_prompt_access_panel(monkeypatch: pytest.MonkeyPatch) -> None:
    runtime = object.__new__(MaisakaHeartFlowChatting)
    runtime.session_id = "session-emotion"
    captured: dict[str, Any] = {}

    def _fake_build_text_access_panel(content: str, **kwargs: Any) -> str:
        captured["content"] = content
        captured["kwargs"] = kwargs
        return "EMOTION_PROMPT_LINK"

    monkeypatch.setattr(
        "src.maisaka.runtime.PromptCLIVisualizer.build_text_access_panel",
        _fake_build_text_access_panel,
    )

    panels = runtime._build_tool_detail_panels(
        [
            {
                "tool_call_id": "call-emoji-1",
                "tool_name": "send_emoji",
                "tool_args": {"emotion": "开心"},
                "success": True,
                "duration_ms": 15.0,
                "summary": "- send_emoji [成功]: 已发送表情包",
                "detail": {
                    "prompt_text": "emotion prompt link",
                    "output_text": '{"emoji_index": 1}',
                },
            }
        ]
    )

    assert len(panels) == 1
    assert captured["content"] == "emotion prompt link"
    assert captured["kwargs"]["chat_id"] == "session-emotion"
    assert captured["kwargs"]["request_kind"] == "emotion"
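
The tests above pin down the unified monitor_detail contract that this commit threads from the reply and send_emoji tools into the planner's final display. A minimal plain-Python sketch of that shape, with field values taken from the test fixtures (nothing here imports project code, and the comment about ToolExecutionResult reflects the assertions above, not a definitive API description):

monitor_detail = {
    "prompt_text": "emotion prompt ...",
    "reasoning_text": "更贴合当前语气",
    "output_text": '{"emoji_index": 1, "reason": "更贴合当前语气"}',
    "metrics": {"prompt_tokens": 9, "completion_tokens": 6, "total_tokens": 15},
    "extra_sections": [{"title": "表情发送结果", "content": "请求情绪:开心"}],
}
metadata = {"monitor_detail": monitor_detail}  # what the tests read from result.metadata
assert metadata["monitor_detail"]["metrics"]["total_tokens"] == 15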

@@ -1,138 +0,0 @@
import json
import time
from pathlib import Path
from typing import Any, Dict, List, Optional
from uuid import uuid4

from src.config.config import global_config


class PlanReplyLogger:
    """独立的Plan/Reply日志记录器,负责落盘和容量控制。"""

    _BASE_DIR = Path("logs")
    _PLAN_DIR = _BASE_DIR / "plan"
    _REPLY_DIR = _BASE_DIR / "reply"
    _TRIM_COUNT = 100

    @classmethod
    def _get_max_per_chat(cls) -> int:
        """从配置中获取每个聊天流最大保存的日志数量"""
        return getattr(global_config.chat, "plan_reply_log_max_per_chat", 1000)

    @classmethod
    def log_plan(
        cls,
        chat_id: str,
        prompt: str,
        reasoning: str,
        raw_output: Optional[str],
        raw_reasoning: Optional[str],
        actions: List[Any],
        timing: Optional[Dict[str, Any]] = None,
        extra: Optional[Dict[str, Any]] = None,
    ) -> None:
        payload = {
            "type": "plan",
            "chat_id": chat_id,
            "timestamp": time.time(),
            "prompt": prompt,
            "reasoning": reasoning,
            "raw_output": raw_output,
            "raw_reasoning": raw_reasoning,
            "actions": [cls._serialize_action(action) for action in actions],
            "timing": timing or {},
            "extra": cls._safe_data(extra),
        }
        cls._write_json(cls._PLAN_DIR, chat_id, payload)

    @classmethod
    def log_reply(
        cls,
        chat_id: str,
        prompt: str,
        output: Optional[str],
        processed_output: Optional[List[Any]],
        model: Optional[str],
        timing: Optional[Dict[str, Any]] = None,
        reasoning: Optional[str] = None,
        think_level: Optional[int] = None,
        error: Optional[str] = None,
        success: bool = True,
    ) -> None:
        payload = {
            "type": "reply",
            "chat_id": chat_id,
            "timestamp": time.time(),
            "prompt": prompt,
            "output": output,
            "processed_output": cls._safe_data(processed_output),
            "model": model,
            "reasoning": reasoning,
            "think_level": think_level,
            "timing": timing or {},
            "error": error if not success else None,
            "success": success,
        }
        cls._write_json(cls._REPLY_DIR, chat_id, payload)

    @classmethod
    def _write_json(cls, base_dir: Path, chat_id: str, payload: Dict[str, Any]) -> None:
        chat_dir = base_dir / chat_id
        chat_dir.mkdir(parents=True, exist_ok=True)
        file_path = chat_dir / f"{int(time.time() * 1000)}_{uuid4().hex[:8]}.json"
        try:
            with file_path.open("w", encoding="utf-8") as f:
                json.dump(cls._safe_data(payload), f, ensure_ascii=False, indent=2)
        finally:
            cls._trim_overflow(chat_dir)

    @classmethod
    def _trim_overflow(cls, chat_dir: Path) -> None:
        """超过阈值时删除最老的若干文件,避免目录无限增长。"""
        files = sorted(chat_dir.glob("*.json"), key=lambda p: p.stat().st_mtime)
        max_per_chat = cls._get_max_per_chat()
        if len(files) <= max_per_chat:
            return
        # 删除最老的 TRIM_COUNT 条
        for old_file in files[: cls._TRIM_COUNT]:
            try:
                old_file.unlink()
            except FileNotFoundError:
                continue

    @classmethod
    def _serialize_action(cls, action: Any) -> Dict[str, Any]:
        # ActionPlannerInfo 结构的轻量序列化,避免引用复杂对象
        message_info = None
        action_message = getattr(action, "action_message", None)
        if action_message:
            user_info = getattr(action_message, "user_info", None)
            message_info = {
                "message_id": getattr(action_message, "message_id", None),
                "user_id": getattr(user_info, "user_id", None) if user_info else None,
                "platform": getattr(user_info, "platform", None) if user_info else None,
                "text": getattr(action_message, "processed_plain_text", None),
            }

        return {
            "action_type": getattr(action, "action_type", None),
            "reasoning": getattr(action, "reasoning", None),
            "action_data": cls._safe_data(getattr(action, "action_data", None)),
            "action_message": message_info,
            "available_actions": cls._safe_data(getattr(action, "available_actions", None)),
            "action_reasoning": getattr(action, "action_reasoning", None),
        }

    @classmethod
    def _safe_data(cls, value: Any) -> Any:
        if isinstance(value, (str, int, float, bool)) or value is None:
            return value
        if isinstance(value, dict):
            return {str(k): cls._safe_data(v) for k, v in value.items()}
        if isinstance(value, (list, tuple, set)):
            return [cls._safe_data(v) for v in value]
        if isinstance(value, Path):
            return str(value)
        # Fallback to string for other complex types
        return str(value)
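
For reference, a standalone sketch of the _safe_data fallback used by the logger above (the whole file is removed by this commit): anything that is not a primitive, dict, list/tuple/set, or Path is stringified. This is plain Python mirroring the removed method, not project code:

from pathlib import Path

def safe_data(value):
    # Mirrors PlanReplyLogger._safe_data: keep JSON-friendly values, stringify the rest.
    if isinstance(value, (str, int, float, bool)) or value is None:
        return value
    if isinstance(value, dict):
        return {str(k): safe_data(v) for k, v in value.items()}
    if isinstance(value, (list, tuple, set)):
        return [safe_data(v) for v in value]
    if isinstance(value, Path):
        return str(value)
    return str(value)

assert safe_data({"dir": Path("logs"), "ids": (1, 2)}) == {"dir": "logs", "ids": [1, 2]}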
@@ -33,7 +33,6 @@ from src.person_info.person_info import Person
from src.core.types import ActionInfo, EventType
from src.services import llm_service as llm_api

from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
from src.learners.jargon_explainer_old import explain_jargon_in_context
from src.chat.utils.common_utils import TempMethodsExpression
@@ -131,22 +130,6 @@ class DefaultReplyer:
        llm_response.timing["overall_ms"] = round((time.perf_counter() - overall_start) * 1000, 2)
        llm_response.timing["almost_zero"] = almost_zero_str
        llm_response.timing["timing_logs"] = timing_logs
        if log_reply:
            try:
                PlanReplyLogger.log_reply(
                    chat_id=self.chat_stream.session_id,
                    prompt="",
                    output=None,
                    processed_output=None,
                    model=None,
                    timing=llm_response.timing,
                    reasoning=None,
                    think_level=think_level,
                    error="build_prompt_failed",
                    success=False,
                )
            except Exception:
                logger.exception("记录reply日志失败")
        return False, llm_response
        from src.core.event_bus import event_bus
        from src.chat.event_helpers import build_event_message
@@ -201,21 +184,6 @@ class DefaultReplyer:
        llm_response.timing_logs = timing_logs
        llm_response.timing["timing_logs"] = timing_logs
        llm_response.timing["almost_zero"] = almost_zero_str
        try:
            if log_reply:
                PlanReplyLogger.log_reply(
                    chat_id=self.chat_stream.session_id,
                    prompt=prompt,
                    output=content,
                    processed_output=None,
                    model=model_name,
                    timing=llm_response.timing,
                    reasoning=reasoning_content,
                    think_level=think_level,
                    success=True,
                )
        except Exception:
            logger.exception("记录reply日志失败")
        _event_msg = build_event_message(
            EventType.AFTER_LLM, llm_prompt=prompt, llm_response=llm_response, stream_id=stream_id
        )
@@ -259,22 +227,6 @@ class DefaultReplyer:
            llm_response.timing_logs = timing_logs
            llm_response.timing["timing_logs"] = timing_logs
            llm_response.timing["almost_zero"] = almost_zero_str
            if log_reply:
                try:
                    PlanReplyLogger.log_reply(
                        chat_id=self.chat_stream.session_id,
                        prompt=prompt or "",
                        output=None,
                        processed_output=None,
                        model=model_name,
                        timing=llm_response.timing,
                        reasoning=None,
                        think_level=think_level,
                        error=str(llm_e),
                        success=False,
                    )
                except Exception:
                    logger.exception("记录reply日志失败")
            return False, llm_response  # LLM 调用失败则无法生成回复

        return True, llm_response

@@ -5,8 +5,11 @@ from typing import Awaitable, Callable, Dict, List, Optional, Tuple
import random
import time

from rich.panel import Panel

from src.chat.message_receive.chat_manager import BotChatSession
from src.chat.message_receive.message import SessionMessage
from src.cli.console import console
from src.common.data_models.reply_generation_data_models import (
    GenerationMetrics,
    LLMCompletionResult,
@@ -27,6 +30,7 @@ from src.maisaka.context_messages import (
    ToolResultMessage,
)
from src.maisaka.message_adapter import parse_speaker_content
from src.maisaka.prompt_cli_renderer import PromptCLIVisualizer

from .maisaka_expression_selector import maisaka_expression_selector

@@ -365,9 +369,23 @@ class MaisakaReplyGenerator:
        result.completion.request_prompt = prompt
        show_replyer_prompt = bool(getattr(global_config.debug, "show_replyer_prompt", False))
        show_replyer_reasoning = bool(getattr(global_config.debug, "show_replyer_reasoning", False))
        preview_chat_id = self._resolve_session_id(stream_id) or "unknown"

        if show_replyer_prompt:
            logger.info(f"\nMaisaka 回复器提示词:\n{prompt}\n")
            console.print(
                Panel(
                    PromptCLIVisualizer.build_text_access_panel(
                        prompt,
                        category="replyer",
                        chat_id=preview_chat_id,
                        request_kind="replyer",
                        subtitle=f"流ID: {preview_chat_id}",
                    ),
                    title="Maisaka 回复器 Prompt",
                    border_style="bright_yellow",
                    padding=(0, 1),
                )
            )

        llm_started_at = time.perf_counter()
        try:

@@ -444,13 +444,17 @@ class MaisakaReplyGenerator:
        preview_chat_id = self._resolve_session_id(stream_id)
        replyer_prompt_section: RenderableType | None = None
        if show_replyer_prompt:
            replyer_prompt_section = PromptCLIVisualizer.build_text_section(
                prompt_preview,
                category="replyer",
                chat_id=preview_chat_id,
                request_kind="replyer",
                subtitle=f"流ID: {preview_chat_id}",
                folded=global_config.debug.fold_maisaka_thinking,
            replyer_prompt_section = Panel(
                PromptCLIVisualizer.build_text_access_panel(
                    prompt_preview,
                    category="replyer",
                    chat_id=preview_chat_id,
                    request_kind="replyer",
                    subtitle=f"流ID: {preview_chat_id}",
                ),
                title="Reply Prompt",
                border_style="bright_yellow",
                padding=(0, 1),
            )

        llm_started_at = time.perf_counter()

@@ -5,6 +5,7 @@ from typing import Optional

from src.chat.replyer.replyer_manager import replyer_manager
from src.cli.maisaka_cli_sender import CLI_PLATFORM_NAME, render_cli_message
from src.common.data_models.reply_generation_data_models import ReplyGenerationResult
from src.common.logger import get_logger
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.services import send_service
@@ -57,10 +58,10 @@ def get_tool_spec() -> ToolSpec:
    )


def _build_monitor_metadata(reply_result: object) -> dict[str, object]:
def _build_monitor_metadata(reply_result: ReplyGenerationResult) -> dict[str, object]:
    """从 reply 结果中提取统一监控详情。"""

    monitor_detail = getattr(reply_result, "monitor_detail", None)
    monitor_detail = reply_result.monitor_detail
    if isinstance(monitor_detail, dict):
        return {"monitor_detail": monitor_detail}
    return {}
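
A minimal, self-contained sketch of the behaviour above, using SimpleNamespace as a hypothetical stand-in for ReplyGenerationResult (illustration only, not the project types):

from types import SimpleNamespace

def build_monitor_metadata(reply_result) -> dict[str, object]:
    # Same rule as _build_monitor_metadata: forward the detail only when it is a dict.
    monitor_detail = reply_result.monitor_detail
    return {"monitor_detail": monitor_detail} if isinstance(monitor_detail, dict) else {}

assert build_monitor_metadata(SimpleNamespace(monitor_detail={"output_text": "hi"})) == {"monitor_detail": {"output_text": "hi"}}
assert build_monitor_metadata(SimpleNamespace(monitor_detail=None)) == {}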

@@ -66,6 +66,25 @@ def get_tool_spec() -> ToolSpec:
    )


def _normalize_candidate_emotions(emoji: MaiEmoji) -> list[str]:
    """清洗候选表情上的情绪标签。"""

    raw_emotions = getattr(emoji, "emotion", None)
    if isinstance(raw_emotions, list) and raw_emotions:
        return [str(item).strip() for item in raw_emotions if str(item).strip()]

    description = str(getattr(emoji, "description", "") or "").strip()
    if not description:
        return []

    normalized_description = (
        description.replace(",", ",")
        .replace("、", ",")
        .replace(";", ",")
    )
    return [item.strip() for item in normalized_description.split(",") if item.strip()]
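
A standalone sketch of the normalization above, using SimpleNamespace instead of MaiEmoji so it runs on its own; the separator handling matches the replace chain in the function:

from types import SimpleNamespace

def normalize_emotions(emoji) -> list[str]:
    # Prefer the explicit emotion list; otherwise split the description on common separators.
    raw = getattr(emoji, "emotion", None)
    if isinstance(raw, list) and raw:
        return [str(item).strip() for item in raw if str(item).strip()]
    description = str(getattr(emoji, "description", "") or "").strip()
    if not description:
        return []
    for separator in (",", "、", ";"):
        description = description.replace(separator, ",")
    return [item.strip() for item in description.split(",") if item.strip()]

assert normalize_emotions(SimpleNamespace(description="开心,可爱", emotion=[])) == ["开心", "可爱"]
assert normalize_emotions(SimpleNamespace(description="", emotion=["难过"])) == ["难过"]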


async def _load_emoji_bytes(emoji: MaiEmoji) -> bytes:
    """读取单个表情包图片字节。"""

@@ -211,13 +230,126 @@ async def _build_emoji_candidate_message(emojis: list[MaiEmoji]) -> SessionBacke
    )


def _build_emoji_candidate_summary(emojis: list[MaiEmoji]) -> str:
    """构建供监控展示使用的候选表情摘要。"""

    summary_lines: list[str] = []
    for index, emoji in enumerate(emojis, start=1):
        description = emoji.description.strip() or "(无描述)"
        emotions = "、".join(_normalize_candidate_emotions(emoji)) or "无"
        summary_lines.append(f"{index}. 描述:{description}")
        summary_lines.append(f" 情绪:{emotions}")
    return "\n".join(summary_lines).strip()


def _build_send_emoji_prompt_preview(
    *,
    system_prompt: str,
    requested_emotion: str,
    grid_rows: int,
    grid_columns: int,
    sampled_emojis: list[MaiEmoji],
) -> str:
    """构建表情选择子代理的文本预览。"""

    task_text = (
        "[选择任务]\n"
        f"requested_emotion: {requested_emotion or '未指定'}\n"
        f"候选总数: {len(sampled_emojis)}\n"
        f"拼图布局: {grid_rows}x{grid_columns}\n"
        "请只输出 JSON。"
    )
    candidate_summary = _build_emoji_candidate_summary(sampled_emojis)
    return (
        f"[System Prompt]\n{system_prompt}\n\n"
        f"{task_text}\n\n"
        f"[候选表情摘要]\n{candidate_summary or '无候选表情'}"
    ).strip()


def _build_send_emoji_monitor_detail(
    *,
    prompt_text: str = "",
    reasoning_text: str = "",
    output_text: str = "",
    metrics: Optional[Dict[str, Any]] = None,
    extra_sections: Optional[list[dict[str, str]]] = None,
) -> Dict[str, Any]:
    """构建 emotion tool 统一监控详情。"""

    detail: Dict[str, Any] = {}
    if prompt_text.strip():
        detail["prompt_text"] = prompt_text.strip()
    if reasoning_text.strip():
        detail["reasoning_text"] = reasoning_text.strip()
    if output_text.strip():
        detail["output_text"] = output_text.strip()
    if isinstance(metrics, dict) and metrics:
        detail["metrics"] = dict(metrics)
    normalized_sections = [
        {
            "title": str(section.get("title") or "").strip(),
            "content": str(section.get("content") or "").strip(),
        }
        for section in extra_sections or []
        if isinstance(section, dict)
        and str(section.get("title") or "").strip()
        and str(section.get("content") or "").strip()
    ]
    if normalized_sections:
        detail["extra_sections"] = normalized_sections
    return detail


def _build_send_emoji_monitor_metadata(
    selection_metadata: Dict[str, Any],
    *,
    requested_emotion: str,
    send_result: Optional[Any] = None,
    error_message: str = "",
) -> Dict[str, Any]:
    """根据表情选择与发送结果构建统一监控 metadata。"""

    raw_detail = selection_metadata.get("monitor_detail")
    detail = dict(raw_detail) if isinstance(raw_detail, dict) else {}
    extra_sections = list(detail.get("extra_sections", [])) if isinstance(detail.get("extra_sections"), list) else []

    if send_result is not None:
        result_lines = [
            f"请求情绪:{requested_emotion or '未指定'}",
            f"命中情绪:{send_result.matched_emotion or '未命中'}",
            f"表情描述:{send_result.description or '无描述'}",
            f"情绪标签:{'、'.join(send_result.emotions) if send_result.emotions else '无'}",
            f"发送结果:{send_result.message or ('成功' if send_result.success else '失败')}",
        ]
        extra_sections.append({
            "title": "表情发送结果",
            "content": "\n".join(result_lines),
        })
    elif error_message.strip():
        extra_sections.append({
            "title": "表情发送结果",
            "content": (
                f"请求情绪:{requested_emotion or '未指定'}\n"
                f"发送结果:{error_message.strip()}"
            ),
        })

    if extra_sections:
        detail["extra_sections"] = extra_sections

    if detail:
        return {"monitor_detail": detail}
    return {}


async def _select_emoji_with_sub_agent(
    tool_ctx: BuiltinToolRuntimeContext,
    requested_emotion: str,
    reasoning: str,
    context_texts: list[str],
    sample_size: int,
    selection_metadata: Optional[Dict[str, str]] = None,
    selection_metadata: Optional[Dict[str, Any]] = None,
) -> tuple[MaiEmoji | None, str]:
    """通过临时子代理从候选表情包中选出一个结果。"""

@@ -255,7 +387,15 @@
        remaining_uses_value=1,
        display_prefix="[表情包选择任务]",
    )
    prompt_preview = _build_send_emoji_prompt_preview(
        system_prompt=system_prompt,
        requested_emotion=requested_emotion,
        grid_rows=grid_rows,
        grid_columns=grid_columns,
        sampled_emojis=sampled_emojis,
    )

    selection_started_at = datetime.now()
    response = await tool_ctx.runtime.run_sub_agent(
        context_message_limit=_EMOJI_SUB_AGENT_CONTEXT_LIMIT,
        system_prompt=system_prompt,
@@ -266,16 +406,40 @@
            schema=EmojiSelectionResult,
        ),
    )
    selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2)

    selection_metrics: Dict[str, Any] = {
        "prompt_tokens": response.prompt_tokens,
        "completion_tokens": response.completion_tokens,
        "total_tokens": response.total_tokens,
        "overall_ms": selection_duration_ms,
    }

    try:
        selection = EmojiSelectionResult.model_validate_json(response.content or "")
    except Exception as exc:
        logger.warning(f"{tool_ctx.runtime.log_prefix} 表情包子代理结果解析失败,将回退到候选首项: {exc}")
        if selection_metadata is not None:
            selection_metadata["monitor_detail"] = _build_send_emoji_monitor_detail(
                prompt_text=prompt_preview,
                output_text=response.content or "",
                metrics=selection_metrics,
                extra_sections=[{
                    "title": "解析异常",
                    "content": str(exc),
                }],
            )
        fallback_emoji = sampled_emojis[0] if sampled_emojis else None
        return fallback_emoji, ""

    if selection_metadata is not None:
        selection_metadata["reason"] = selection.reason.strip()
        selection_metadata["monitor_detail"] = _build_send_emoji_monitor_detail(
            prompt_text=prompt_preview,
            reasoning_text=selection.reason,
            output_text=response.content or "",
            metrics=selection_metrics,
        )

    emoji_index = int(selection.emoji_index)
    if emoji_index < 1 or emoji_index > len(sampled_emojis):
@@ -310,7 +474,7 @@ async def handle_tool(
        "matched_emotion": "",
        "reason": "",
    }
    selection_metadata: Dict[str, str] = {"reason": ""}
    selection_metadata: Dict[str, Any] = {"reason": "", "monitor_detail": {}}

    logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具,请求情绪={emotion!r}")

@@ -336,6 +500,11 @@
            invocation.tool_name,
            structured_result["message"],
            structured_content=structured_result,
            metadata=_build_send_emoji_monitor_metadata(
                selection_metadata,
                requested_emotion=emotion,
                error_message=structured_result["message"],
            ),
        )

    if send_result.success:
@@ -358,6 +527,11 @@
            invocation.tool_name,
            selection_metadata["reason"] or _EMOJI_SUCCESS_MESSAGE,
            structured_content=structured_result,
            metadata=_build_send_emoji_monitor_metadata(
                selection_metadata,
                requested_emotion=emotion,
                send_result=send_result,
            ),
        )

        structured_result["description"] = send_result.description
@@ -373,4 +547,9 @@
        invocation.tool_name,
        structured_result["message"],
        structured_content=structured_result,
        metadata=_build_send_emoji_monitor_metadata(
            selection_metadata,
            requested_emotion=emotion,
            send_result=send_result,
        ),
    )

@@ -935,11 +935,3 @@ class MaisakaChatLoopService:

        return filtered_history, hidden_assistant_count

    @staticmethod
    def _drop_orphan_tool_results(
        selected_history: List[LLMContextMessage],
    ) -> List[LLMContextMessage]:
        """移除窗口中缺少对应 tool_call 的工具结果消息。"""

        normalized_history, _ = drop_orphan_tool_results(selected_history)
        return normalized_history

@@ -6,6 +6,7 @@ from typing import Any
_REQUEST_PANEL_STYLE_MAP: dict[str, tuple[str, str]] = {
    "timing_gate": ("MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"),
    "replyer": ("MaiSaka 回复器 Prompt", "bright_yellow"),
    "emotion": ("MaiSaka Emotion Tool Prompt", "bright_cyan"),
    "sub_agent": ("MaiSaka 大模型请求 - 子代理", "bright_blue"),
}

@@ -431,6 +431,7 @@ class MaisakaReasoningEngine:
            planner_response=response.content or "",
            tool_calls=response.tool_calls,
            tool_results=tool_result_summaries,
            tool_detail_results=tool_monitor_results,
            prompt_section=response.prompt_section,
        )
        if should_pause:
@@ -683,14 +684,6 @@ class MaisakaReasoningEngine:
        self._runtime._chat_history = trimmed_history
        self._runtime._log_history_trimmed(removed_count, conversation_message_count)

    @staticmethod
    def _drop_leading_orphan_tool_results(
        chat_history: list[LLMContextMessage],
    ) -> tuple[list[LLMContextMessage], int]:
        """清理历史窗口中缺少对应 assistant tool_call 的工具结果消息。"""

        return drop_leading_orphan_tool_results(chat_history)

    @staticmethod
    def _calculate_similarity(text1: str, text2: str) -> float:
        """计算两个文本之间的相似度。
@@ -1083,7 +1076,8 @@ class MaisakaReasoningEngine:
            anchor_message: 当前轮的锚点消息。

        Returns:
            tuple[bool, list[str]]: 是否需要暂停当前思考循环,以及工具结果摘要列表。
            tuple[bool, list[str], list[dict[str, Any]]]: 是否需要暂停当前思考循环、
                工具结果摘要列表,以及最终监控事件使用的工具详情列表。
        """

        tool_result_summaries: list[str] = []
@@ -1126,8 +1120,6 @@ class MaisakaReasoningEngine:
                self._build_tool_monitor_result(tool_call, invocation, result, tool_duration_ms)
            )

            # 向监控前端广播工具执行结果

            if not result.success and tool_call.func_name == "reply":
                logger.warning(f"{self._runtime.log_prefix} 回复工具未生成可见消息,将继续下一轮循环")


@@ -7,6 +7,7 @@ import time

from rich.console import Group, RenderableType
from rich.panel import Panel
from rich.pretty import Pretty
from rich.text import Text

from src.cli.console import console
@@ -31,6 +32,7 @@ from src.plugin_runtime.tool_provider import PluginToolProvider
from .chat_loop_service import ChatResponse, MaisakaChatLoopService
from .context_messages import LLMContextMessage
from .display_utils import build_tool_call_summary_lines, format_token_count
from .prompt_cli_renderer import PromptCLIVisualizer
from .reasoning_engine import MaisakaReasoningEngine
from .tool_provider import MaisakaBuiltinToolProvider

@@ -568,9 +570,10 @@ class MaisakaHeartFlowChatting:
        planner_response: str = "",
        tool_calls: Optional[list[Any]] = None,
        tool_results: Optional[list[str]] = None,
        tool_detail_results: Optional[list[dict[str, Any]]] = None,
        prompt_section: Optional[RenderableType] = None,
    ) -> None:
        """在终端展示当前聊天流的上下文占用、规划结果与工具摘要。"""
        """在终端展示当前聊天流的上下文占用、规划结果与工具结果。"""
        if not global_config.debug.show_maisaka_thinking:
            return

@@ -605,7 +608,10 @@ class MaisakaHeartFlowChatting:
            )
        )

        normalized_tool_results = [result.strip() for result in tool_results or [] if isinstance(result, str) and result.strip()]
        normalized_tool_results = self._filter_redundant_tool_results(
            tool_results=tool_results or [],
            tool_detail_results=tool_detail_results or [],
        )
        if normalized_tool_results:
            renderables.append(
                Panel(
@@ -616,6 +622,10 @@ class MaisakaHeartFlowChatting:
            )
        )

        detail_panels = self._build_tool_detail_panels(tool_detail_results or [])
        if detail_panels:
            renderables.extend(detail_panels)

        console.print(
            Panel(
                Group(*renderables),
@@ -625,6 +635,231 @@ class MaisakaHeartFlowChatting:
            )
        )

    @staticmethod
    def _filter_redundant_tool_results(
        *,
        tool_results: list[str],
        tool_detail_results: list[dict[str, Any]],
    ) -> list[str]:
        """过滤掉已经在详情卡片中展示过的工具摘要。"""

        detailed_summaries = {
            str(tool_result.get("summary") or "").strip()
            for tool_result in tool_detail_results
            if isinstance(tool_result.get("detail"), dict) and tool_result.get("detail")
        }
        return [
            result.strip()
            for result in tool_results
            if isinstance(result, str)
            and result.strip()
            and result.strip() not in detailed_summaries
        ]
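
A standalone sketch of the same filtering rule, mirroring the test earlier in this commit: summaries that already have a detail card are dropped from the plain summary list.

def filter_redundant(tool_results, tool_detail_results):
    # Collect summaries that come with a non-empty detail dict, then drop them from the plain list.
    detailed = {
        str(item.get("summary") or "").strip()
        for item in tool_detail_results
        if isinstance(item.get("detail"), dict) and item.get("detail")
    }
    return [
        summary.strip()
        for summary in tool_results
        if isinstance(summary, str) and summary.strip() and summary.strip() not in detailed
    ]

assert filter_redundant(
    ["- reply [成功]: 已回复", "- query_memory [成功]: 查询到 2 条记录"],
    [{"summary": "- reply [成功]: 已回复", "detail": {"output_text": "测试回复"}}],
) == ["- query_memory [成功]: 查询到 2 条记录"]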

    @staticmethod
    def _build_tool_metrics_text(metrics: dict[str, Any]) -> str:
        """将工具监控 metrics 转换为便于 CLI 阅读的文本。"""

        lines: list[str] = []
        model_name = str(metrics.get("model_name") or "").strip()
        if model_name:
            lines.append(f"模型:{model_name}")

        prompt_tokens = metrics.get("prompt_tokens")
        completion_tokens = metrics.get("completion_tokens")
        total_tokens = metrics.get("total_tokens")
        if isinstance(prompt_tokens, int) or isinstance(completion_tokens, int) or isinstance(total_tokens, int):
            lines.append(
                "Token:"
                f"输入 {format_token_count(int(prompt_tokens or 0))} / "
                f"输出 {format_token_count(int(completion_tokens or 0))} / "
                f"总计 {format_token_count(int(total_tokens or 0))}"
            )

        prompt_ms = metrics.get("prompt_ms")
        llm_ms = metrics.get("llm_ms")
        overall_ms = metrics.get("overall_ms")
        timing_parts: list[str] = []
        if isinstance(prompt_ms, (int, float)):
            timing_parts.append(f"prompt {round(float(prompt_ms), 2)} ms")
        if isinstance(llm_ms, (int, float)):
            timing_parts.append(f"llm {round(float(llm_ms), 2)} ms")
        if isinstance(overall_ms, (int, float)):
            timing_parts.append(f"overall {round(float(overall_ms), 2)} ms")
        if timing_parts:
            lines.append("耗时:" + " / ".join(timing_parts))

        return "\n".join(lines)

    @staticmethod
    def _get_tool_detail_labels(tool_name: str) -> dict[str, str]:
        """返回不同工具对应的详情区标题与预览类别。"""

        normalized_tool_name = str(tool_name or "").strip().lower()
        if normalized_tool_name == "reply":
            return {
                "prompt_title": "Reply Prompt",
                "reasoning_title": "Reply 思考",
                "output_title": "Reply 输出",
                "prompt_category": "replyer",
                "request_kind": "replyer",
            }
        if normalized_tool_name == "send_emoji":
            return {
                "prompt_title": "Emotion Prompt",
                "reasoning_title": "Emotion 思考",
                "output_title": "Emotion 输出",
                "prompt_category": "emotion",
                "request_kind": "emotion",
            }
        display_name = normalized_tool_name or "tool"
        return {
            "prompt_title": f"{display_name} Prompt",
            "reasoning_title": f"{display_name} 思考",
            "output_title": f"{display_name} 输出",
            "prompt_category": display_name,
            "request_kind": "sub_agent",
        }

    def _build_tool_prompt_access_panel(
        self,
        *,
        tool_name: str,
        prompt_text: str,
        tool_call_id: str,
    ) -> Panel:
        """将工具 prompt 渲染为可点击查看的预览入口。"""

        labels = self._get_tool_detail_labels(tool_name)
        subtitle = f"会话ID: {self.session_id}"
        if tool_call_id:
            subtitle += f"\n调用ID: {tool_call_id}"

        return Panel(
            PromptCLIVisualizer.build_text_access_panel(
                prompt_text,
                category=labels["prompt_category"],
                chat_id=self.session_id,
                request_kind=labels["request_kind"],
                subtitle=subtitle,
            ),
            title=labels["prompt_title"],
            border_style="bright_yellow",
            padding=(0, 1),
        )

    def _build_tool_detail_panels(self, tool_detail_results: list[dict[str, Any]]) -> list[RenderableType]:
        """将 tool monitor detail 渲染为 CLI 详情卡片。"""

        panels: list[RenderableType] = []
        for tool_result in tool_detail_results:
            detail = tool_result.get("detail")
            if not isinstance(detail, dict) or not detail:
                continue

            tool_name = str(tool_result.get("tool_name") or "unknown").strip() or "unknown"
            detail_labels = self._get_tool_detail_labels(tool_name)
            tool_call_id = str(tool_result.get("tool_call_id") or "").strip()
            tool_args = tool_result.get("tool_args")
            summary = str(tool_result.get("summary") or "").strip()
            duration_ms = tool_result.get("duration_ms")

            parts: list[RenderableType] = []
            header_lines: list[str] = []
            if summary:
                header_lines.append(summary)
            if tool_call_id:
                header_lines.append(f"调用ID:{tool_call_id}")
            if isinstance(duration_ms, (int, float)):
                header_lines.append(f"执行耗时:{round(float(duration_ms), 2)} ms")
            if header_lines:
                parts.append(Text("\n".join(header_lines)))

            if isinstance(tool_args, dict) and tool_args:
                parts.append(
                    Panel(
                        Pretty(tool_args, expand_all=True),
                        title="工具参数",
                        border_style="cyan",
                        padding=(0, 1),
                    )
                )

            metrics = detail.get("metrics")
            if isinstance(metrics, dict):
                metrics_text = self._build_tool_metrics_text(metrics)
                if metrics_text:
                    parts.append(
                        Panel(
                            Text(metrics_text),
                            title="执行指标",
                            border_style="bright_cyan",
                            padding=(0, 1),
                        )
                    )

            prompt_text = str(detail.get("prompt_text") or "").strip()
            if prompt_text:
                parts.append(
                    self._build_tool_prompt_access_panel(
                        tool_name=tool_name,
                        prompt_text=prompt_text,
                        tool_call_id=tool_call_id,
                    )
                )

            reasoning_text = str(detail.get("reasoning_text") or "").strip()
            if reasoning_text:
                parts.append(
                    Panel(
                        Text(reasoning_text),
                        title=detail_labels["reasoning_title"],
                        border_style="magenta",
                        padding=(0, 1),
                    )
                )

            output_text = str(detail.get("output_text") or "").strip()
            if output_text:
                parts.append(
                    Panel(
                        Text(output_text),
                        title=detail_labels["output_title"],
                        border_style="green",
                        padding=(0, 1),
                    )
                )

            extra_sections = detail.get("extra_sections")
            if isinstance(extra_sections, list):
                for section in extra_sections:
                    if not isinstance(section, dict):
                        continue
                    section_title = str(section.get("title") or "").strip() or "附加信息"
                    section_content = str(section.get("content") or "").strip()
                    if not section_content:
                        continue
                    parts.append(
                        Panel(
                            Text(section_content),
                            title=section_title,
                            border_style="white",
                            padding=(0, 1),
                        )
                    )

            if parts:
                panels.append(
                    Panel(
                        Group(*parts),
                        title=f"{tool_name} 工具详情",
                        border_style="yellow",
                        padding=(0, 1),
                    )
                )

        return panels

    def _log_cycle_started(self, cycle_detail: CycleDetail, round_index: int) -> None:
        logger.info(
            f"{self.log_prefix} MaiSaka 轮次开始: 循环编号={cycle_detail.cycle_id} "

@@ -10,7 +10,6 @@ from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING

from rich.traceback import install

from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.chat.message_receive.chat_manager import BotChatSession
from src.chat.replyer.group_generator import DefaultReplyer
from src.chat.replyer.private_generator import PrivateReplyer
@@ -155,21 +154,6 @@ async def generate_reply(
            f"[GeneratorService] 回复生成成功,生成了 {len(reply_set.components) if reply_set else 0} 个回复项"
        )

        try:
            PlanReplyLogger.log_reply(
                chat_id=chat_stream.session_id if chat_stream else (chat_id or ""),
                prompt=llm_response.prompt or "",
                output=llm_response.content,
                processed_output=llm_response.processed_output,
                model=llm_response.model,
                timing=llm_response.timing,
                reasoning=llm_response.reasoning,
                think_level=think_level,
                success=True,
            )
        except Exception:
            logger.exception("[GeneratorService] 记录reply日志失败")

        return success, llm_response

    except ValueError as ve: