feat:拆分maisaka一些混杂结构

This commit is contained in:
SengokuCola
2026-04-05 17:44:28 +08:00
parent 499abe89aa
commit 80be746be0
8 changed files with 248 additions and 221 deletions

View File

@@ -1,11 +1,10 @@
你的任务是分析当前聊天节奏,并只决定 {bot_name} 下一步应当继续、等待,还是暂停本轮发言。
你不是回复生成器,也不是信息搜集器;你只负责做节奏控制判断。
你的任务是分析当前聊天节奏,并只决定 {bot_name} 下一步应当继续、等待,还是暂停本轮发言。你只负责做节奏控制判断。
【参考信息】
{bot_name} 的人设:{identity}
【参考信息结束】
你需要根据提供的参考信息、当前场景和输出规则来进行节奏判断。
你需要根据提供的参考信息、当前场景和输出规则来进行节奏判断。你必须先思考再输出json格式的tool
在当前场景中,不同的人正在互动({bot_name} 也是一位参与的用户),用户也可能正在连续发送消息或彼此互动。
你的任务不是生成对别人可见的发言,也不是直接使用查询类工具,而是判断当前是否应该:
- continue立刻进入下一轮完整思考、搜集信息、回复与其他工具执行
@@ -22,4 +21,4 @@
{group_chat_attention_block}
现在,请先输出你对当前聊天节奏的简短分析,然后调用一个工具:
现在,请先输出你对当前聊天节奏的文本简短分析,然后调用一个工具:

View File

@@ -12,7 +12,8 @@ from src.config.config import global_config
from src.core.tooling import ToolExecutionResult
from ..context_messages import SessionBackedMessage
from ..message_adapter import build_visible_text_from_sequence, clone_message_sequence, format_speaker_content
from ..history_utils import build_prefixed_message_sequence, build_session_message_visible_text
from ..message_adapter import format_speaker_content
from ..planner_message_utils import build_planner_prefix, build_session_backed_text_message
if TYPE_CHECKING:
@@ -142,21 +143,7 @@ class BuiltinToolRuntimeContext:
def _build_visible_text_from_sent_message(message: "SessionMessage") -> str:
"""将已发送消息转换为 Maisaka 可见文本。"""
user_info = message.message_info.user_info
speaker_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
visible_message_id = None if message.is_notify else message.message_id
legacy_sequence = MessageSequence([])
legacy_sequence.text(
format_speaker_content(
speaker_name,
"",
message.timestamp,
visible_message_id,
)
)
for component in clone_message_sequence(message.raw_message).components:
legacy_sequence.components.append(component)
return build_visible_text_from_sequence(legacy_sequence).strip()
return build_session_message_visible_text(message)
def append_sent_message_to_chat_history(
self,
@@ -175,15 +162,9 @@ class BuiltinToolRuntimeContext:
message_id=message.message_id,
include_message_id=not message.is_notify and bool(message.message_id),
)
planner_components = clone_message_sequence(message.raw_message).components
if planner_components and isinstance(planner_components[0], TextComponent):
planner_components[0].text = f"{planner_prefix}{planner_components[0].text}"
else:
planner_components.insert(0, TextComponent(planner_prefix))
history_message = SessionBackedMessage.from_session_message(
message,
raw_message=MessageSequence(planner_components),
raw_message=build_prefixed_message_sequence(message.raw_message, planner_prefix),
visible_text=self._build_visible_text_from_sent_message(message),
source_kind=source_kind,
)

View File

@@ -35,7 +35,8 @@ from src.plugin_runtime.host.hook_spec_registry import HookSpec, HookSpecRegistr
from src.services.llm_service import LLMServiceClient
from .builtin_tool import get_builtin_tools
from .context_messages import AssistantMessage, LLMContextMessage, ToolResultMessage
from .context_messages import AssistantMessage, LLMContextMessage
from .history_utils import drop_leading_orphan_tool_results
from .prompt_cli_renderer import PromptCLIVisualizer
@@ -881,7 +882,7 @@ class MaisakaChatLoopService:
selected_indices.reverse()
selected_history = [chat_history[index] for index in selected_indices]
selected_history, hidden_assistant_count = MaisakaChatLoopService._hide_early_assistant_messages(selected_history)
selected_history = MaisakaChatLoopService._drop_leading_orphan_tool_results(selected_history)
selected_history, _ = drop_leading_orphan_tool_results(selected_history)
selection_reason = (
f"上下文裁剪:最近 {effective_context_size} 条 user/assistant 消息,"
f"实际发送 {len(selected_history)}"
@@ -925,7 +926,7 @@ class MaisakaChatLoopService:
selected_indices.reverse()
selected_history = [chat_history[index] for index in selected_indices]
selected_history, hidden_assistant_count = MaisakaChatLoopService._hide_early_assistant_messages(selected_history)
selected_history = MaisakaChatLoopService._drop_leading_orphan_tool_results(selected_history)
selected_history, _ = drop_leading_orphan_tool_results(selected_history)
return (
selected_history,
(
@@ -975,26 +976,5 @@ class MaisakaChatLoopService:
) -> List[LLMContextMessage]:
"""移除窗口前缀中缺少对应 tool_call 的工具结果消息。"""
if not selected_history:
return selected_history
available_tool_call_ids = {
tool_call.call_id
for message in selected_history
if isinstance(message, AssistantMessage)
for tool_call in message.tool_calls
if tool_call.call_id
}
first_valid_index = 0
while first_valid_index < len(selected_history):
message = selected_history[first_valid_index]
if not isinstance(message, ToolResultMessage):
break
if message.tool_call_id in available_tool_call_ids:
break
first_valid_index += 1
if first_valid_index == 0:
return selected_history
return selected_history[first_valid_index:]
normalized_history, _ = drop_leading_orphan_tool_results(selected_history)
return normalized_history

View File

@@ -0,0 +1,89 @@
"""Maisaka 展示辅助工具。"""
from typing import Any
_REQUEST_PANEL_STYLE_MAP: dict[str, tuple[str, str]] = {
"timing_gate": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - Timing Gate \u5b50\u4ee3\u7406", "bright_magenta"),
"replyer": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u56de\u590d\u5668 Prompt", "bright_yellow"),
"sub_agent": ("\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - \u5b50\u4ee3\u7406", "bright_blue"),
}
_DEFAULT_REQUEST_PANEL_STYLE: tuple[str, str] = (
"\u004d\u0061\u0069\u0053\u0061\u006b\u0061 \u5927\u6a21\u578b\u8bf7\u6c42 - \u5bf9\u8bdd\u5355\u6b65",
"cyan",
)
_ROLE_BADGE_STYLE_MAP: dict[str, str] = {
"system": "bold white on blue",
"user": "bold black on green",
"assistant": "bold black on yellow",
"tool": "bold white on magenta",
}
_ROLE_BADGE_LABEL_MAP: dict[str, str] = {
"system": "\u7cfb\u7edf",
"user": "\u7528\u6237",
"assistant": "\u52a9\u624b",
"tool": "\u5de5\u5177",
}
def format_token_count(token_count: int) -> str:
    """Render a token count compactly.

    Counts of 10,000 or more are shown with a one-decimal ``k`` suffix
    (e.g. ``12345`` -> ``"12.3k"``); smaller counts are shown verbatim.
    """
    return f"{token_count / 1000:.1f}k" if token_count >= 10_000 else str(token_count)
def get_request_panel_style(request_kind: str) -> tuple[str, str]:
    """Return the (panel title, border color) pair for a given request kind.

    Empty/None kinds are treated as "planner"; unknown kinds fall back to
    the default conversation-step panel style.
    """
    kind_key = str(request_kind or "planner").strip().lower()
    return _REQUEST_PANEL_STYLE_MAP.get(kind_key, _DEFAULT_REQUEST_PANEL_STYLE)
def get_role_badge_style(role: str) -> str:
    """Return the rich style used to render *role*'s badge (muted fallback)."""
    try:
        return _ROLE_BADGE_STYLE_MAP[role]
    except KeyError:
        return "bold white on bright_black"
def get_role_badge_label(role: str) -> str:
    """Return the display label for *role* (escaped "未知"/unknown fallback)."""
    try:
        return _ROLE_BADGE_LABEL_MAP[role]
    except KeyError:
        return "\u672a\u77e5"
def format_tool_call_for_display(tool_call: Any) -> dict[str, Any]:
    """Normalize a tool-call object into a uniform ``{id, name, arguments}`` dict.

    Accepts either an OpenAI-style dict (with an optional nested ``function``
    payload) or an attribute-style object exposing ``call_id``/``id``,
    ``func_name``/``name`` and ``args``/``arguments``.
    """
    if isinstance(tool_call, dict):
        function_payload = tool_call.get("function", {})
        return {
            "id": tool_call.get("id"),
            "name": function_payload.get("name", tool_call.get("name")),
            "arguments": function_payload.get("arguments", tool_call.get("arguments")),
        }
    call_id = getattr(tool_call, "call_id", getattr(tool_call, "id", None))
    call_name = getattr(tool_call, "func_name", getattr(tool_call, "name", None))
    call_args = getattr(tool_call, "args", getattr(tool_call, "arguments", None))
    return {"id": call_id, "name": call_name, "arguments": call_args}


def build_tool_call_summary_lines(tool_calls: list[Any]) -> list[str]:
    """Build one ``- name[: args]`` summary line per tool call.

    Arguments are appended only when they form a non-empty dict; a missing
    or blank name is rendered as ``unknown``.
    """
    lines: list[str] = []
    for raw_call in tool_calls:
        normalized = format_tool_call_for_display(raw_call)
        name = str(normalized.get("name") or "").strip() or "unknown"
        args = normalized.get("arguments")
        suffix = f": {args}" if isinstance(args, dict) and args else ""
        lines.append(f"- {name}{suffix}")
    return lines

View File

@@ -0,0 +1,80 @@
"""Maisaka 历史消息处理辅助工具。"""
from typing import TYPE_CHECKING
from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
from .context_messages import AssistantMessage, LLMContextMessage, ToolResultMessage
from .message_adapter import build_visible_text_from_sequence, clone_message_sequence, format_speaker_content
if TYPE_CHECKING:
from src.chat.message_receive.message import SessionMessage
def build_prefixed_message_sequence(
    source_sequence: MessageSequence,
    planner_prefix: str,
) -> MessageSequence:
    """Build a new sequence carrying the planner prefix on its first text component.

    The source sequence is cloned first, so the original is never mutated.
    If the clone starts with a text component the prefix is prepended to its
    text; otherwise a standalone text component holding the prefix is inserted
    at the front.
    """
    components = clone_message_sequence(source_sequence).components
    leading = components[0] if components else None
    if isinstance(leading, TextComponent):
        leading.text = f"{planner_prefix}{leading.text}"
    else:
        components.insert(0, TextComponent(planner_prefix))
    return MessageSequence(components)
def build_session_message_visible_text(
    message: "SessionMessage",
    source_sequence: MessageSequence | None = None,
) -> str:
    """Convert a real session message into Maisaka-visible text.

    A speaker header (display name, timestamp and — for non-notify messages —
    the message id) is rendered first, followed by a cloned copy of the
    message components. When *source_sequence* is omitted, the message's own
    ``raw_message`` is rendered.
    """
    sequence_to_render = message.raw_message if source_sequence is None else source_sequence
    sender = message.message_info.user_info
    # Prefer the group card name, then nickname, then the raw user id.
    display_name = sender.user_cardname or sender.user_nickname or sender.user_id
    # Notify messages never expose a message id in the visible text.
    shown_message_id = None if message.is_notify else message.message_id
    rendered = MessageSequence([])
    rendered.text(
        format_speaker_content(
            display_name,
            "",
            message.timestamp,
            shown_message_id,
        )
    )
    rendered.components.extend(clone_message_sequence(sequence_to_render).components)
    return build_visible_text_from_sequence(rendered).strip()
def drop_leading_orphan_tool_results(
    chat_history: list[LLMContextMessage],
) -> tuple[list[LLMContextMessage], int]:
    """Strip leading tool results whose ``tool_call_id`` has no matching tool call.

    Scans the history prefix and removes ``ToolResultMessage`` entries until the
    first non-tool-result message, or the first tool result whose id appears in
    some assistant message's tool calls. Returns the (possibly shortened)
    history together with the number of messages removed; the original list
    object is returned untouched when nothing is dropped.
    """
    if not chat_history:
        return chat_history, 0
    # Collect every tool_call id emitted by assistant messages in this window.
    known_call_ids = {
        call.call_id
        for entry in chat_history
        if isinstance(entry, AssistantMessage)
        for call in entry.tool_calls
        if call.call_id
    }
    cut_index = 0
    for entry in chat_history:
        if not isinstance(entry, ToolResultMessage):
            break
        if entry.tool_call_id in known_call_ids:
            break
        cut_index += 1
    if cut_index == 0:
        return chat_history, 0
    return chat_history[cut_index:], cut_index

View File

@@ -20,6 +20,13 @@ from rich.panel import Panel
from rich.pretty import Pretty
from rich.text import Text
from .display_utils import (
format_token_count,
format_tool_call_for_display as normalize_tool_call_for_display,
get_request_panel_style as get_shared_request_panel_style,
get_role_badge_label as get_shared_role_badge_label,
get_role_badge_style as get_shared_role_badge_style,
)
from .prompt_preview_logger import PromptPreviewLogger
PROJECT_ROOT = Path(__file__).parent.parent.parent.absolute().resolve()
@@ -59,44 +66,19 @@ class PromptCLIVisualizer:
def get_request_panel_style(request_kind: str) -> tuple[str, str]:
"""返回不同请求类型对应的标题与边框颜色。"""
normalized_kind = str(request_kind or "planner").strip().lower()
if normalized_kind == "timing_gate":
return "MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"
if normalized_kind == "replyer":
return "MaiSaka 回复器 Prompt", "bright_yellow"
if normalized_kind == "sub_agent":
return "MaiSaka 大模型请求 - 子代理", "bright_blue"
return "MaiSaka 大模型请求 - 对话单步", "cyan"
return get_shared_request_panel_style(request_kind)
@staticmethod
def _get_role_badge_style(role: str) -> str:
if role == "system":
return "bold white on blue"
if role == "user":
return "bold black on green"
if role == "assistant":
return "bold black on yellow"
if role == "tool":
return "bold white on magenta"
return "bold white on bright_black"
return get_shared_role_badge_style(role)
@staticmethod
def _get_role_badge_label(role: str) -> str:
if role == "system":
return "系统"
if role == "user":
return "用户"
if role == "assistant":
return "助手"
if role == "tool":
return "工具"
return "未知"
return get_shared_role_badge_label(role)
@staticmethod
def _format_token_count(token_count: int) -> str:
if token_count >= 10_000:
return f"{token_count / 1000:.1f}k"
return str(token_count)
return format_token_count(token_count)
@classmethod
def build_prompt_stats_text(
@@ -258,19 +240,7 @@ class PromptCLIVisualizer:
@classmethod
def format_tool_call_for_display(cls, tool_call: Any) -> Dict[str, Any]:
if isinstance(tool_call, dict):
function_info = tool_call.get("function", {})
return {
"id": tool_call.get("id"),
"name": function_info.get("name", tool_call.get("name")),
"arguments": function_info.get("arguments", tool_call.get("arguments")),
}
return {
"id": getattr(tool_call, "call_id", getattr(tool_call, "id", None)),
"name": getattr(tool_call, "func_name", getattr(tool_call, "name", None)),
"arguments": getattr(tool_call, "args", getattr(tool_call, "arguments", None)),
}
return normalize_tool_call_for_display(tool_call)
@classmethod
def _render_tool_call_panel(cls, tool_call: Any, index: int, parent_index: int) -> Panel:

View File

@@ -11,8 +11,7 @@ import traceback
from src.chat.heart_flow.heartFC_utils import CycleDetail
from src.chat.message_receive.message import SessionMessage
from src.chat.utils.utils import process_llm_response
from src.common.data_models.message_component_data_model import EmojiComponent, ImageComponent, MessageSequence, TextComponent
from src.common.data_models.message_component_data_model import EmojiComponent, ImageComponent, MessageSequence
from src.common.logger import get_logger
from src.common.prompt_i18n import load_prompt
from src.config.config import global_config
@@ -27,18 +26,13 @@ from .builtin_tool import get_timing_tools
from .chat_history_visual_refresher import refresh_chat_history_visual_placeholders
from .builtin_tool.context import BuiltinToolRuntimeContext
from .context_messages import (
AssistantMessage,
ComplexSessionMessage,
LLMContextMessage,
SessionBackedMessage,
ToolResultMessage,
contains_complex_message,
)
from .message_adapter import (
build_visible_text_from_sequence,
clone_message_sequence,
format_speaker_content,
)
from .history_utils import build_prefixed_message_sequence, build_session_message_visible_text, drop_leading_orphan_tool_results
from .monitor_events import (
emit_cycle_end,
emit_cycle_start,
@@ -583,30 +577,9 @@ class MaisakaReasoningEngine:
*,
planner_prefix: str,
) -> MessageSequence:
message_sequence = MessageSequence([])
appended_component = False
source_sequence = message.raw_message
planner_components = clone_message_sequence(source_sequence).components
message_sequence = build_prefixed_message_sequence(message.raw_message, planner_prefix)
if global_config.chat.multimodal_planner:
await self._hydrate_visual_components(planner_components)
if planner_components and isinstance(planner_components[0], TextComponent):
planner_components[0].text = planner_prefix + planner_components[0].text
else:
planner_components.insert(0, TextComponent(planner_prefix))
for component in planner_components:
message_sequence.components.append(component)
appended_component = True
if not appended_component:
if not message.processed_plain_text:
await message.process()
content = (message.processed_plain_text or "").strip()
if content:
message_sequence.text(planner_prefix + content)
await self._hydrate_visual_components(message_sequence.components)
return message_sequence
async def _hydrate_visual_components(self, planner_components: list[object]) -> None:
@@ -640,14 +613,7 @@ class MaisakaReasoningEngine:
)
def _build_legacy_visible_text(self, message: SessionMessage, source_sequence: MessageSequence) -> str:
user_info = message.message_info.user_info
speaker_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
legacy_sequence = MessageSequence([])
visible_message_id = None if message.is_notify else message.message_id
legacy_sequence.text(format_speaker_content(speaker_name, "", message.timestamp, visible_message_id))
for component in clone_message_sequence(source_sequence).components:
legacy_sequence.components.append(component)
return build_visible_text_from_sequence(legacy_sequence).strip()
return build_session_message_visible_text(message, source_sequence)
def _insert_chat_history_message(self, message: LLMContextMessage) -> int:
"""将消息按处理顺序追加到聊天历史末尾。"""
@@ -689,7 +655,7 @@ class MaisakaReasoningEngine:
if removed_message.count_in_context:
conversation_message_count -= 1
trimmed_history, pruned_orphan_count = self._drop_leading_orphan_tool_results(trimmed_history)
trimmed_history, pruned_orphan_count = drop_leading_orphan_tool_results(trimmed_history)
removed_count += pruned_orphan_count
self._runtime._chat_history = trimmed_history
@@ -701,29 +667,7 @@ class MaisakaReasoningEngine:
) -> tuple[list[LLMContextMessage], int]:
"""清理历史前缀中缺少对应 assistant tool_call 的工具结果消息。"""
if not chat_history:
return chat_history, 0
available_tool_call_ids = {
tool_call.call_id
for message in chat_history
if isinstance(message, AssistantMessage)
for tool_call in message.tool_calls
if tool_call.call_id
}
first_valid_index = 0
while first_valid_index < len(chat_history):
message = chat_history[first_valid_index]
if not isinstance(message, ToolResultMessage):
break
if message.tool_call_id in available_tool_call_ids:
break
first_valid_index += 1
if first_valid_index == 0:
return chat_history, 0
return chat_history[first_valid_index:], first_valid_index
return drop_leading_orphan_tool_results(chat_history)
@staticmethod
def _calculate_similarity(text1: str, text2: str) -> float:
@@ -764,15 +708,7 @@ class MaisakaReasoningEngine:
@staticmethod
def _post_process_reply_text(reply_text: str) -> list[str]:
"""沿用旧回复链的文本后处理,执行分段与错别字注入。"""
processed_segments: list[str] = []
for segment in process_llm_response(reply_text):
normalized_segment = segment.strip()
if normalized_segment:
processed_segments.append(normalized_segment)
if processed_segments:
return processed_segments
return [reply_text.strip()]
return BuiltinToolRuntimeContext.post_process_reply_text(reply_text)
def _build_tool_invocation(self, tool_call: ToolCall, latest_thought: str) -> ToolInvocation:
"""将模型输出的工具调用转换为统一调用对象。

View File

@@ -30,6 +30,7 @@ from src.plugin_runtime.tool_provider import PluginToolProvider
from .chat_loop_service import ChatResponse, MaisakaChatLoopService
from .context_messages import LLMContextMessage
from .display_utils import build_tool_call_summary_lines, format_token_count
from .reasoning_engine import MaisakaReasoningEngine
from .tool_provider import MaisakaBuiltinToolProvider
@@ -410,27 +411,49 @@ class MaisakaHeartFlowChatting:
if isinstance(knowledge_result, Exception):
logger.error(f"{self.log_prefix} 知识学习任务异常退出: {knowledge_result}")
async def _trigger_expression_learning(self, messages: list[SessionMessage]) -> None:
"""?????????????????"""
if not self._enable_expression_learning:
logger.debug(f"{self.log_prefix} ??????????????")
return
def _should_trigger_learning(
self,
*,
enabled: bool,
feature_name: str,
last_extraction_time: float,
pending_count: int,
min_messages_for_extraction: int,
) -> bool:
"""判断周期性学习任务是否满足执行条件。"""
elapsed = time.time() - self._last_expression_extraction_time
if not enabled:
logger.debug(f"{self.log_prefix} {feature_name}未启用,跳过本轮学习")
return False
elapsed = time.time() - last_extraction_time
if elapsed < self._min_extraction_interval:
logger.debug(
f"{self.log_prefix} ????????????: "
f"??={elapsed:.2f} ? ??={self._min_extraction_interval} ?"
f"{self.log_prefix} {feature_name}触发间隔不足: "
f"已过={elapsed:.2f} 秒 阈值={self._min_extraction_interval} "
)
return
return False
pending_count = self._expression_learner.get_pending_count(self.message_cache)
if pending_count < self._expression_learner.min_messages_for_extraction:
if pending_count < min_messages_for_extraction:
logger.debug(
f"{self.log_prefix} ??????????????: "
f"??????={pending_count} ??={self._expression_learner.min_messages_for_extraction} "
f"?????={len(self.message_cache)}"
f"{self.log_prefix} {feature_name}待处理消息不足: "
f"待处理={pending_count} 阈值={min_messages_for_extraction} "
f"缓存总量={len(self.message_cache)}"
)
return False
return True
async def _trigger_expression_learning(self, messages: list[SessionMessage]) -> None:
"""?????????????????"""
pending_count = self._expression_learner.get_pending_count(self.message_cache)
if not self._should_trigger_learning(
enabled=self._enable_expression_learning,
feature_name="表达学习",
last_extraction_time=self._last_expression_extraction_time,
pending_count=pending_count,
min_messages_for_extraction=self._expression_learner.min_messages_for_extraction,
):
return
self._last_expression_extraction_time = time.time()
@@ -453,25 +476,14 @@ class MaisakaHeartFlowChatting:
async def _trigger_knowledge_learning(self, messages: list[SessionMessage]) -> None:
"""?????????????????"""
if not global_config.maisaka.enable_knowledge_module:
logger.debug(f"{self.log_prefix} ??????????????")
return
elapsed = time.time() - self._last_knowledge_extraction_time
if elapsed < self._min_extraction_interval:
logger.debug(
f"{self.log_prefix} ????????????: "
f"??={elapsed:.2f} ? ??={self._min_extraction_interval} ?"
)
return
pending_count = self._knowledge_learner.get_pending_count(self.message_cache)
if pending_count < self._knowledge_learner.min_messages_for_extraction:
logger.debug(
f"{self.log_prefix} ??????????????: "
f"??????={pending_count} ??={self._knowledge_learner.min_messages_for_extraction} "
f"?????={len(self.message_cache)}"
)
if not self._should_trigger_learning(
enabled=global_config.maisaka.enable_knowledge_module,
feature_name="知识学习",
last_extraction_time=self._last_knowledge_extraction_time,
pending_count=pending_count,
min_messages_for_extraction=self._knowledge_learner.min_messages_for_extraction,
):
return
self._last_knowledge_extraction_time = time.time()
@@ -535,13 +547,6 @@ class MaisakaHeartFlowChatting:
return GroupInfo(group_id=group_info.group_id, group_name=group_info.group_name)
@staticmethod
def _format_token_count(token_count: int) -> str:
"""格式化 token 数量展示文本。"""
if token_count >= 10_000:
return f"{token_count / 1000:.1f}k"
return str(token_count)
def _render_context_usage_panel(
self,
*,
@@ -558,7 +563,7 @@ class MaisakaHeartFlowChatting:
body_lines = [
f"上下文占用:{selected_history_count}/{self._max_context_size}",
f"本次请求token消耗{self._format_token_count(prompt_tokens)}",
f"本次请求token消耗{format_token_count(prompt_tokens)}",
]
renderables: list[RenderableType] = [Text("\n".join(body_lines))]
@@ -576,7 +581,7 @@ class MaisakaHeartFlowChatting:
)
)
normalized_tool_calls = self._build_tool_call_summary_lines(tool_calls or [])
normalized_tool_calls = build_tool_call_summary_lines(tool_calls or [])
if normalized_tool_calls:
renderables.append(
Panel(
@@ -607,19 +612,6 @@ class MaisakaHeartFlowChatting:
)
)
@staticmethod
def _build_tool_call_summary_lines(tool_calls: list[Any]) -> list[str]:
"""构建工具调用摘要文本。"""
summary_lines: list[str] = []
for tool_call in tool_calls:
tool_name = str(getattr(tool_call, "func_name", getattr(tool_call, "name", "")) or "").strip() or "unknown"
tool_args = getattr(tool_call, "args", getattr(tool_call, "arguments", None))
if isinstance(tool_args, dict) and tool_args:
summary_lines.append(f"- {tool_name}: {tool_args}")
else:
summary_lines.append(f"- {tool_name}")
return summary_lines
def _log_cycle_started(self, cycle_detail: CycleDetail, round_index: int) -> None:
logger.info(
f"{self.log_prefix} MaiSaka 轮次开始: 循环编号={cycle_detail.cycle_id} "