Files
mai-bot/src/maisaka/reasoning_engine.py

756 lines
32 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""Maisaka 推理引擎。"""
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional
import asyncio
import difflib
import json
import time
import traceback
from src.chat.heart_flow.heartFC_utils import CycleDetail
from src.chat.message_receive.message import SessionMessage
from src.chat.utils.utils import process_llm_response
from src.common.data_models.message_component_data_model import EmojiComponent, ImageComponent, MessageSequence, TextComponent
from src.common.logger import get_logger
from src.config.config import global_config
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.llm_models.exceptions import ReqAbortException
from src.llm_models.payload_content.tool_option import ToolCall
from src.services import database_service as database_api
from .builtin_tool import build_builtin_tool_handlers as build_split_builtin_tool_handlers
from .builtin_tool.context import BuiltinToolRuntimeContext
from .context_messages import (
AssistantMessage,
LLMContextMessage,
SessionBackedMessage,
ToolResultMessage,
)
from .message_adapter import (
build_visible_text_from_sequence,
clone_message_sequence,
format_speaker_content,
)
from .planner_message_utils import build_planner_user_prefix_from_session_message
if TYPE_CHECKING:
from .runtime import MaisakaHeartFlowChatting
from .tool_provider import BuiltinToolHandler
logger = get_logger("maisaka_reasoning_engine")
class MaisakaReasoningEngine:
"""负责内部思考、推理与工具执行。"""
def __init__(self, runtime: "MaisakaHeartFlowChatting") -> None:
self._runtime = runtime
self._last_reasoning_content: str = ""
@property
def last_reasoning_content(self) -> str:
"""返回最近一轮思考文本。"""
return self._last_reasoning_content
def build_builtin_tool_handlers(self) -> dict[str, "BuiltinToolHandler"]:
    """Build the mapping from Maisaka builtin tool names to their handlers.

    Returns:
        dict[str, BuiltinToolHandler]: Tool-name-to-handler mapping.
    """
    runtime_context = BuiltinToolRuntimeContext(self, self._runtime)
    return build_split_builtin_tool_handlers(runtime_context)
async def run_loop(self) -> None:
    """Independently consume message batches and run the matching internal reasoning rounds.

    Pulls batches from the runtime's internal turn queue. A batch of ``None``
    signals a wait timeout; an empty (non-None) batch is skipped. Each
    consumed batch drives up to ``_max_internal_rounds`` planner/tool rounds.
    """
    try:
        while self._runtime._running:
            cached_messages = await self._runtime._internal_turn_queue.get()
            # None marks a wait-timeout wakeup rather than real messages.
            timeout_triggered = cached_messages is None
            if not timeout_triggered and not cached_messages:
                # Empty but non-None batch: nothing to do this turn.
                self._runtime._internal_turn_queue.task_done()
                continue
            self._runtime._agent_state = self._runtime._STATE_RUNNING
            if cached_messages:
                self._clear_pending_wait_tool_call_id()
                await self._ingest_messages(cached_messages)
                # Anchor this round on the newest incoming message.
                anchor_message = cached_messages[-1]
            else:
                # Timeout path: reuse the latest cached user message as anchor.
                anchor_message = self._get_timeout_anchor_message()
                if anchor_message is None:
                    logger.warning(
                        f"{self._runtime.log_prefix} 等待超时后缺少可复用的锚点消息,跳过本轮继续思考"
                    )
                    self._runtime._internal_turn_queue.task_done()
                    continue
                logger.info(f"{self._runtime.log_prefix} 等待超时后开始新一轮思考")
                self._clear_pending_wait_tool_call_id()
            self._trim_chat_history()
            try:
                for round_index in range(self._runtime._max_internal_rounds):
                    cycle_detail = self._start_cycle()
                    self._runtime._log_cycle_started(cycle_detail, round_index)
                    planner_started_at = time.time()
                    try:
                        logger.info(
                            f"{self._runtime.log_prefix} 规划器开始执行: "
                            f"回合={round_index + 1} "
                            f"历史消息数={len(self._runtime._chat_history)} "
                            f"开始时间={planner_started_at:.3f}"
                        )
                        # The interrupt flag lets newly arrived messages abort
                        # the in-flight planner call (raises ReqAbortException).
                        interrupt_flag = asyncio.Event()
                        self._runtime._planner_interrupt_flag = interrupt_flag
                        self._runtime._chat_loop_service.set_interrupt_flag(interrupt_flag)
                        try:
                            response = await self._runtime._chat_loop_service.chat_loop_step(self._runtime._chat_history)
                        finally:
                            # Only clear the flag if it is still the one we set.
                            if self._runtime._planner_interrupt_flag is interrupt_flag:
                                self._runtime._planner_interrupt_flag = None
                                self._runtime._chat_loop_service.set_interrupt_flag(None)
                        cycle_detail.time_records["planner"] = time.time() - planner_started_at
                        logger.info(
                            f"{self._runtime.log_prefix} 规划器执行完成: "
                            f"回合={round_index + 1} "
                            f"耗时={cycle_detail.time_records['planner']:.3f}"
                        )
                        reasoning_content = response.content or ""
                        # Replace near-duplicate reasoning with a re-think prompt.
                        if self._should_replace_reasoning(reasoning_content):
                            response.content = "让我根据新情况重新思考:"
                            response.raw_message.content = "让我根据新情况重新思考:"
                            logger.info(f"{self._runtime.log_prefix} 当前思考与上一轮过于相似,已替换为重新思考提示")
                        # Keep the raw (pre-replacement) reasoning for the next
                        # round's similarity check.
                        self._last_reasoning_content = reasoning_content
                        self._runtime._render_context_usage_panel(
                            selected_history_count=response.selected_history_count,
                            prompt_tokens=response.prompt_tokens,
                        )
                        self._runtime._chat_history.append(response.raw_message)
                        if response.tool_calls:
                            tool_started_at = time.time()
                            should_pause = await self._handle_tool_calls(
                                response.tool_calls,
                                response.content or "",
                                anchor_message,
                            )
                            cycle_detail.time_records["tool_calls"] = time.time() - tool_started_at
                            if should_pause:
                                # A tool asked to pause this thinking loop.
                                break
                            continue
                        if response.content:
                            # Text-only thought without tools: think again.
                            continue
                        # Neither content nor tool calls: end this turn.
                        break
                    except ReqAbortException:
                        # Planner request was interrupted by newer input.
                        interrupted_at = time.time()
                        logger.info(
                            f"{self._runtime.log_prefix} 规划器打断成功: "
                            f"回合={round_index + 1} "
                            f"开始时间={planner_started_at:.3f} "
                            f"打断时间={interrupted_at:.3f} "
                            f"耗时={interrupted_at - planner_started_at:.3f}"
                        )
                        break
                    finally:
                        self._end_cycle(cycle_detail)
            finally:
                if self._runtime._agent_state == self._runtime._STATE_RUNNING:
                    self._runtime._agent_state = self._runtime._STATE_STOP
                self._runtime._internal_turn_queue.task_done()
    except asyncio.CancelledError:
        self._runtime._log_internal_loop_cancelled()
        raise
    except Exception:
        logger.exception(f"{self._runtime.log_prefix} Maisaka 内部循环发生异常")
        logger.error(traceback.format_exc())
        raise
def _get_timeout_anchor_message(self) -> Optional[SessionMessage]:
    """Reuse the latest cached real user message as the anchor after a wait timeout."""
    cache = self._runtime.message_cache
    return cache[-1] if cache else None
def _clear_pending_wait_tool_call_id(self) -> None:
"""清理等待状态残留的 wait 工具调用编号。"""
self._runtime._pending_wait_tool_call_id = None
def _build_wait_timeout_message(self) -> ToolResultMessage:
    """Build the tool-result message appended after a wait times out.

    Clears the pending wait tool-call id as a side effect.
    """
    pending_id = self._runtime._pending_wait_tool_call_id
    self._runtime._pending_wait_tool_call_id = None
    return ToolResultMessage(
        content="等待已超时,期间没有收到新的用户输入。请基于现有上下文继续下一轮思考。",
        timestamp=datetime.now(),
        tool_call_id=pending_id or "wait_timeout",
        tool_name="wait",
    )
def _append_wait_interrupted_message_if_needed(self) -> None:
    """Append a matching tool-result message when a wait was cut short by new input.

    No-op when there is no pending wait tool-call id.
    """
    pending_id = self._runtime._pending_wait_tool_call_id
    if not pending_id:
        return
    self._runtime._pending_wait_tool_call_id = None
    interrupted_message = ToolResultMessage(
        content="等待过程被新的用户输入打断,已继续处理最新消息。",
        timestamp=datetime.now(),
        tool_call_id=pending_id,
        tool_name="wait",
    )
    self._runtime._chat_history.append(interrupted_message)
async def _ingest_messages(self, messages: list[SessionMessage]) -> None:
    """Convert incoming session messages into history entries and append them.

    Messages whose planner sequence ends up empty are skipped; history is
    trimmed once after the whole batch is ingested.
    """
    for incoming in messages:
        sequence, visible_text = await self._build_message_sequence(incoming)
        if not sequence.components:
            continue
        self._insert_chat_history_message(
            SessionBackedMessage.from_session_message(
                incoming,
                raw_message=sequence,
                visible_text=visible_text,
                source_kind="user",
            )
        )
    self._trim_chat_history()
async def _build_message_sequence(self, message: SessionMessage) -> tuple[MessageSequence, str]:
    """Build the planner-facing sequence and legacy visible text for one message.

    Args:
        message: Incoming session message.

    Returns:
        tuple[MessageSequence, str]: Sequence handed to the planner and the
        legacy plain-text rendering used for history replay.
    """
    message_sequence = MessageSequence([])
    # Per-message prefix (speaker/metadata) the planner expects before content.
    planner_prefix = build_planner_user_prefix_from_session_message(message)
    appended_component = False
    source_sequence = message.raw_message
    # Work on a clone so the original session message stays untouched.
    planner_components = clone_message_sequence(source_sequence).components
    if global_config.maisaka.direct_image_input:
        # Only fetch image/emoji binaries when direct image input is enabled.
        await self._hydrate_visual_components(planner_components)
    if planner_components and isinstance(planner_components[0], TextComponent):
        # Merge the prefix into the leading text component.
        planner_components[0].text = planner_prefix + planner_components[0].text
    else:
        planner_components.insert(0, TextComponent(planner_prefix))
    for component in planner_components:
        message_sequence.components.append(component)
        appended_component = True
    legacy_visible_text = self._build_legacy_visible_text(message, source_sequence)
    # NOTE(review): after the insert above, planner_components always holds at
    # least the prefix component, so this fallback looks unreachable — confirm
    # against clone_message_sequence before removing.
    if not appended_component:
        if not message.processed_plain_text:
            await message.process()
        content = (message.processed_plain_text or "").strip()
        if content:
            message_sequence.text(planner_prefix + content)
            legacy_visible_text = self._build_legacy_visible_text_from_text(message, content)
    return message_sequence, legacy_visible_text
async def _hydrate_visual_components(self, planner_components: list[object]) -> None:
    """Backfill image/emoji binary payloads on demand when Maisaka needs raw media.

    Failures are logged and tolerated; the caller falls back to text placeholders.
    """
    pending: list[asyncio.Task[None]] = []
    for item in planner_components:
        if isinstance(item, ImageComponent) and not item.binary_data:
            pending.append(asyncio.create_task(item.load_image_binary()))
        elif isinstance(item, EmojiComponent) and not item.binary_data:
            pending.append(asyncio.create_task(item.load_emoji_binary()))
    if not pending:
        return
    for outcome in await asyncio.gather(*pending, return_exceptions=True):
        if isinstance(outcome, Exception):
            logger.warning(f"{self._runtime.log_prefix} 回填图片或表情二进制数据失败Maisaka 将退化为文本占位: {outcome}")
def _build_legacy_visible_text(self, message: SessionMessage, source_sequence: MessageSequence) -> str:
    """Render the legacy plain-text view (speaker header plus components)."""
    user_info = message.message_info.user_info
    display_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
    # Notifications carry no visible message id.
    shown_message_id = None if message.is_notify else message.message_id
    rendered = MessageSequence([])
    rendered.text(format_speaker_content(display_name, "", message.timestamp, shown_message_id))
    rendered.components.extend(clone_message_sequence(source_sequence).components)
    return build_visible_text_from_sequence(rendered).strip()
def _build_legacy_visible_text_from_text(self, message: SessionMessage, content: str) -> str:
    """Render the legacy visible text for the plain-text fallback path."""
    user_info = message.message_info.user_info
    display_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
    # Notifications carry no visible message id.
    shown_message_id = None if message.is_notify else message.message_id
    return format_speaker_content(display_name, content, message.timestamp, shown_message_id).strip()
def _insert_chat_history_message(self, message: LLMContextMessage) -> int:
    """Append the message to the end of chat history in processing order.

    Returns:
        int: Index at which the message was appended.
    """
    history = self._runtime._chat_history
    history.append(message)
    return len(history) - 1
def _start_cycle(self) -> CycleDetail:
    """Open a new Maisaka reasoning cycle and make it the current one."""
    self._runtime._cycle_counter += 1
    cycle = CycleDetail(cycle_id=self._runtime._cycle_counter)
    cycle.thinking_id = f"maisaka_tid{round(time.time(), 2)}"
    self._runtime._current_cycle_detail = cycle
    return cycle
def _end_cycle(self, cycle_detail: CycleDetail, only_long_execution: bool = True) -> CycleDetail:
    """Close a reasoning cycle, record it, and log its timing summary.

    Args:
        cycle_detail: The cycle being closed.
        only_long_execution: When True, omit timers under 0.1s from the log.

    Returns:
        CycleDetail: The same cycle, now finalized.
    """
    cycle_detail.end_time = time.time()
    self._runtime.history_loop.append(cycle_detail)
    timer_strings = []
    for name, duration in cycle_detail.time_records.items():
        if only_long_execution and duration < 0.1:
            continue
        timer_strings.append(f"{name}: {duration:.2f}s")
    self._runtime._log_cycle_completed(cycle_detail, timer_strings)
    return cycle_detail
def _trim_chat_history(self) -> None:
    """Trim chat history so context-counted messages stay within the configured limit.

    Drops from the front until the count fits, then prunes any leading tool
    results whose originating assistant tool_call was dropped with them.
    """
    limit = self._runtime._max_context_size
    counted = sum(1 for entry in self._runtime._chat_history if entry.count_in_context)
    if counted <= limit:
        return
    working = list(self._runtime._chat_history)
    removed = 0
    while working and counted > limit:
        dropped = working.pop(0)
        removed += 1
        if dropped.count_in_context:
            counted -= 1
    working, orphan_count = self._drop_leading_orphan_tool_results(working)
    removed += orphan_count
    self._runtime._chat_history = working
    self._runtime._log_history_trimmed(removed, counted)
@staticmethod
def _drop_leading_orphan_tool_results(
    chat_history: list[LLMContextMessage],
) -> tuple[list[LLMContextMessage], int]:
    """Drop leading tool results whose assistant tool_call is no longer in history.

    Args:
        chat_history: History possibly starting with orphaned tool results.

    Returns:
        tuple[list[LLMContextMessage], int]: Remaining history and the number
        of pruned leading messages.
    """
    if not chat_history:
        return chat_history, 0
    # Collect every tool-call id still referenced by an assistant message.
    known_call_ids: set[str] = set()
    for entry in chat_history:
        if isinstance(entry, AssistantMessage):
            for call in entry.tool_calls:
                if call.call_id:
                    known_call_ids.add(call.call_id)
    pruned = 0
    for entry in chat_history:
        if not isinstance(entry, ToolResultMessage):
            break
        if entry.tool_call_id in known_call_ids:
            break
        pruned += 1
    if pruned == 0:
        return chat_history, 0
    return chat_history[pruned:], pruned
@staticmethod
def _calculate_similarity(text1: str, text2: str) -> float:
"""计算两个文本之间的相似度。
Args:
text1: 第一个文本
text2: 第二个文本
Returns:
float: 相似度值,范围 0-11 表示完全相同
"""
return difflib.SequenceMatcher(None, text1, text2).ratio()
def _should_replace_reasoning(self, current_content: str) -> bool:
    """Decide whether the current reasoning content should be replaced.

    Returns True when the current reasoning is more than 90% similar to the
    previous round's reasoning.

    Args:
        current_content: Reasoning text of the current round.

    Returns:
        bool: Whether the reasoning should be replaced.
    """
    previous = self._last_reasoning_content
    if not previous or not current_content:
        # Nothing to compare against; log why and keep the content as-is.
        logger.info(
            f"{self._runtime.log_prefix} 跳过思考相似度判定: "
            f"上一轮为空={not bool(self._last_reasoning_content)} "
            f"当前为空={not bool(current_content)} 相似度=0.00"
        )
        return False
    similarity = self._calculate_similarity(current_content, previous)
    logger.info(f"{self._runtime.log_prefix} 思考内容相似度: {similarity:.2f}")
    return similarity > 0.9
@staticmethod
def _post_process_reply_text(reply_text: str) -> list[str]:
    """Post-process reply text via the legacy reply chain: segmenting and typo injection.

    Falls back to the stripped original text when processing yields nothing.
    """
    segments = [
        stripped
        for segment in process_llm_response(reply_text)
        if (stripped := segment.strip())
    ]
    return segments if segments else [reply_text.strip()]
def _build_tool_invocation(self, tool_call: ToolCall, latest_thought: str) -> ToolInvocation:
    """Convert a model-emitted tool call into the unified invocation object.

    Args:
        tool_call: Tool call returned by the model.
        latest_thought: Latest reasoning text of the current round.

    Returns:
        ToolInvocation: Unified tool invocation object.
    """
    session_id = self._runtime.session_id
    return ToolInvocation(
        tool_name=tool_call.func_name,
        arguments=dict(tool_call.args or {}),
        call_id=tool_call.call_id,
        session_id=session_id,
        stream_id=session_id,
        reasoning=latest_thought,
    )
def _build_tool_execution_context(
    self,
    latest_thought: str,
    anchor_message: SessionMessage,
) -> ToolExecutionContext:
    """Build the unified tool execution context for the current round.

    Args:
        latest_thought: Latest reasoning text of the current round.
        anchor_message: Anchor message of the current round.

    Returns:
        ToolExecutionContext: Unified tool execution context.
    """
    session_id = self._runtime.session_id
    return ToolExecutionContext(
        session_id=session_id,
        stream_id=session_id,
        reasoning=latest_thought,
        metadata={"anchor_message": anchor_message},
    )
@staticmethod
def _normalize_tool_record_value(value: Any) -> Any:
"""将工具记录中的任意值规范化为可序列化结构。
Args:
value: 原始值。
Returns:
Any: 适合写入 JSON 的规范化结果。
"""
if value is None or isinstance(value, (str, int, float, bool)):
return value
if isinstance(value, datetime):
return value.isoformat()
if isinstance(value, dict):
normalized_dict: dict[str, Any] = {}
for key, item in value.items():
normalized_dict[str(key)] = MaisakaReasoningEngine._normalize_tool_record_value(item)
return normalized_dict
if isinstance(value, (list, tuple, set)):
return [MaisakaReasoningEngine._normalize_tool_record_value(item) for item in value]
if isinstance(value, bytes):
return f"<bytes:{len(value)}>"
if hasattr(value, "model_dump"):
try:
return MaisakaReasoningEngine._normalize_tool_record_value(value.model_dump())
except Exception:
return str(value)
if hasattr(value, "__dict__"):
try:
return MaisakaReasoningEngine._normalize_tool_record_value(dict(value.__dict__))
except Exception:
return str(value)
return str(value)
@staticmethod
def _truncate_tool_record_text(text: str, max_length: int = 180) -> str:
"""截断工具记录中的展示文本。
Args:
text: 原始文本。
max_length: 最长保留字符数。
Returns:
str: 截断后的文本。
"""
normalized_text = text.strip()
if len(normalized_text) <= max_length:
return normalized_text
return f"{normalized_text[: max_length - 1]}"
def _build_tool_record_payload(
    self,
    invocation: ToolInvocation,
    result: ToolExecutionResult,
    tool_spec: Optional[ToolSpec],
) -> dict[str, Any]:
    """Build the unified tool record payload for persistence.

    Args:
        invocation: Tool invocation object.
        result: Tool execution result.
        tool_spec: Matching tool specification, if any.

    Returns:
        dict[str, Any]: Record data ready to be written to the database.
    """
    normalize = self._normalize_tool_record_value
    payload: dict[str, Any] = {
        "call_id": invocation.call_id,
        "session_id": invocation.session_id,
        "stream_id": invocation.stream_id,
        "arguments": normalize(invocation.arguments),
        "success": result.success,
        "content": result.content,
        "error_message": result.error_message,
        "history_content": result.get_history_content(),
        "structured_content": normalize(result.structured_content),
        "metadata": normalize(result.metadata),
    }
    if tool_spec is not None:
        # Enrich the record with the tool's declared metadata.
        payload.update(
            provider_name=tool_spec.provider_name,
            provider_type=tool_spec.provider_type,
            brief_description=tool_spec.brief_description,
            detailed_description=tool_spec.detailed_description,
            title=tool_spec.title,
        )
    return payload
def _build_tool_display_prompt(
    self,
    invocation: ToolInvocation,
    result: ToolExecutionResult,
    tool_spec: Optional[ToolSpec],
) -> str:
    """Build the tool summary shown in history replay and the UI.

    Args:
        invocation: Tool invocation object.
        result: Tool execution result.
        tool_spec: Matching tool specification, if any.

    Returns:
        str: Display text summarizing this tool call.
    """
    # A tool may supply its own display text via metadata; it wins outright.
    custom_display_prompt = result.metadata.get("record_display_prompt")
    if isinstance(custom_display_prompt, str) and custom_display_prompt.strip():
        return custom_display_prompt.strip()
    structured_content = (
        result.structured_content
        if isinstance(result.structured_content, dict)
        else {}
    )
    history_content = self._truncate_tool_record_text(result.get_history_content(), max_length=200)
    normalized_args = self._normalize_tool_record_value(invocation.arguments)
    if invocation.tool_name == "reply":
        target_user_name = str(structured_content.get("target_user_name") or "对方").strip() or "对方"
        reply_text = str(structured_content.get("reply_text") or "").strip()
        if result.success and reply_text:
            return f"你对{target_user_name}进行了回复:{reply_text}"
        target_message_id = str(invocation.arguments.get("msg_id") or "").strip()
        error_text = self._truncate_tool_record_text(result.error_message or history_content, max_length=120)
        return f"你尝试回复消息 {target_message_id or 'unknown'},但失败了:{error_text}"
    if invocation.tool_name == "send_emoji":
        description = str(structured_content.get("description") or "").strip()
        emotion_list = structured_content.get("emotion")
        if isinstance(emotion_list, list):
            # BUG FIX: the list separator was stripped ("".join glued all
            # entries together); restore the "、" enumeration separator.
            emotion_text = "、".join(str(item).strip() for item in emotion_list if str(item).strip())
        else:
            emotion_text = ""
        if result.success and description:
            if emotion_text:
                # BUG FIX: restore the missing closing ")" so the
                # parenthesized emotion suffix is balanced.
                return f"你发送了表情包:{description}(情绪:{emotion_text})"
            return f"你发送了表情包:{description}"
        return f"你尝试发送表情包,但失败了:{self._truncate_tool_record_text(result.error_message or history_content, 120)}"
    if invocation.tool_name == "wait":
        wait_seconds = invocation.arguments.get("seconds", 30)
        return f"你让当前对话先等待 {wait_seconds} 秒。"
    if invocation.tool_name == "no_reply":
        return "你暂停了当前对话循环,等待新的外部消息。"
    if invocation.tool_name == "query_jargon":
        words = invocation.arguments.get("words", [])
        if isinstance(words, list):
            # BUG FIX: restore the "、" separator between queried words.
            words_text = "、".join(str(item).strip() for item in words if str(item).strip())
        else:
            words_text = ""
        if words_text:
            return f"你查询了这些黑话或词条:{words_text}"
        return "你查询了一次黑话或词条信息。"
    if invocation.tool_name == "query_person_info":
        person_name = str(invocation.arguments.get("person_name") or "").strip()
        if person_name:
            return f"你查询了人物信息:{person_name}"
        return "你查询了一次人物信息。"
    # Generic fallback for any other tool.
    brief_description = ""
    if tool_spec is not None:
        brief_description = tool_spec.brief_description.strip()
    if normalized_args:
        arguments_text = self._truncate_tool_record_text(
            json.dumps(normalized_args, ensure_ascii=False),
            max_length=160,
        )
    else:
        arguments_text = "{}"
    if result.success:
        if brief_description:
            return f"{brief_description} 参数={arguments_text};结果:{history_content or '执行成功'}"
        return f"你调用了工具 {invocation.tool_name},参数={arguments_text};结果:{history_content or '执行成功'}"
    error_text = self._truncate_tool_record_text(result.error_message or history_content, max_length=160)
    return f"你调用了工具 {invocation.tool_name},参数={arguments_text};执行失败:{error_text}"
async def _store_tool_execution_record(
    self,
    invocation: ToolInvocation,
    result: ToolExecutionResult,
    tool_spec: Optional[ToolSpec],
) -> None:
    """Persist a tool execution result into the unified tool record table.

    Best-effort: storage failures are logged, never raised to the caller.

    Args:
        invocation: Tool invocation object.
        result: Tool execution result.
        tool_spec: Matching tool specification, if any.
    """
    if self._runtime.chat_stream is None:
        logger.debug(
            f"{self._runtime.log_prefix} 当前没有 chat_stream跳过工具记录存储: "
            f"工具={invocation.tool_name}"
        )
        return
    builtin_prompt = tool_spec.build_llm_description() if tool_spec is not None else ""
    try:
        await database_api.store_tool_info(
            chat_stream=self._runtime.chat_stream,
            builtin_prompt=builtin_prompt,
            display_prompt=self._build_tool_display_prompt(invocation, result, tool_spec),
            tool_id=invocation.call_id,
            tool_data=self._build_tool_record_payload(invocation, result, tool_spec),
            tool_name=invocation.tool_name,
            tool_reasoning=invocation.reasoning,
        )
    except Exception:
        logger.exception(
            f"{self._runtime.log_prefix} 写入工具记录失败: 工具={invocation.tool_name} 调用编号={invocation.call_id}"
        )
def _append_tool_execution_result(self, tool_call: ToolCall, result: ToolExecutionResult) -> None:
    """Write a unified tool execution result back into Maisaka history.

    Args:
        tool_call: Original tool call object.
        result: Unified tool execution result.
    """
    history_content = result.get_history_content()
    if not history_content:
        # Fall back to a generic status line when the tool returned nothing.
        if result.success:
            history_content = "工具执行成功。"
        else:
            history_content = f"工具 {tool_call.func_name} 执行失败。"
    entry = ToolResultMessage(
        content=history_content,
        timestamp=datetime.now(),
        tool_call_id=tool_call.call_id,
        tool_name=tool_call.func_name,
        success=result.success,
    )
    self._runtime._chat_history.append(entry)
async def _handle_tool_calls(
    self,
    tool_calls: list[ToolCall],
    latest_thought: str,
    anchor_message: SessionMessage,
) -> bool:
    """Execute a batch of unified tool calls.

    Args:
        tool_calls: Tool calls returned by the model.
        latest_thought: Latest reasoning text of the current round.
        anchor_message: Anchor message of the current round.

    Returns:
        bool: Whether the current reasoning loop should pause.
    """
    if self._runtime._tool_registry is None:
        # Registry not ready yet: record a failure result for every call so
        # the model still sees a tool response, then continue without pausing.
        for tool_call in tool_calls:
            invocation = self._build_tool_invocation(tool_call, latest_thought)
            result = ToolExecutionResult(
                tool_name=tool_call.func_name,
                success=False,
                error_message="统一工具注册表尚未初始化。",
            )
            await self._store_tool_execution_record(invocation, result, None)
            self._append_tool_execution_result(tool_call, result)
        return False
    execution_context = self._build_tool_execution_context(latest_thought, anchor_message)
    # Snapshot the tool specs once so each call can attach its spec to the record.
    tool_spec_map = {
        tool_spec.name: tool_spec
        for tool_spec in await self._runtime._tool_registry.list_tools()
    }
    for tool_call in tool_calls:
        invocation = self._build_tool_invocation(tool_call, latest_thought)
        result = await self._runtime._tool_registry.invoke(invocation, execution_context)
        await self._store_tool_execution_record(
            invocation,
            result,
            tool_spec_map.get(invocation.tool_name),
        )
        self._append_tool_execution_result(tool_call, result)
        if not result.success and tool_call.func_name == "reply":
            logger.warning(f"{self._runtime.log_prefix} 回复工具未生成可见消息,将继续下一轮循环")
        # A tool may request pausing the thinking loop via its metadata.
        if bool(result.metadata.get("pause_execution", False)):
            return True
    return False