diff --git a/src/chat/replyer/maisaka_generator_base.py b/src/chat/replyer/maisaka_generator_base.py index ae3ab645..18e90fd4 100644 --- a/src/chat/replyer/maisaka_generator_base.py +++ b/src/chat/replyer/maisaka_generator_base.py @@ -1,9 +1,9 @@ -import time from dataclasses import dataclass, field from datetime import datetime from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple import random +import time from rich.console import Group, RenderableType from rich.panel import Panel @@ -110,11 +110,15 @@ class BaseMaisakaReplyGenerator: return "" def _extract_guided_bot_reply(self, message: SessionBackedMessage) -> str: - speaker_name, body = parse_speaker_content(message.processed_plain_text.strip()) - bot_nickname = global_config.bot.nickname.strip() or "Bot" - if speaker_name == bot_nickname: - return self._normalize_content(body.strip()) - return "" + # 只能根据结构化来源字段判断是否为 bot 自身写回的历史消息, + # 不能依赖昵称/群名片等可控文本,避免误判和提示注入。 + if message.source_kind != "guided_reply": + return "" + + plain_text = message.processed_plain_text.strip() + _, body = parse_speaker_content(plain_text) + normalized_body = body.strip() or plain_text + return self._normalize_content(normalized_body) if normalized_body else "" def _build_target_message_block(self, reply_message: Optional[SessionMessage]) -> str: if reply_message is None: @@ -210,6 +214,7 @@ class BaseMaisakaReplyGenerator: self, reply_message: Optional[SessionMessage], reply_reason: str, + reference_info: str = "", expression_habits: str = "", stream_id: Optional[str] = None, ) -> str: @@ -234,8 +239,13 @@ class BaseMaisakaReplyGenerator: sections.append(expression_habits.strip()) if target_message_block: sections.append(target_message_block) + reply_reference_lines: List[str] = [] if reply_reason.strip(): - sections.append(f"【回复信息参考】\n{reply_reason}") + reply_reference_lines.append(f"【最新推理】\n{reply_reason.strip()}") + if reference_info.strip(): + 
reply_reference_lines.append(f"【参考信息】\n{reference_info.strip()}") + if reply_reference_lines: + sections.append("【回复信息参考】\n" + "\n\n".join(reply_reference_lines)) if not sections: return system_prompt return f"{system_prompt}\n\n" + "\n\n".join(sections) @@ -308,6 +318,7 @@ class BaseMaisakaReplyGenerator: chat_history: List[LLMContextMessage], reply_message: Optional[SessionMessage], reply_reason: str, + reference_info: str = "", expression_habits: str = "", stream_id: Optional[str] = None, enable_visual_message: bool = False, @@ -316,6 +327,7 @@ class BaseMaisakaReplyGenerator: system_prompt = self._build_system_prompt( reply_message=reply_message, reply_reason=reply_reason, + reference_info=reference_info, expression_habits=expression_habits, stream_id=stream_id, ) @@ -377,6 +389,7 @@ class BaseMaisakaReplyGenerator: self, extra_info: str = "", reply_reason: str = "", + reference_info: str = "", available_actions: Optional[Dict[str, ActionInfo]] = None, chosen_actions: Optional[List[object]] = None, from_plugin: bool = True, @@ -461,6 +474,7 @@ class BaseMaisakaReplyGenerator: chat_history=filtered_history, reply_message=reply_message, reply_reason=reply_reason or "", + reference_info=reference_info or "", expression_habits=merged_expression_habits, stream_id=stream_id, ) @@ -486,6 +500,7 @@ class BaseMaisakaReplyGenerator: chat_history=filtered_history, reply_message=reply_message, reply_reason=reply_reason or "", + reference_info=reference_info or "", expression_habits=merged_expression_habits, stream_id=stream_id, enable_visual_message=self._resolve_enable_visual_message(model_info), @@ -504,7 +519,6 @@ class BaseMaisakaReplyGenerator: chat_id=preview_chat_id, request_kind="replyer", selection_reason=f"ID: {preview_chat_id}", - image_display_mode="path_link" if global_config.maisaka.show_image_path else "legacy", ), title="Reply Prompt", border_style="bright_yellow", diff --git a/src/maisaka/builtin_tool/reply.py b/src/maisaka/builtin_tool/reply.py index 
00c392b9..debee914 100644 --- a/src/maisaka/builtin_tool/reply.py +++ b/src/maisaka/builtin_tool/reply.py @@ -36,7 +36,8 @@ def get_tool_spec() -> ToolSpec: detailed_description=( "参数说明:\n" "- msg_id:string,必填。要回复的目标用户消息编号。\n" - "- set_quote:boolean,可选。以引用回复的方式发送,默认 true。" + "- set_quote:boolean,可选。以引用回复的方式发送,默认 true。\n" + "- reference_info:string,可选。上文中有助于回复的所有参考信息,使用平文本格式。" ), parameters_schema={ "type": "object", @@ -50,6 +51,11 @@ def get_tool_spec() -> ToolSpec: "description": "以引用回复的方式发送这条回复,不用每句都引用。", "default": True, }, + "reference_info": { + "type": "string", + "description": "有助于回复的信息,之前搜集得到的事实性信息,记忆等,使用平文本格式。", + "default": "", + }, }, "required": ["msg_id"], }, @@ -75,6 +81,7 @@ async def handle_tool( """执行 reply 内置工具。""" latest_thought = context.reasoning if context is not None else invocation.reasoning + reference_info = str(invocation.arguments.get("reference_info") or "").strip() target_message_id = str(invocation.arguments.get("msg_id") or "").strip() set_quote = bool(invocation.arguments.get("set_quote", True)) @@ -117,6 +124,7 @@ async def handle_tool( try: success, reply_result = await replyer.generate_reply_with_context( reply_reason=latest_thought, + reference_info=reference_info, stream_id=tool_ctx.runtime.session_id, reply_message=target_message, chat_history=tool_ctx.runtime._chat_history,