From b74b60cb1a83852a5e1f3ffbdca37b7e1bcffa4b Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Fri, 3 Apr 2026 15:31:44 +0800
Subject: [PATCH] feat: remove the old maisaka cli logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/chat/emoji_system/maisaka_tool.py |  26 +-
 src/cli/maisaka_cli.py                | 423 +++----------
 src/cli/maisaka_cli_sender.py         |  27 ++
 src/maisaka/builtin_tool/context.py   |  27 +-
 src/maisaka/builtin_tool/reply.py     |  28 +-
 src/maisaka/chat_loop_service.py      |  36 +--
 src/maisaka/planner_message_utils.py  | 109 +++++++
 src/maisaka/reasoning_engine.py       |  19 +-
 8 files changed, 226 insertions(+), 469 deletions(-)
 create mode 100644 src/cli/maisaka_cli_sender.py
 create mode 100644 src/maisaka/planner_message_utils.py

diff --git a/src/chat/emoji_system/maisaka_tool.py b/src/chat/emoji_system/maisaka_tool.py
index 8e805ca4..982a9473 100644
--- a/src/chat/emoji_system/maisaka_tool.py
+++ b/src/chat/emoji_system/maisaka_tool.py
@@ -5,6 +5,8 @@
 from typing import Any, Optional, Sequence
 import random
 
+from src.chat.message_receive.chat_manager import chat_manager
+from src.cli.maisaka_cli_sender import CLI_PLATFORM_NAME, render_cli_message
 from src.common.data_models.image_data_model import MaiEmoji
 from src.common.data_models.llm_service_data_models import LLMGenerationOptions
 from src.common.logger import get_logger
@@ -291,13 +293,23 @@ async def send_emoji_for_maisaka(
     )
 
     try:
-        sent = await send_service.emoji_to_stream(
-            emoji_base64=emoji_base64,
-            stream_id=stream_id,
-            storage_message=True,
-            set_reply=False,
-            reply_message=None,
-        )
+        target_session = chat_manager.get_session_by_session_id(stream_id)
+        if target_session is not None and target_session.platform == CLI_PLATFORM_NAME:
+            preview_message = (
+                f"已发送表情包:{selected_emoji.description.strip()}"
+                if selected_emoji.description.strip()
+                else "[表情包]"
+            )
+            render_cli_message(preview_message)
+            sent = True
+        else:
+            sent = await send_service.emoji_to_stream(
+                emoji_base64=emoji_base64,
+                stream_id=stream_id,
+                storage_message=True,
+                set_reply=False,
+                reply_message=None,
+            )
     except Exception as exc:
         return MaisakaEmojiSendResult(
             success=False,
diff --git a/src/cli/maisaka_cli.py b/src/cli/maisaka_cli.py
index fb333b07..d2e33323 100644
--- a/src/cli/maisaka_cli.py
+++ b/src/cli/maisaka_cli.py
@@ -3,40 +3,22 @@ MaiSaka CLI and conversation loop.
""" from datetime import datetime -from typing import Optional import asyncio -import os -import time from rich import box -from rich.markdown import Markdown from rich.panel import Panel from rich.text import Text -from src.know_u.knowledge import KnowledgeLearner, retrieve_relevant_knowledge -from src.know_u.knowledge_store import get_knowledge_store +from src.chat.heart_flow.heartflow_manager import heartflow_manager +from src.chat.heart_flow.heartflow_message_processor import HeartFCMessageReceiver +from src.chat.message_receive.chat_manager import BotChatSession, chat_manager from src.chat.message_receive.message import SessionMessage -from src.chat.replyer.maisaka_replyer_factory import get_maisaka_replyer_class +from src.common.data_models.mai_message_data_model import MessageInfo, UserInfo +from src.common.data_models.message_component_data_model import MessageSequence, TextComponent from src.config.config import config_manager, global_config -from src.mcp_module import MCPManager -from src.mcp_module.host_llm_bridge import MCPHostLLMBridge - -from src.maisaka.chat_loop_service import MaisakaChatLoopService -from src.maisaka.context_messages import ( - AssistantMessage, - LLMContextMessage, - SessionBackedMessage, - ToolResultMessage, -) -from src.maisaka.message_adapter import format_speaker_content -from src.maisaka.tool_handlers import ( - ToolHandlerContext, - handle_mcp_tool, - handle_unknown_tool, - handle_wait, -) +from .maisaka_cli_sender import CLI_PLATFORM_NAME from .console import console from .input_reader import InputReader @@ -44,41 +26,13 @@ from .input_reader import InputReader class BufferCLI: """Maisaka 命令行交互入口。""" + _CLI_PLATFORM = CLI_PLATFORM_NAME + _CLI_USER_ID = "maisaka_user" + def __init__(self) -> None: - self._chat_loop_service: Optional[MaisakaChatLoopService] = None - self._reply_generator = get_maisaka_replyer_class()() self._reader = InputReader() - self._chat_history: Optional[list[LLMContextMessage]] = None - self._knowledge_store = get_knowledge_store() - self._knowledge_learner = KnowledgeLearner("maisaka_cli") - self._knowledge_min_messages_for_extraction = 10 - self._knowledge_min_extraction_interval = 30 - self._last_knowledge_extraction_time = 0.0 - - knowledge_stats = self._knowledge_store.get_stats() - if knowledge_stats["total_items"] > 0: - console.print(f"[success]知识库中已有 {knowledge_stats['total_items']} 条数据[/success]") - else: - console.print("[muted]知识库已初始化,当前没有数据[/muted]") - - self._chat_start_time: Optional[datetime] = None - self._last_user_input_time: Optional[datetime] = None - self._last_assistant_response_time: Optional[datetime] = None - self._user_input_times: list[datetime] = [] - self._mcp_manager: Optional[MCPManager] = None - self._mcp_host_bridge: Optional[MCPHostLLMBridge] = None - self._init_llm() - - def _init_llm(self) -> None: - """初始化 Maisaka 使用的聊天服务。""" - thinking_env = os.getenv("ENABLE_THINKING", "").strip().lower() - enable_thinking: Optional[bool] = True if thinking_env == "true" else False if thinking_env == "false" else None - - _ = enable_thinking - self._chat_loop_service = MaisakaChatLoopService() - - model_name = self._get_current_model_name() - console.print(f"[success]大模型服务已初始化[/success] [muted](模型: {model_name})[/muted]") + self._message_receiver = HeartFCMessageReceiver() + self._session: BotChatSession | None = None @staticmethod def _get_current_model_name() -> str: @@ -91,350 +45,59 @@ class BufferCLI: pass return "未配置" - def _build_tool_context(self) -> ToolHandlerContext: - """构建工具处理的共享上下文。""" - 
-        tool_context = ToolHandlerContext(
-            reader=self._reader,
-            user_input_times=self._user_input_times,
-        )
-        tool_context.last_user_input_time = self._last_user_input_time
-        return tool_context
-
     def _show_banner(self) -> None:
         """渲染启动横幅。"""
         banner = Text()
         banner.append("MaiSaka", style="bold cyan")
         banner.append(" v2.0\n", style="muted")
+        banner.append(f"模型: {self._get_current_model_name()}\n", style="muted")
         banner.append("输入内容开始对话 | Ctrl+C 退出", style="muted")
-
         console.print(Panel(banner, box=box.DOUBLE_EDGE, border_style="cyan", padding=(1, 2)))
         console.print()
 
-    async def _start_chat(self, user_text: str) -> None:
-        """追加用户输入并继续内部循环。"""
-        if self._chat_loop_service is None:
-            console.print("[warning]大模型服务尚未初始化,已跳过本次对话。[/warning]")
-            return
-
-        now = datetime.now()
-        self._last_user_input_time = now
-        self._user_input_times.append(now)
-
-        if self._chat_history is None:
-            self._chat_start_time = now
-            self._last_assistant_response_time = None
-            self._chat_history = self._chat_loop_service.build_chat_context(user_text)
-            self._trigger_knowledge_learning([self._build_cli_session_message(user_text, now)])
-        else:
-            self._chat_history.append(
-                self._build_cli_context_message(
-                    user_text=user_text,
-                    timestamp=now,
-                    source_kind="user",
-                )
-            )
-            self._trigger_knowledge_learning([self._build_cli_session_message(user_text, now)])
-
-        await self._run_llm_loop(self._chat_history)
-
     @staticmethod
-    def _build_cli_context_message(
+    def _build_cli_session_message(
+        *,
         user_text: str,
         timestamp: datetime,
-        source_kind: str = "user",
-        speaker_name: Optional[str] = None,
-    ) -> SessionBackedMessage:
-        """为 CLI 构造新的上下文消息。"""
-        resolved_speaker_name = speaker_name or global_config.maisaka.user_name.strip() or "用户"
-        visible_text = format_speaker_content(
-            resolved_speaker_name,
-            user_text,
-            timestamp,
-        )
-        planner_prefix = (
-            f"[时间]{timestamp.strftime('%H:%M:%S')}\n"
-            f"[用户]{resolved_speaker_name}\n"
-            "[用户群昵称]\n"
-            "[msg_id]\n"
-            "[发言内容]"
-        )
-        from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
-
-        return SessionBackedMessage(
-            raw_message=MessageSequence([TextComponent(f"{planner_prefix}{user_text}")]),
-            visible_text=visible_text,
+    ) -> SessionMessage:
+        """Build a CLI user message that the heartflow path can consume."""
+        message = SessionMessage(
+            message_id=f"maisaka_cli_{int(timestamp.timestamp() * 1000)}",
             timestamp=timestamp,
-            source_kind=source_kind,
+            platform=BufferCLI._CLI_PLATFORM,
         )
-
-    @staticmethod
-    def _build_cli_session_message(user_text: str, timestamp: datetime) -> SessionMessage:
-        """为 CLI 的知识学习构造兼容 SessionMessage。"""
-        from src.common.data_models.mai_message_data_model import MessageInfo, UserInfo
-        from src.common.data_models.message_component_data_model import MessageSequence
-
-        message = SessionMessage(message_id=f"maisaka_cli_{int(timestamp.timestamp() * 1000)}", timestamp=timestamp, platform="maisaka")
+        user_name = global_config.maisaka.user_name.strip() or "用户"
         message.message_info = MessageInfo(
             user_info=UserInfo(
-                user_id="maisaka_user",
-                user_nickname=global_config.maisaka.user_name.strip() or "用户",
+                user_id=BufferCLI._CLI_USER_ID,
+                user_nickname=user_name,
                 user_cardname=None,
             ),
             group_info=None,
             additional_config={},
         )
-        message.session_id = "maisaka_cli"
-        message.raw_message = MessageSequence([])
-        visible_text = format_speaker_content(
-            global_config.maisaka.user_name.strip() or "用户",
-            user_text,
-            timestamp,
-        )
-        message.raw_message.text(visible_text)
-        message.processed_plain_text = visible_text
-        message.display_message = visible_text
+        message.raw_message = MessageSequence([TextComponent(text=user_text)])
+        message.processed_plain_text = user_text
+        message.display_message = user_text
         message.initialized = True
         return message
 
-    def _trigger_knowledge_learning(self, messages: list[SessionMessage]) -> None:
-        """在 CLI 会话中按批次触发 knowledge 学习。"""
-        if not global_config.maisaka.enable_knowledge_module:
-            return
-
-        self._knowledge_learner.add_messages(messages)
-
-        elapsed = time.monotonic() - self._last_knowledge_extraction_time
-        if elapsed < self._knowledge_min_extraction_interval:
-            return
-
-        cache_size = self._knowledge_learner.get_cache_size()
-        if cache_size < self._knowledge_min_messages_for_extraction:
-            return
-
-        self._last_knowledge_extraction_time = time.monotonic()
-        asyncio.create_task(self._run_knowledge_learning())
-
-    async def _run_knowledge_learning(self) -> None:
-        """后台执行 knowledge 学习,避免阻塞主对话。"""
-        try:
-            added_count = await self._knowledge_learner.learn()
-            if added_count > 0 and global_config.maisaka.show_thinking:
-                console.print(f"[muted]知识学习已完成,新增 {added_count} 条数据。[/muted]")
-        except Exception as exc:
-            console.print(f"[warning]知识学习失败:{exc}[/warning]")
-
-    async def _run_llm_loop(self, chat_history: list[LLMContextMessage]) -> None:
-        """
-        Main inner loop for the Maisaka planner.
-
-        Each round may produce internal thoughts and optionally call tools:
-        - reply(msg_id): generate a visible reply for the current round
-        - no_reply(): pause the inner loop until a new user message arrives
-        - wait(seconds): wait for new user input
-        """
-        if self._chat_loop_service is None:
-            return
-
-        consecutive_errors = 0
-        last_had_tool_calls = True
-
-        while True:
-            if last_had_tool_calls:
-                tasks = []
-                status_text_parts = []
-
-                if global_config.maisaka.enable_knowledge_module:
-                    tasks.append(("knowledge", retrieve_relevant_knowledge(self._chat_loop_service, chat_history)))
-                    status_text_parts.append("知识库")
-
-                with console.status(
-                    f"[info]{' + '.join(status_text_parts)} 分析中...[/info]",
-                    spinner="dots",
-                ):
-                    results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True)
-
-                knowledge_analysis = ""
-                if global_config.maisaka.enable_knowledge_module:
-                    knowledge_result = results[0] if results else None
-                    if isinstance(knowledge_result, Exception):
-                        console.print(f"[warning]知识分析失败:{knowledge_result}[/warning]")
-                    elif isinstance(knowledge_result, str) and knowledge_result.strip():
-                        knowledge_analysis = knowledge_result
-                        if global_config.maisaka.show_thinking:
-                            console.print(
-                                Panel(
-                                    Markdown(knowledge_analysis),
-                                    title="知识",
-                                    border_style="bright_magenta",
-                                    padding=(0, 1),
-                                    style="dim",
-                                )
-                            )
-
-                if chat_history and isinstance(chat_history[-1], AssistantMessage) and chat_history[-1].source == "perception":
-                    chat_history.pop()
-
-                perception_parts = []
-                if knowledge_analysis:
-                    perception_parts.append(f"知识库\n{knowledge_analysis}")
-
-                if perception_parts:
-                    chat_history.append(
-                        AssistantMessage(
-                            content="\n\n".join(perception_parts),
-                            timestamp=datetime.now(),
-                            source_kind="perception",
-                        )
-                    )
-            elif global_config.maisaka.show_thinking:
-                console.print("[muted]上一轮没有使用工具,本轮跳过模块分析。[/muted]")
-
-            with console.status("[info]正在思考...[/info]", spinner="dots"):
-                try:
-                    response = await self._chat_loop_service.chat_loop_step(chat_history)
-                    consecutive_errors = 0
-                except Exception as exc:
-                    consecutive_errors += 1
-                    console.print(f"[error]大模型调用失败:{exc}[/error]")
-                    if consecutive_errors >= 3:
-                        console.print("[error]连续失败次数过多,结束对话。[/error]\n")
-                        break
-                    continue
-
-            chat_history.append(response.raw_message)
-            self._last_assistant_response_time = datetime.now()
-
-            if global_config.maisaka.show_thinking and response.content:
-                console.print(
-                    Panel(
-                        Markdown(response.content),
-                        title="思考",
-                        border_style="dim",
-                        padding=(1, 2),
-                        style="dim",
-                    )
-                )
-
-            if response.content and not response.tool_calls:
-                last_had_tool_calls = False
-                continue
-
-            if not response.tool_calls:
-                last_had_tool_calls = False
-                continue
-
-            should_stop = False
-            tool_context = self._build_tool_context()
-
-            for tool_call in response.tool_calls:
-                if tool_call.func_name == "reply":
-                    reply = await self._generate_visible_reply(chat_history, response.content or "")
-                    chat_history.append(
-                        ToolResultMessage(
-                            content="已生成并记录可见回复。",
-                            timestamp=datetime.now(),
-                            tool_call_id=tool_call.call_id,
-                            tool_name=tool_call.func_name,
-                        )
-                    )
-                    chat_history.append(
-                        self._build_cli_context_message(
-                            user_text=reply,
-                            timestamp=datetime.now(),
-                            source_kind="guided_reply",
-                            speaker_name=global_config.bot.nickname.strip() or "MaiSaka",
-                        )
-                    )
-
-                elif tool_call.func_name == "no_reply":
-                    if global_config.maisaka.show_thinking:
-                        console.print("[muted]对话已暂停,等待新的输入...[/muted]")
-                    chat_history.append(
-                        ToolResultMessage(
-                            content="当前对话循环已暂停,等待新消息到来。",
-                            timestamp=datetime.now(),
-                            tool_call_id=tool_call.call_id,
-                            tool_name=tool_call.func_name,
-                        )
-                    )
-                    should_stop = True
-
-                elif tool_call.func_name == "wait":
-                    tool_result = await handle_wait(tool_call, chat_history, tool_context)
-                    if tool_context.last_user_input_time != self._last_user_input_time:
-                        self._last_user_input_time = tool_context.last_user_input_time
-                    if tool_result.startswith("[[QUIT]]"):
-                        should_stop = True
-
-                elif self._mcp_manager and self._mcp_manager.is_mcp_tool(tool_call.func_name):
-                    await handle_mcp_tool(tool_call, chat_history, self._mcp_manager)
-
-                else:
-                    await handle_unknown_tool(tool_call, chat_history)
-
-            if should_stop:
-                console.print("[muted]对话已暂停,等待新的输入...[/muted]\n")
-                break
-
-            last_had_tool_calls = True
-
-    async def _init_mcp(self) -> None:
-        """初始化 MCP 服务并注册暴露的工具。"""
-        self._mcp_host_bridge = MCPHostLLMBridge(
-            sampling_task_name=global_config.mcp.client.sampling.task_name,
+    async def _dispatch_input(self, user_text: str) -> None:
+        """Forward CLI input to the heartflow path."""
+        message = self._build_cli_session_message(
+            user_text=user_text,
+            timestamp=datetime.now(),
         )
-        self._mcp_manager = await MCPManager.from_app_config(
-            global_config.mcp,
-            host_callbacks=self._mcp_host_bridge.build_callbacks(),
+        chat_manager.register_message(message)
+        self._session = await chat_manager.get_or_create_session(
+            platform=self._CLI_PLATFORM,
+            user_id=self._CLI_USER_ID,
         )
-
-        if self._mcp_manager and self._chat_loop_service:
-            mcp_tools = self._mcp_manager.get_openai_tools()
-            if mcp_tools:
-                self._chat_loop_service.set_extra_tools(mcp_tools)
-            summary = self._mcp_manager.get_feature_summary()
-            console.print(
-                Panel(
-                    f"已加载 {len(mcp_tools)} 个 MCP 工具。\n{summary}",
-                    title="MCP 能力",
-                    border_style="green",
-                    padding=(0, 1),
-                )
-            )
-
-    async def _generate_visible_reply(self, chat_history: list[LLMContextMessage], latest_thought: str) -> str:
-        """根据最新思考生成并输出可见回复。"""
-        if not latest_thought:
-            return ""
-
-        with console.status("[info]正在生成可见回复...[/info]", spinner="dots"):
-            success, result = await self._reply_generator.generate_reply_with_context(
-                reply_reason=latest_thought,
-                chat_history=chat_history,
-            )
-        if success and result.text_fragments:
-            reply = result.text_fragments[0]
-        else:
-            reply = "..."
-
-        console.print(
-            Panel(
-                Markdown(reply),
-                title="MaiSaka",
-                border_style="magenta",
-                padding=(1, 2),
-            )
-        )
-
-        return reply
+        await self._message_receiver.process_message(message)
 
     async def run(self) -> None:
         """主交互循环。"""
-        if global_config.mcp.enable:
-            await self._init_mcp()
-        else:
-            console.print("[muted]MCP 已禁用(mcp.enable=false)[/muted]")
-
         self._reader.start(asyncio.get_event_loop())
         self._show_banner()
 
@@ -442,17 +105,17 @@ class BufferCLI:
             while True:
                 console.print("[bold cyan]> [/bold cyan]", end="")
                 raw_input = await self._reader.get_line()
                 if raw_input is None:
-                    console.print("\n[muted]再见![/muted]")
+                    console.print("\n[muted]再见[/muted]")
                     break
-                raw_input = raw_input.strip()
-                if not raw_input:
+                user_text = raw_input.strip()
+                if not user_text:
                     continue
-                await self._start_chat(raw_input)
+                await self._dispatch_input(user_text)
         finally:
-            if self._mcp_manager:
-                await self._mcp_manager.close()
-            self._mcp_host_bridge = None
+            if self._session is not None:
+                runtime = heartflow_manager.heartflow_chat_list.pop(self._session.session_id, None)
+                if runtime is not None:
+                    await runtime.stop()
diff --git a/src/cli/maisaka_cli_sender.py b/src/cli/maisaka_cli_sender.py
new file mode 100644
index 00000000..4f03bb7e
--- /dev/null
+++ b/src/cli/maisaka_cli_sender.py
@@ -0,0 +1,27 @@
+"""Maisaka CLI rendering adapter."""
+
+from rich.markdown import Markdown
+from rich.panel import Panel
+
+from src.common.logger import get_logger
+from src.config.config import global_config
+
+from .console import console
+
+CLI_PLATFORM_NAME = "maisaka_cli"
+
+logger = get_logger("maisaka_cli_sender")
+
+
+def render_cli_message(content: str, *, title: str = "") -> None:
+    """Render a message from the CLI private-chat session to the terminal."""
+    preview_text = content.strip() or "..."
+    console.print(
+        Panel(
+            Markdown(preview_text),
+            title=title or global_config.bot.nickname.strip() or "MaiSaka",
+            border_style="magenta",
+            padding=(1, 2),
+        )
+    )
+    logger.info(f"[CLI] 已将消息输出到终端: content={preview_text!r}")
diff --git a/src/maisaka/builtin_tool/context.py b/src/maisaka/builtin_tool/context.py
index 3261c346..6bf3443d 100644
--- a/src/maisaka/builtin_tool/context.py
+++ b/src/maisaka/builtin_tool/context.py
@@ -14,6 +14,7 @@ from src.core.tooling import ToolExecutionResult
 
 from ..context_messages import SessionBackedMessage
 from ..message_adapter import format_speaker_content
+from ..planner_message_utils import build_planner_prefix, build_session_backed_text_message
 
 if TYPE_CHECKING:
     from ..reasoning_engine import MaisakaReasoningEngine
@@ -141,20 +142,9 @@ class BuiltinToolRuntimeContext:
         bot_name = global_config.bot.nickname.strip() or "MaiSaka"
         reply_timestamp = datetime.now()
 
-        planner_prefix = (
-            f"[时间]{reply_timestamp.strftime('%H:%M:%S')}\n"
-            f"[用户]{bot_name}\n"
-            "[用户群昵称]\n"
-            "[msg_id]\n"
-            "[发言内容]"
-        )
-        history_message = SessionBackedMessage(
-            raw_message=MessageSequence([TextComponent(f"{planner_prefix}{reply_text}")]),
-            visible_text=format_speaker_content(
-                bot_name,
-                reply_text,
-                reply_timestamp,
-            ),
+        history_message = build_session_backed_text_message(
+            speaker_name=bot_name,
+            text=reply_text,
             timestamp=reply_timestamp,
             source_kind="guided_reply",
         )
@@ -170,12 +160,9 @@ class BuiltinToolRuntimeContext:
         bot_name = global_config.bot.nickname.strip() or "MaiSaka"
         reply_timestamp = datetime.now()
 
-        planner_prefix = (
-            f"[时间]{reply_timestamp.strftime('%H:%M:%S')}\n"
-            f"[用户]{bot_name}\n"
-            "[用户群昵称]\n"
-            "[msg_id]\n"
-            "[发言内容]"
+        planner_prefix = build_planner_prefix(
+            timestamp=reply_timestamp,
+            user_name=bot_name,
         )
         history_message = SessionBackedMessage(
             raw_message=MessageSequence(
diff --git a/src/maisaka/builtin_tool/reply.py b/src/maisaka/builtin_tool/reply.py
index 5e51a48e..ac7fe767 100644
--- a/src/maisaka/builtin_tool/reply.py
+++ b/src/maisaka/builtin_tool/reply.py
@@ -3,6 +3,7 @@
 from typing import Optional
 
 from src.chat.replyer.replyer_manager import replyer_manager
+from src.cli.maisaka_cli_sender import CLI_PLATFORM_NAME, render_cli_message
 from src.common.logger import get_logger
 from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
 from src.services import send_service
@@ -134,17 +135,22 @@ async def handle_tool(
     combined_reply_text = "".join(reply_segments)
     try:
         sent = False
-        for index, segment in enumerate(reply_segments):
-            sent = await send_service.text_to_stream(
-                text=segment,
-                stream_id=tool_ctx.runtime.session_id,
-                set_reply=quote_reply if index == 0 else False,
-                reply_message=target_message if quote_reply and index == 0 else None,
-                selected_expressions=reply_result.selected_expression_ids or None,
-                typing=index > 0,
-            )
-            if not sent:
-                break
+        if tool_ctx.runtime.chat_stream.platform == CLI_PLATFORM_NAME:
+            for segment in reply_segments:
+                render_cli_message(segment)
+            sent = True
+        else:
+            for index, segment in enumerate(reply_segments):
+                sent = await send_service.text_to_stream(
+                    text=segment,
+                    stream_id=tool_ctx.runtime.session_id,
+                    set_reply=quote_reply if index == 0 else False,
+                    reply_message=target_message if quote_reply and index == 0 else None,
+                    selected_expressions=reply_result.selected_expression_ids or None,
+                    typing=index > 0,
+                )
+                if not sent:
+                    break
     except Exception:
         logger.exception(
             f"{tool_ctx.runtime.log_prefix} 发送文字消息时发生异常,目标消息编号={target_message_id}"
diff --git a/src/maisaka/chat_loop_service.py b/src/maisaka/chat_loop_service.py
index b8b57eff..8efb16bc 100644
--- a/src/maisaka/chat_loop_service.py
+++ b/src/maisaka/chat_loop_service.py
@@ -40,6 +40,7 @@ from src.services.llm_service import LLMServiceClient
 
 from .builtin_tool import get_builtin_tools
 from .context_messages import AssistantMessage, LLMContextMessage, SessionBackedMessage, ToolResultMessage
 from .message_adapter import format_speaker_content
+from .planner_message_utils import build_session_backed_text_message
 from .prompt_cli_renderer import PromptCLIVisualizer
 
@@ -920,37 +921,4 @@ class MaisakaChatLoopService:
         if first_valid_index == 0:
             return selected_history
-        return selected_history[first_valid_index:]
-
-    @staticmethod
-    def build_chat_context(user_text: str) -> List[LLMContextMessage]:
-        """根据用户输入构造最小对话上下文。
-
-        Args:
-            user_text: 用户输入文本。
-
-        Returns:
-            List[LLMContextMessage]: 构造好的上下文消息列表。
-        """
-
-        timestamp = datetime.now()
-        visible_text = format_speaker_content(
-            global_config.maisaka.user_name.strip() or "用户",
-            user_text,
-            timestamp,
-        )
-        planner_prefix = (
-            f"[时间]{timestamp.strftime('%H:%M:%S')}\n"
-            f"[用户]{global_config.maisaka.user_name.strip() or '用户'}\n"
-            "[用户群昵称]\n"
-            "[msg_id]\n"
-            "[发言内容]"
-        )
-        return [
-            SessionBackedMessage(
-                raw_message=MessageSequence([TextComponent(f"{planner_prefix}{user_text}")]),
-                visible_text=visible_text,
-                timestamp=timestamp,
-                source_kind="user",
-            )
-        ]
+        return selected_history[first_valid_index:]
diff --git a/src/maisaka/planner_message_utils.py b/src/maisaka/planner_message_utils.py
new file mode 100644
index 00000000..ffbc6f99
--- /dev/null
+++ b/src/maisaka/planner_message_utils.py
@@ -0,0 +1,109 @@
+"""Helpers for building Maisaka planner messages."""
+
+from datetime import datetime
+from typing import Optional
+
+from src.chat.message_receive.message import SessionMessage
+from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
+
+from .context_messages import SessionBackedMessage
+from .message_adapter import format_speaker_content
+
+
+def build_planner_prefix(
+    *,
+    timestamp: datetime,
+    user_name: str,
+    group_card: str = "",
+    message_id: Optional[str] = None,
+    include_message_id: bool = True,
+) -> str:
+    """Build the unified message prefix used by the Maisaka planner.
+
+    Args:
+        timestamp: Message time.
+        user_name: User name shown to the planner.
+        group_card: Group card name.
+        message_id: Message ID.
+        include_message_id: Whether to emit the `msg_id` field.
+
+    Returns:
+        str: The assembled planner prefix.
+    """
+
+    prefix_parts = [
+        f"[时间]{timestamp.strftime('%H:%M:%S')}\n",
+        f"[用户]{user_name}\n",
+        f"[用户群昵称]{group_card}\n",
+    ]
+    if include_message_id:
+        prefix_parts.append(f"[msg_id]{message_id or ''}\n")
+    prefix_parts.append("[发言内容]")
+    return "".join(prefix_parts)
+
+
+def build_planner_user_prefix_from_session_message(message: SessionMessage) -> str:
+    """Build the planner prefix from a real session message.
+
+    Args:
+        message: The original session message.
+
+    Returns:
+        str: The planner prefix string.
+    """
+
+    user_info = message.message_info.user_info
+    user_name = user_info.user_nickname or user_info.user_id
+    return build_planner_prefix(
+        timestamp=message.timestamp,
+        user_name=user_name,
+        group_card=user_info.user_cardname or "",
+        message_id=message.message_id,
+        include_message_id=not message.is_notify and bool(message.message_id),
+    )
+
+
+def build_session_backed_text_message(
+    *,
+    speaker_name: str,
+    text: str,
+    timestamp: datetime,
+    source_kind: str,
+    group_card: str = "",
+    message_id: Optional[str] = None,
+    include_message_id: bool = True,
+) -> SessionBackedMessage:
+    """Build a plain-text history message carrying the planner prefix.
+
+    Args:
+        speaker_name: Speaker name.
+        text: Message text.
+        timestamp: Message time.
+        source_kind: Context source kind.
+        group_card: Group card name.
+        message_id: Message ID.
+        include_message_id: Whether to emit the `msg_id` field.
+
+    Returns:
+        SessionBackedMessage: A context message that can be appended to history directly.
+    """
+
+    planner_prefix = build_planner_prefix(
+        timestamp=timestamp,
+        user_name=speaker_name,
+        group_card=group_card,
+        message_id=message_id,
+        include_message_id=include_message_id,
+    )
+    return SessionBackedMessage(
+        raw_message=MessageSequence([TextComponent(f"{planner_prefix}{text}")]),
+        visible_text=format_speaker_content(
+            speaker_name,
+            text,
+            timestamp,
+            message_id if include_message_id else None,
+        ),
+        timestamp=timestamp,
+        message_id=message_id,
+        source_kind=source_kind,
+    )
diff --git a/src/maisaka/reasoning_engine.py b/src/maisaka/reasoning_engine.py
index a263a940..1643f8c9 100644
--- a/src/maisaka/reasoning_engine.py
+++ b/src/maisaka/reasoning_engine.py
@@ -33,6 +33,7 @@ from .message_adapter import (
     clone_message_sequence,
     format_speaker_content,
 )
+from .planner_message_utils import build_planner_user_prefix_from_session_message
 
 if TYPE_CHECKING:
     from .runtime import MaisakaHeartFlowChatting
@@ -226,7 +227,7 @@ class MaisakaReasoningEngine:
 
     async def _build_message_sequence(self, message: SessionMessage) -> tuple[MessageSequence, str]:
         message_sequence = MessageSequence([])
-        planner_prefix = self._build_planner_user_prefix(message)
+        planner_prefix = build_planner_user_prefix_from_session_message(message)
         appended_component = False
 
         if global_config.maisaka.direct_image_input:
@@ -255,22 +256,6 @@ class MaisakaReasoningEngine:
 
         return message_sequence, legacy_visible_text
 
-    @staticmethod
-    def _build_planner_user_prefix(message: SessionMessage) -> str:
-        user_info = message.message_info.user_info
-        timestamp_text = message.timestamp.strftime("%H:%M:%S")
-        user_name = user_info.user_nickname or user_info.user_id
-        group_card = user_info.user_cardname or ""
-        prefix_parts = [
-            f"[时间]{timestamp_text}\n",
-            f"[用户]{user_name}\n",
-            f"[用户群昵称]{group_card}\n",
-        ]
-        if not message.is_notify and message.message_id:
-            prefix_parts.append(f"[msg_id]{message.message_id}\n")
-        prefix_parts.append("[发言内容]")
-        return "".join(prefix_parts)
-
     def _build_legacy_visible_text(self, message: SessionMessage, source_sequence: MessageSequence) -> str:
         user_info = message.message_info.user_info
         speaker_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
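
-- 
Reviewer note, not part of the applied diff: a minimal usage sketch of the two
helpers this patch centralizes in src/maisaka/planner_message_utils.py. It
assumes only what the new module itself defines; the variable names and sample
strings below are illustrative.

    from datetime import datetime

    from src.maisaka.planner_message_utils import (
        build_planner_prefix,
        build_session_backed_text_message,
    )

    now = datetime.now()

    # Prefix-only form, as used for image replies in builtin_tool/context.py;
    # group_card and message_id default to empty, matching the removed inline code.
    prefix = build_planner_prefix(timestamp=now, user_name="MaiSaka")
    # -> "[时间]HH:MM:SS\n[用户]MaiSaka\n[用户群昵称]\n[msg_id]\n[发言内容]"

    # One-call replacement for the SessionBackedMessage boilerplate that
    # builtin_tool/context.py and chat_loop_service.py previously duplicated.
    history_message = build_session_backed_text_message(
        speaker_name="MaiSaka",
        text="你好",
        timestamp=now,
        source_kind="guided_reply",
    )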