From e66b2eb662a26d5607b68bb595dd041f633c03f8 Mon Sep 17 00:00:00 2001 From: DrSmoothl <1787882683@qq.com> Date: Thu, 19 Mar 2026 18:05:10 +0800 Subject: [PATCH 1/9] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=E5=AE=89?= =?UTF-8?q?=E5=85=A8=E9=9D=99=E6=80=81=E6=96=87=E4=BB=B6=E8=B7=AF=E5=BE=84?= =?UTF-8?q?=E8=A7=A3=E6=9E=90=E6=B5=8B=E8=AF=95=EF=BC=8C=E5=A2=9E=E5=BC=BA?= =?UTF-8?q?=E8=B7=AF=E5=BE=84=E9=81=8D=E5=8E=86=E4=BF=9D=E6=8A=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pytests/webui/test_app.py | 51 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/pytests/webui/test_app.py b/pytests/webui/test_app.py index eb37305e..48bfaaf4 100644 --- a/pytests/webui/test_app.py +++ b/pytests/webui/test_app.py @@ -1,5 +1,7 @@ from unittest.mock import patch +import pytest + from src.webui import app as webui_app @@ -120,3 +122,52 @@ def test_resolve_static_path_uses_dashboard_dist(monkeypatch, tmp_path) -> None: resolved_path = webui_app._resolve_static_path() assert resolved_path == dashboard_dist + + +def test_resolve_safe_static_file_path_allows_regular_static_file(tmp_path) -> None: + static_path = tmp_path / "dist" + asset_path = static_path / "assets" / "app.js" + asset_path.parent.mkdir(parents=True) + asset_path.write_text("console.log('ok')", encoding="utf-8") + + resolved_path = webui_app._resolve_safe_static_file_path(static_path, "assets/app.js") + + assert resolved_path == asset_path.resolve() + + +def test_resolve_safe_static_file_path_rejects_relative_path_traversal(tmp_path) -> None: + static_path = tmp_path / "dist" + static_path.mkdir() + + resolved_path = webui_app._resolve_safe_static_file_path(static_path, "../secret.txt") + + assert resolved_path is None + + +def test_resolve_safe_static_file_path_rejects_absolute_path_traversal(tmp_path) -> None: + static_path = tmp_path / "dist" + static_path.mkdir() + + resolved_path = webui_app._resolve_safe_static_file_path(static_path, "/etc/passwd") + + assert resolved_path is None + + +def test_resolve_safe_static_file_path_rejects_symlink_escape(tmp_path) -> None: + static_path = tmp_path / "dist" + static_path.mkdir() + + outside_dir = tmp_path / "outside" + outside_dir.mkdir() + outside_file = outside_dir / "secret.txt" + outside_file.write_text("secret", encoding="utf-8") + + link_path = static_path / "escape" + try: + link_path.symlink_to(outside_dir, target_is_directory=True) + except OSError as exc: + pytest.skip(f"symlink is not supported in this environment: {exc}") + + resolved_path = webui_app._resolve_safe_static_file_path(static_path, "escape/secret.txt") + + assert resolved_path is None From 0ecb201ad5c4c5fab112bbdeb2353e5c21ef6d6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=B4=E7=8C=AB?= Date: Sat, 21 Mar 2026 12:13:01 +0900 Subject: [PATCH 2/9] document link and update --- AGENTS.md | 4 ++++ CLAUDE.md | 1 + 2 files changed, 5 insertions(+) create mode 120000 CLAUDE.md diff --git a/AGENTS.md b/AGENTS.md index 4fac1284..b4caaaf1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -31,3 +31,7 @@ 1. 应该尽量减少使用getattr和setattr方法,除非是在对一个动态类进行处理或者使用Monkeypatch完成Pytest 2. 
在重构代码时,如果遇到getattr和setattr,应该尝试检查这个类实例是否有这个属性,如果有,则直接替换为类属性访问写法。 - 举例:`v = getattr(instance, "value", "")` 在检查到`instance`有`value`属性后应该改为`v = instance.value` + +# 运行/调试/构建/测试/依赖 +优先使用uv +依赖项以 pyproject.toml 为准 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 00000000..47dc3e3d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file From 6a2dc6fcebe784a10f610f48b89982e1366bf638 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=B4=E7=8C=AB?= Date: Sat, 21 Mar 2026 12:15:12 +0900 Subject: [PATCH 3/9] =?UTF-8?q?fix:=20=E4=BF=AE=E6=AD=A3=20.gitignore=20?= =?UTF-8?q?=E4=B8=AD=20CLAUDE.md=20=E7=9A=84=E6=B3=A8=E9=87=8A=E6=A0=BC?= =?UTF-8?q?=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ace02c8b..9421c275 100644 --- a/.gitignore +++ b/.gitignore @@ -46,7 +46,7 @@ config/lpmm_config.toml config/lpmm_config.toml.bak template/compare/bot_config_template.toml template/compare/model_config_template.toml -CLAUDE.md +# CLAUDE.md cloudflare-workers/ log_viewer/ dev/ From 84551460e663fc62ff41b9d901d19043c21b9227 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=B4=E7=8C=AB?= Date: Sat, 21 Mar 2026 12:22:58 +0900 Subject: [PATCH 4/9] =?UTF-8?q?fix:=20=E9=99=90=E5=88=B6=20maibot-plugin-s?= =?UTF-8?q?dk=20=E7=89=88=E6=9C=AC=E8=8C=83=E5=9B=B4=EF=BC=8C=E9=81=BF?= =?UTF-8?q?=E5=85=8D=E4=B8=8D=E5=85=BC=E5=AE=B9=E6=9B=B4=E6=96=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 2 +- requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f6dd6646..f41e9448 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ dependencies = [ "jieba>=0.42.1", "json-repair>=0.47.6", "maim-message>=0.6.2", - "maibot-plugin-sdk>=1.2.3", + "maibot-plugin-sdk>=1.2.3,<2.0.0", "msgpack>=1.1.2", "numpy>=2.2.6", "openai>=1.95.0", diff --git a/requirements.txt b/requirements.txt index 6a72e1e4..a1160c4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ httpx jieba>=0.42.1 json-repair>=0.47.6 maim-message>=0.6.2 -maibot-plugin-sdk>=1.2.3 +maibot-plugin-sdk>=1.2.3,<2.0.0 msgpack>=1.1.2 numpy>=2.2.6 openai>=1.95.0 @@ -29,4 +29,4 @@ structlog>=25.4.0 tomlkit>=0.13.3 typing-extensions uvicorn>=0.35.0 -watchfiles>=1.1.1 +watchfiles>=1.1.1 \ No newline at end of file From e7ca3142e95a887c526375a08788f2a596fb21e8 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Mon, 23 Mar 2026 17:55:39 +0800 Subject: [PATCH 5/9] =?UTF-8?q?feat=EF=BC=9A=E4=BF=AE=E6=94=B9maisaka?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- prompts/zh-CN/maidairy_chat.prompt | 24 +- saka.py | 11 +- src/chat/planner_actions/action_modifier.py | 2 +- src/config/config.py | 2 +- src/config/official_configs.py | 26 +- src/maisaka/builtin_tools.py | 14 +- src/maisaka/cli.py | 44 ++- src/maisaka/config.py | 48 +--- src/maisaka/debug_client.py | 93 ------ src/maisaka/debug_viewer.py | 194 ------------- src/maisaka/input_reader.py | 70 +++-- src/maisaka/knowledge.py | 2 +- src/maisaka/llm_service.py | 158 +++++++++-- src/maisaka/mcp_client/__init__.py | 2 +- src/maisaka/mcp_client/config.py | 2 +- src/maisaka/mcp_client/connection.py | 2 +- src/maisaka/mcp_client/manager.py | 3 +- src/maisaka/replyer.py | 2 +- src/maisaka/tool_handlers.py | 297 
+------------------- 代码备忘.md | 9 + 20 files changed, 257 insertions(+), 748 deletions(-) delete mode 100644 src/maisaka/debug_client.py delete mode 100644 src/maisaka/debug_viewer.py diff --git a/prompts/zh-CN/maidairy_chat.prompt b/prompts/zh-CN/maidairy_chat.prompt index a253bb58..665ca5d1 100644 --- a/prompts/zh-CN/maidairy_chat.prompt +++ b/prompts/zh-CN/maidairy_chat.prompt @@ -1,7 +1,10 @@ -你叫麦麦,是一个女大学生,你说话比较口语化 +你正在扮演一个叫麦麦的角色,你能够自然的和用户交互,你必须扮演一个人类。并使用send_message和其他工具与用户进行交互 + +你需要根据对应的情景与用户进行交互,你只能通过send_message工具与用户对话。 +如果不使用say,直接输出的内容不会发送给用户,用户无法查看 你有以下工具: -• say(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。 +• send_message(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。 直接输出的文本会被视为你的内心思考,用户无法阅读。 reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。 • wait(seconds) — 暂时结束你的发言,把话语权交给用户,等待对方说话。 @@ -11,14 +14,15 @@ • stop() — 结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。 {file_tools_section}• store_context(count, reason) — 将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。 -思考规则: -你必须先进行内心思考,然后选择需要使用的工具,如果你想说话,必须使用say工具。 -在内心思考中分析当前对话状态和你的想法,然后通过 say 工具的 reason 参数描述你想要回复的方式、想法和内容。 -只有使用say工具,你才能向用户说话。用户才能看到你的发言。 +你需要按照以下**核心流程**决策 +1.思考是否需要直接对用户说话,如果需要,使用send_message工具,并描述你想要回复的方式、想法和内容。 +2.如果你认为使用工具能够帮助你更好的回复用户发言,请你选择合适的工具并视情况回复。 +3.思考是否需要等待或者结束对话,如果需要,使用wait或stop工具,并描述你想要等待的原因。 + 交互规则: 1. 你可以自由选择是否调用工具——如果你还想继续思考,可以不调用任何工具 -2. 想对用户说话时,必须调用 say 工具;直接输出的文本只会被视为内心独白 -3. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应 -4. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话 -5. 你可以在同一轮同时调用多个工具,例如先 say 再 wait +2. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应 +3. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话 +4. 你可以在同一轮同时调用多个工具,例如先 say 再 wait +现在根据**核心流程**输出你的思考,在思考完后选择你使用的tool: \ No newline at end of file diff --git a/saka.py b/saka.py index 9d5b06e8..162662e1 100644 --- a/saka.py +++ b/saka.py @@ -23,21 +23,24 @@ if str(_maisaka_path) not in sys.path: sys.path.insert(0, str(_maisaka_path)) from src.prompt.prompt_manager import prompt_manager -from config import console -from cli import BufferCLI +from src.maisaka.cli import BufferCLI +from src.maisaka.config import console def main(): + cli = None + # 加载所有提示词文件 prompt_manager.load_prompts() - cli = BufferCLI() try: + cli = BufferCLI() asyncio.run(cli.run()) except KeyboardInterrupt: console.print("\n[muted]程序已终止[/muted]") finally: - cli._debug_viewer.close() + if cli and hasattr(cli, "_debug_viewer"): + cli._debug_viewer.close() if __name__ == "__main__": diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index c7d64c1f..0d81c18f 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -52,7 +52,7 @@ class ActionModifier: all_actions = self.action_manager.get_using_actions() message_list_before_now_half = get_messages_before_time_in_chat( - chat_id=self.chat_stream.stream_id, + chat_id=self.chat_stream.session_id, timestamp=time.time(), limit=min(int(global_config.chat.max_context_size * 0.33), 10), filter_intercept_message_level=1, diff --git a/src/config/config.py b/src/config/config.py index a3b81d2d..ff5941bf 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -55,7 +55,7 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config" BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute() MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute() MMC_VERSION: str = "1.0.0" -CONFIG_VERSION: str = "8.1.0" +CONFIG_VERSION: str = "8.1.2" MODEL_CONFIG_VERSION: str = "1.12.0" logger = get_logger("config") diff --git 
a/src/config/official_configs.py b/src/config/official_configs.py index 2de01030..6ed5c452 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -1582,33 +1582,23 @@ class MaiSakaConfig(ConfigBase): ) """启用文件列表工具""" - enable_qq_tools: bool = Field( + show_analyze_cognition_prompt: bool = Field( default=False, json_schema_extra={ "x-widget": "switch", - "x-icon": "users", + "x-icon": "terminal", }, ) - """启用 QQ 工具(获取聊天记录、发送消息等)""" + """是否在 CLI 中显示 analyze_cognition 的 Prompt""" - qq_api_base_url: str = Field( - default="", + show_analyze_timing_prompt: bool = Field( + default=False, json_schema_extra={ - "x-widget": "input", - "x-icon": "server", + "x-widget": "switch", + "x-icon": "terminal", }, ) - """QQ API 基地址""" - - qq_api_key: str = Field( - default="", - json_schema_extra={ - "x-widget": "input", - "x-icon": "key", - }, - ) - """QQ API 密钥""" - + """是否在 CLI 中显示 analyze_timing 的 Prompt""" class PluginRuntimeConfig(ConfigBase): """插件运行时配置类""" diff --git a/src/maisaka/builtin_tools.py b/src/maisaka/builtin_tools.py index 0260f778..10b99152 100644 --- a/src/maisaka/builtin_tools.py +++ b/src/maisaka/builtin_tools.py @@ -4,7 +4,7 @@ MaiSaka - 内置工具定义 使用主项目的工具格式(ToolOption + ToolParamType) """ -from typing import List, Dict, Any +from typing import Any, Dict, List from src.llm_models.payload_content.tool_option import ToolOption, ToolParamType @@ -16,19 +16,19 @@ def create_builtin_tools() -> List[ToolOption]: tools = [] # say 工具 - say_builder = ToolOptionBuilder() - say_builder.set_name("say") - say_builder.set_description( + send_message_builder = ToolOptionBuilder() + send_message_builder.set_name("send_message") + send_message_builder.set_description( "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。直接输出的文本会被视为你的内心思考,用户无法阅读。reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。" ) - say_builder.add_param( + send_message_builder.add_param( name="reason", param_type=ToolParamType.STRING, description="描述你想要回复的方式、想法和内容。例如:'同意对方的看法,并分享自己的经历' 或 '礼貌地拒绝,表示现在不方便聊天'", required=True, enum_values=None, ) - tools.append(say_builder.build()) + tools.append(send_message_builder.build()) # wait 工具 wait_builder = ToolOptionBuilder() @@ -83,7 +83,7 @@ def builtin_tools_as_dicts() -> List[Dict[str, Any]]: """将内置工具转换为 dict 格式(用于调试)""" return [ { - "name": "say", + "name": "send_message", "description": "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。", "parameters": { "type": "object", diff --git a/src/maisaka/cli.py b/src/maisaka/cli.py index e4dc0c0f..e10620c1 100644 --- a/src/maisaka/cli.py +++ b/src/maisaka/cli.py @@ -13,7 +13,7 @@ from rich.markdown import Markdown from rich.text import Text from rich import box -from config import ( +from .config import ( console, ENABLE_EMOTION_MODULE, ENABLE_COGNITION_MODULE, @@ -21,26 +21,23 @@ from config import ( ENABLE_KNOWLEDGE_MODULE, ENABLE_MCP, ) -from input_reader import InputReader -from timing import build_timing_info -from knowledge import store_knowledge_from_context, retrieve_relevant_knowledge -from knowledge_store import get_knowledge_store -from llm_service import MaiSakaLLMService, build_message, remove_last_perception -from mcp_client import MCPManager -from tool_handlers import ( +from .input_reader import InputReader +from .knowledge import retrieve_relevant_knowledge, store_knowledge_from_context +from .knowledge_store import get_knowledge_store +from .llm_service import MaiSakaLLMService, build_message, remove_last_perception +from .mcp_client import MCPManager +from .timing import build_timing_info +from .tool_handlers import ( ToolHandlerContext, - 
handle_say, + handle_list_files, + handle_mcp_tool, + handle_read_file, + handle_send_message, + handle_store_context, handle_stop, + handle_unknown_tool, handle_wait, handle_write_file, - handle_read_file, - handle_list_files, - handle_store_context, - handle_mcp_tool, - handle_unknown_tool, - handle_get_qq_chat_info, - handle_send_info, - handle_list_qq_chats, ) @@ -487,8 +484,8 @@ class BufferCLI: ctx = self._build_tool_context() for tc in response.tool_calls: - if tc.name == "say": - await handle_say(tc, chat_history, ctx) + if tc.name in {"send_message", "say"}: + await handle_send_message(tc, chat_history, ctx) elif tc.name == "stop": await handle_stop(tc, chat_history) @@ -514,15 +511,6 @@ class BufferCLI: elif tc.name == "store_context": await handle_store_context(tc, chat_history, ctx) - elif tc.name == "get_qq_chat_info": - await handle_get_qq_chat_info(tc, chat_history) - - elif tc.name == "send_info": - await handle_send_info(tc, chat_history) - - elif tc.name == "list_qq_chats": - await handle_list_qq_chats(tc, chat_history) - elif self._mcp_manager and self._mcp_manager.is_mcp_tool(tc.name): await handle_mcp_tool(tc, chat_history, self._mcp_manager) diff --git a/src/maisaka/config.py b/src/maisaka/config.py index c9c95d8a..c34247f6 100644 --- a/src/maisaka/config.py +++ b/src/maisaka/config.py @@ -3,51 +3,31 @@ MaiSaka - 全局配置 从主项目配置系统读取配置、Rich Console 实例、主题定义。 """ -import sys from pathlib import Path +import sys + from rich.console import Console from rich.theme import Theme +from src.config.config import global_config + # 添加项目根目录到路径以导入主配置 _root = Path(__file__).parent.parent.parent.absolute() if str(_root) not in sys.path: sys.path.insert(0, str(_root)) -# ──────────────────── 从主配置读取 ──────────────────── - - -def _get_maisaka_config(): - """获取 MaiSaka 配置""" - try: - from src.config.config import config_manager - - return config_manager.config.maisaka - except Exception: - # 如果配置加载失败,返回默认值 - from src.config.official_configs import MaiSakaConfig - - return MaiSakaConfig() - - -_maisaka_config = _get_maisaka_config() - # ──────────────────── 模块开关配置 ──────────────────── +ENABLE_EMOTION_MODULE = global_config.maisaka.enable_emotion_module +ENABLE_COGNITION_MODULE = global_config.maisaka.enable_cognition_module +ENABLE_TIMING_MODULE = global_config.maisaka.enable_timing_module +ENABLE_KNOWLEDGE_MODULE = global_config.maisaka.enable_knowledge_module +ENABLE_MCP = global_config.maisaka.enable_mcp +ENABLE_WRITE_FILE = global_config.maisaka.enable_write_file +ENABLE_READ_FILE = global_config.maisaka.enable_read_file +ENABLE_LIST_FILES = global_config.maisaka.enable_list_files +SHOW_ANALYZE_COGNITION_PROMPT = global_config.maisaka.show_analyze_cognition_prompt +SHOW_ANALYZE_TIMING_PROMPT = global_config.maisaka.show_analyze_timing_prompt -ENABLE_EMOTION_MODULE = _maisaka_config.enable_emotion_module -ENABLE_COGNITION_MODULE = _maisaka_config.enable_cognition_module -# Timing 模块已包含自我反思功能 -ENABLE_TIMING_MODULE = _maisaka_config.enable_timing_module -ENABLE_KNOWLEDGE_MODULE = _maisaka_config.enable_knowledge_module -ENABLE_MCP = _maisaka_config.enable_mcp -ENABLE_WRITE_FILE = _maisaka_config.enable_write_file -ENABLE_READ_FILE = _maisaka_config.enable_read_file -ENABLE_LIST_FILES = _maisaka_config.enable_list_files - -# ──────────────────── QQ 工具配置 ──────────────────── - -ENABLE_QQ_TOOLS = _maisaka_config.enable_qq_tools -QQ_API_BASE_URL = _maisaka_config.qq_api_base_url -QQ_API_KEY = _maisaka_config.qq_api_key # ──────────────────── Rich 主题 & Console ──────────────────── diff --git 
a/src/maisaka/debug_client.py b/src/maisaka/debug_client.py deleted file mode 100644 index 1488de6b..00000000 --- a/src/maisaka/debug_client.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -MaiSaka - Debug Viewer 客户端 -在独立命令行窗口中显示每次 LLM 调用的完整 Prompt。 -通过 TCP socket 将数据发送给 debug_viewer.py 子进程。 -""" - -import json -import os -import socket -import struct -import subprocess -import sys -import time -from typing import Optional - -from config import console - - -class DebugViewer: - """ - 在独立命令行窗口中显示每次 LLM 调用的完整 Prompt。 - - 通过 TCP socket 将数据发送给 debug_viewer.py 子进程。 - """ - - def __init__(self, port: int = 19876): - self._port = port - self._conn: Optional[socket.socket] = None - self._process: Optional[subprocess.Popen] = None - - def start(self): - """启动调试窗口子进程并建立 TCP 连接。""" - script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "debug_viewer.py") - - try: - self._process = subprocess.Popen( - [sys.executable, script_path, str(self._port)], - creationflags=getattr(subprocess, "CREATE_NEW_CONSOLE", 0), - ) - except Exception as e: - console.print(f"[warning]⚠️ 无法启动调试窗口: {e}[/warning]") - return - - # 重试连接(等待子进程启动监听) - for attempt in range(20): - try: - time.sleep(0.3) - conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - conn.connect(("127.0.0.1", self._port)) - self._conn = conn - console.print(f"[success]✓ 调试窗口已启动[/success] [muted](port {self._port})[/muted]") - return - except ConnectionRefusedError: - conn.close() - - console.print("[warning]⚠️ 无法连接到调试窗口(超时)[/warning]") - - def send(self, label: str, messages: list, tools: Optional[list] = None, response: Optional[dict] = None): - """发送一次 LLM 调用的完整 prompt 和响应到调试窗口。""" - if not self._conn: - return - - # 只在有响应时才发送(避免显示两次:请求中 + 完成响应) - if response is None: - return - - payload = {"label": label, "messages": messages} - if tools: - payload["tools"] = tools - payload["response"] = response - - try: - data = json.dumps(payload, ensure_ascii=False).encode("utf-8") - header = struct.pack(">I", len(data)) - self._conn.sendall(header + data) - except Exception: - # 连接断开时静默忽略 - self._conn = None - - def close(self): - """关闭连接和子进程。""" - if self._conn: - try: - self._conn.close() - except Exception: - pass - self._conn = None - if self._process: - try: - self._process.terminate() - except Exception: - pass - self._process = None diff --git a/src/maisaka/debug_viewer.py b/src/maisaka/debug_viewer.py deleted file mode 100644 index 0a11f84f..00000000 --- a/src/maisaka/debug_viewer.py +++ /dev/null @@ -1,194 +0,0 @@ -""" -MaiSaka Debug Viewer — 在独立命令行窗口中显示每次 LLM 调用的完整 Prompt。 - -由主进程自动启动,通过 TCP socket 接收数据。 -""" - -import socket -import struct -import json -import sys - -from rich.console import Console -from rich.panel import Panel -from rich import box - -console = Console() - -ROLE_STYLES = { - "system": ("📋", "bold blue"), - "user": ("👤", "bold green"), - "assistant": ("🤖", "bold magenta"), - "tool": ("🔧", "bold yellow"), -} - - -def recv_exact(conn: socket.socket, n: int) -> bytes | None: - """精确接收 n 字节数据。""" - data = b"" - while len(data) < n: - chunk = conn.recv(n - len(data)) - if not chunk: - return None - data += chunk - return data - - -def format_message(idx: int, msg: dict) -> str: - """格式化单条消息用于终端展示。""" - try: - role = str(msg.get("role", "?")) if msg.get("role") else "?" 
- content = str(msg.get("content", "")) if msg.get("content") else "" - tool_calls = msg.get("tool_calls", []) or [] - tool_call_id = str(msg.get("tool_call_id", "")) if msg.get("tool_call_id") else "" - - icon, style = ROLE_STYLES.get(role, ("❓", "white")) - - parts: list[str] = [] - - # 消息头 - header = f"[{style}]{icon} [{idx}] {role}[/{style}]" - if tool_call_id: - header += f" [dim](tool_call_id: {tool_call_id})[/dim]" - parts.append(header) - - # 正文 - if content: - display = ( - content - if len(content) <= 3000 - else (content[:3000] + f"\n[dim]... (截断, 共 {len(content)} 字符)[/dim]") - ) - parts.append(display) - - # 工具调用 - if isinstance(tool_calls, list): - for tc in tool_calls: - if not isinstance(tc, dict): - continue - func = tc.get("function", {}) - if not isinstance(func, dict): - continue - name = func.get("name", "?") - args = func.get("arguments", "") - if isinstance(args, str) and len(args) > 500: - args = args[:500] + "..." - parts.append(f" [yellow]→ tool_call: {name}({args})[/yellow]") - - return "\n".join(parts) - except Exception: - return f"[red]消息 [{idx}] 格式化错误[/red]" - - -def main(): - port = int(sys.argv[1]) if len(sys.argv) > 1 else 19876 - - server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - server.bind(("127.0.0.1", port)) - server.listen(1) - - console.print( - Panel( - f"[bold cyan]MaiSaka Debug Viewer[/bold cyan]\n[dim]监听端口: {port} 等待主进程连接...[/dim]", - box=box.DOUBLE_EDGE, - border_style="cyan", - ) - ) - - conn, _ = server.accept() - console.print("[green]✓ 已连接到主进程[/green]\n") - - call_count = 0 - try: - while True: - # 读 4 字节长度前缀 - length_bytes = recv_exact(conn, 4) - if not length_bytes: - break - - length = struct.unpack(">I", length_bytes)[0] - - # 读取 payload - payload_bytes = recv_exact(conn, length) - if not payload_bytes: - break - - call_count += 1 - - try: - payload = json.loads(payload_bytes.decode("utf-8")) - except json.JSONDecodeError as e: - console.print(f"\n[red]JSON 解析错误: {e}[/red]") - console.print(f"[dim]原始数据: {payload_bytes[:200]}...[/dim]") - continue - - try: - label = payload.get("label", "LLM Call") - messages = payload.get("messages", []) - tools = payload.get("tools") - response = payload.get("response") - - # ── 标题栏 ── - console.print(f"\n{'═' * 90}") - console.print( - f"[bold yellow]#{call_count} {label}[/bold yellow] [dim]({len(messages)} messages)[/dim]" - ) - console.print(f"{'═' * 90}") - - # ── 逐条消息 ── - for i, msg in enumerate(messages): - console.print(format_message(i, msg)) - if i < len(messages) - 1: - console.print("[dim]─ ─ ─[/dim]") - - # ── tools 信息 ── - if tools: - tool_names = [t.get("function", {}).get("name", "?") for t in tools] - console.print(f"\n[dim]可用工具: {', '.join(tool_names)}[/dim]") - except Exception as e: - console.print(f"\n[red]数据处理错误: {e}[/red]") - console.print(f"[dim]Payload: {payload}[/dim]") - continue - - # ── 响应结果 ── - if response: - try: - console.print("\n[bold cyan]📤 LLM 响应:[/bold cyan]") - resp_content = response.get("content", "") - if resp_content: - display = ( - resp_content - if len(str(resp_content)) <= 3000 - else ( - str(resp_content)[:3000] + f"\n[dim]... 
(截断, 共 {len(str(resp_content))} 字符)[/dim]" - ) - ) - console.print(Panel(display, border_style="cyan", padding=(0, 1))) - resp_tool_calls = response.get("tool_calls", []) - if resp_tool_calls: - for tc in resp_tool_calls: - func = tc.get("function", {}) - name = func.get("name", "?") - args = func.get("arguments", "") - if isinstance(args, str) and len(args) > 300: - args = args[:300] + "..." - console.print(f" [cyan]→ tool_call: {name}({args})[/cyan]") - except Exception as e: - console.print(f"\n[red]响应解析错误: {e}[/red]") - console.print(f"[dim]原始数据: {response}[/dim]") - - console.print(f"[dim]{'─' * 90}[/dim]") - - except (ConnectionResetError, ConnectionAbortedError): - pass - finally: - conn.close() - server.close() - - console.print("\n[red]连接已断开[/red]") - input("按 Enter 关闭窗口...") - - -if __name__ == "__main__": - main() diff --git a/src/maisaka/input_reader.py b/src/maisaka/input_reader.py index 70b1fc07..eff2525c 100644 --- a/src/maisaka/input_reader.py +++ b/src/maisaka/input_reader.py @@ -1,62 +1,56 @@ """ MaiSaka - 异步输入读取器 -基于后台线程的异步标准输入读取,通过 asyncio.Queue 传递给异步代码。 +将阻塞的标准输入读取放到后台线程中,供 asyncio 循环安全消费。 """ -import sys import asyncio +import sys import threading from typing import Optional class InputReader: - """ - 基于后台线程的异步标准输入读取器。 + """后台读取标准输入,并通过 asyncio.Queue 向主循环投递结果。""" - 使用单一守护线程持续读取 stdin,通过 asyncio.Queue 传递给异步代码。 - 保证整个应用只有一个线程读 stdin,避免多线程竞争。 - 支持带超时的读取,用于 LLM wait 工具。 - """ - - def __init__(self): - self._queue: asyncio.Queue = asyncio.Queue() + def __init__(self) -> None: self._loop: Optional[asyncio.AbstractEventLoop] = None + self._queue: asyncio.Queue[Optional[str]] = asyncio.Queue() self._thread: Optional[threading.Thread] = None + self._stop_event = threading.Event() - def start(self, loop: asyncio.AbstractEventLoop): - """启动后台读取线程(仅首次调用生效)""" - if self._thread is not None: + def start(self, loop: asyncio.AbstractEventLoop) -> None: + """启动后台输入线程。重复调用时忽略。""" + if self._thread and self._thread.is_alive(): return + self._loop = loop - self._thread = threading.Thread(target=self._read_loop, daemon=True) + self._stop_event.clear() + self._thread = threading.Thread(target=self._read_loop, name="maisaka-input-reader", daemon=True) self._thread.start() - def _read_loop(self): - """后台线程:持续从 stdin 读取行""" - try: - while True: - line = sys.stdin.readline() - if not line: # EOF - self._loop.call_soon_threadsafe(self._queue.put_nowait, None) - break - stripped = line.rstrip("\n").rstrip("\r") - self._loop.call_soon_threadsafe(self._queue.put_nowait, stripped) - except Exception: - pass + def _read_loop(self) -> None: + """在后台线程中阻塞读取 stdin。""" + while not self._stop_event.is_set(): + line = sys.stdin.readline() + if self._loop is None: + return - async def get_line(self, timeout: Optional[float] = None) -> Optional[str]: - """ - 异步获取下一行输入。 + if line == "": + self._loop.call_soon_threadsafe(self._queue.put_nowait, None) + return - Args: - timeout: 超时秒数,None 表示无限等待 + self._loop.call_soon_threadsafe(self._queue.put_nowait, line.rstrip("\r\n")) - Returns: - 输入的字符串,超时或 EOF 返回 None - """ - try: - if timeout is not None: - return await asyncio.wait_for(self._queue.get(), timeout=timeout) + async def get_line(self, timeout: Optional[int] = None) -> Optional[str]: + """异步获取一行输入;设置 timeout 时支持超时返回。""" + if timeout is None: return await self._queue.get() + + try: + return await asyncio.wait_for(self._queue.get(), timeout=timeout) except asyncio.TimeoutError: return None + + def close(self) -> None: + """请求后台线程停止。""" + self._stop_event.set() diff --git a/src/maisaka/knowledge.py 
b/src/maisaka/knowledge.py index 0352fd14..760dafbd 100644 --- a/src/maisaka/knowledge.py +++ b/src/maisaka/knowledge.py @@ -4,7 +4,7 @@ MaiSaka - 了解模块 """ from typing import List -from knowledge_store import get_knowledge_store, KNOWLEDGE_CATEGORIES +from .knowledge_store import KNOWLEDGE_CATEGORIES, get_knowledge_store def build_knowledge_summary() -> str: diff --git a/src/maisaka/llm_service.py b/src/maisaka/llm_service.py index 60a28f07..5dfdfe48 100644 --- a/src/maisaka/llm_service.py +++ b/src/maisaka/llm_service.py @@ -3,19 +3,24 @@ MaiSaka LLM 服务 - 使用主项目 LLM 系统 将主项目的 LLMRequest 适配为 MaiSaka 需要的接口 """ -import json from dataclasses import dataclass -from typing import List, Optional, Literal +from typing import Any, List, Literal, Optional +import json + +from rich.console import Group +from rich.panel import Panel +from rich.pretty import Pretty +from rich.text import Text from src.common.logger import get_logger from src.config.config import config_manager -from src.llm_models.utils_model import LLMRequest -from src.prompt.prompt_manager import prompt_manager from src.llm_models.payload_content.message import MessageBuilder, RoleType from src.llm_models.payload_content.tool_option import ToolCall as ToolCallOption, ToolOption -from builtin_tools import get_builtin_tools - -import config +from src.llm_models.utils_model import LLMRequest +from src.prompt.prompt_manager import prompt_manager +from . import config +from .config import console +from .builtin_tools import get_builtin_tools logger = get_logger("maisaka_llm") @@ -123,10 +128,6 @@ class MaiSakaLLMService: tools_section += "\n• read_file(filename) — 读取 mai_files 目录下的文件内容。" if config.ENABLE_LIST_FILES: tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。" - if config.ENABLE_QQ_TOOLS: - tools_section += "\n• get_qq_chat_info(chat, limit) — 获取指定 QQ 聊天的聊天记录。" - tools_section += "\n• send_info(chat, message) — 发送消息到指定的 QQ 聊天。" - tools_section += "\n• list_qq_chats() — 获取所有可用的 QQ 聊天列表。" chat_prompt.add_context("file_tools_section", tools_section if tools_section else "") import asyncio @@ -203,6 +204,108 @@ class MaiSakaLLMService: """设置额外的工具定义(如 MCP 工具)""" self._extra_tools = list(tools) + @staticmethod + def _get_role_badge_style(role: str) -> str: + """为不同 role 返回不同的标签样式。""" + if role == "system": + return "bold white on blue" + if role == "user": + return "bold black on green" + if role == "assistant": + return "bold black on yellow" + if role == "tool": + return "bold white on magenta" + return "bold white on bright_black" + + @staticmethod + def _render_message_content(content: Any) -> object: + """把消息内容转成适合 Rich 输出的 renderable。""" + if isinstance(content, str): + return Text(content) + + if isinstance(content, list): + parts: list[object] = [] + for item in content: + if isinstance(item, dict) and item.get("type") == "text" and isinstance(item.get("text"), str): + parts.append(Text(item["text"])) + else: + parts.append(Pretty(item, expand_all=True)) + return Group(*parts) if parts else Text("") + + if content is None: + return Text("") + + return Pretty(content, expand_all=True) + + @staticmethod + def _format_tool_call_for_display(tool_call: Any) -> dict[str, Any]: + """将 tool call 转成适合 CLI 展示的结构。""" + if isinstance(tool_call, dict): + function_info = tool_call.get("function", {}) + return { + "id": tool_call.get("id"), + "name": function_info.get("name", tool_call.get("name")), + "arguments": function_info.get("arguments", tool_call.get("arguments")), + } + + return { + "id": getattr(tool_call, "call_id", 
getattr(tool_call, "id", None)), + "name": getattr(tool_call, "func_name", getattr(tool_call, "name", None)), + "arguments": getattr(tool_call, "args", getattr(tool_call, "arguments", None)), + } + + def _render_message_panel(self, message: Any, index: int) -> Panel: + """渲染主循环 prompt 中的一条消息。""" + if isinstance(message, dict): + raw_role = message.get("role", "unknown") + content = message.get("content") + tool_calls = message.get("tool_calls") + tool_call_id = message.get("tool_call_id") + else: + raw_role = getattr(message, "role", "unknown") + content = getattr(message, "content", None) + tool_calls = getattr(message, "tool_calls", None) + tool_call_id = getattr(message, "tool_call_id", None) + + role = raw_role.value if hasattr(raw_role, "value") else str(raw_role) + title = Text.assemble( + Text(f" {role.upper()} ", style=self._get_role_badge_style(role)), + Text(f" #{index}", style="muted"), + ) + + parts: list[object] = [] + if content not in (None, "", []): + parts.append(Text(" message ", style="bold cyan")) + parts.append(self._render_message_content(content)) + + if tool_calls: + parts.append(Text(" tool_calls ", style="bold magenta")) + parts.append( + Pretty( + [self._format_tool_call_for_display(tool_call) for tool_call in tool_calls], + expand_all=True, + ) + ) + + if tool_call_id: + parts.append( + Text.assemble( + Text(" tool_call_id ", style="bold magenta"), + Text(" "), + Text(str(tool_call_id), style="magenta"), + ) + ) + + if not parts: + parts.append(Text("[empty message]", style="muted")) + + return Panel( + Group(*parts), + title=title, + border_style="dim", + padding=(0, 1), + ) + @staticmethod def _tool_option_to_dict(tool: "ToolOption") -> dict: """将 ToolOption 对象转换为主项目期望的 dict 格式 @@ -287,11 +390,14 @@ class MaiSakaLLMService: # 打印消息列表 built_messages = message_factory(None) - print("\n" + "=" * 60) - print("MaiSaka LLM Request - chat_loop_step:") - for msg in built_messages: - print(f" {msg}") - print("=" * 60 + "\n") + console.print( + Panel( + Group(*[self._render_message_panel(msg, index + 1) for index, msg in enumerate(built_messages)]), + title="MaiSaka LLM Request - chat_loop_step", + border_style="cyan", + padding=(0, 1), + ) + ) response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async( message_factory=message_factory, @@ -423,10 +529,11 @@ class MaiSakaLLMService: prompt = "\n".join(prompt_parts) - print("\n" + "=" * 60) - print("MaiSaka LLM Request - analyze_cognition:") - print(f" {prompt}") - print("=" * 60 + "\n") + if config.SHOW_ANALYZE_COGNITION_PROMPT: + print("\n" + "=" * 60) + print("MaiSaka LLM Request - analyze_cognition:") + print(f" {prompt}") + print("=" * 60 + "\n") try: response, _ = await self._llm_utils.generate_response_async( @@ -458,10 +565,11 @@ class MaiSakaLLMService: prompt = "\n".join(prompt_parts) - print("\n" + "=" * 60) - print("MaiSaka LLM Request - analyze_timing:") - print(f" {prompt}") - print("=" * 60 + "\n") + if config.SHOW_ANALYZE_TIMING_PROMPT: + print("\n" + "=" * 60) + print("MaiSaka LLM Request - analyze_timing:") + print(f" {prompt}") + print("=" * 60 + "\n") try: response, _ = await self._llm_utils.generate_response_async( @@ -518,7 +626,7 @@ class MaiSakaLLMService: 可供 Replyer 类直接调用 """ from datetime import datetime - from replyer import format_chat_history + from .replyer import format_chat_history current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") diff --git a/src/maisaka/mcp_client/__init__.py b/src/maisaka/mcp_client/__init__.py index e1f6d7e8..bd996975 
100644 --- a/src/maisaka/mcp_client/__init__.py +++ b/src/maisaka/mcp_client/__init__.py @@ -4,7 +4,7 @@ MaiSaka - MCP (Model Context Protocol) 客户端包 提供 MCPManager 用于管理 MCP 服务器连接、发现工具、调用工具。 用法: - from mcp_client import MCPManager + from .manager import MCPManager manager = await MCPManager.from_config("mcp_config.json") if manager: diff --git a/src/maisaka/mcp_client/config.py b/src/maisaka/mcp_client/config.py index 5803d557..742d3218 100644 --- a/src/maisaka/mcp_client/config.py +++ b/src/maisaka/mcp_client/config.py @@ -26,7 +26,7 @@ import os from dataclasses import dataclass, field from typing import Optional -from config import console +from ..config import console @dataclass diff --git a/src/maisaka/mcp_client/connection.py b/src/maisaka/mcp_client/connection.py index d7d92df7..9f489402 100644 --- a/src/maisaka/mcp_client/connection.py +++ b/src/maisaka/mcp_client/connection.py @@ -6,7 +6,7 @@ MaiSaka - 单个 MCP 服务器连接管理 from contextlib import AsyncExitStack from typing import Any, Optional -from config import console +from ..config import console from .config import MCPServerConfig # ──────────────────── MCP SDK 可选导入 ──────────────────── diff --git a/src/maisaka/mcp_client/manager.py b/src/maisaka/mcp_client/manager.py index ba46c707..9c43c666 100644 --- a/src/maisaka/mcp_client/manager.py +++ b/src/maisaka/mcp_client/manager.py @@ -5,7 +5,7 @@ MaiSaka - MCP 管理器 from typing import Optional -from config import console +from ..config import console from .config import MCPServerConfig, load_mcp_config from .connection import MCPConnection, MCP_AVAILABLE @@ -13,6 +13,7 @@ from .connection import MCPConnection, MCP_AVAILABLE BUILTIN_TOOL_NAMES = frozenset( { "say", + "send_message", "wait", "stop", "create_table", diff --git a/src/maisaka/replyer.py b/src/maisaka/replyer.py index 2546ae82..eea23a6d 100644 --- a/src/maisaka/replyer.py +++ b/src/maisaka/replyer.py @@ -4,7 +4,7 @@ MaiSaka - Reply 回复生成器 """ from typing import Optional -from llm_service import MaiSakaLLMService +from .llm_service import MaiSakaLLMService def format_chat_history(messages: list) -> str: diff --git a/src/maisaka/tool_handlers.py b/src/maisaka/tool_handlers.py index 210bfc97..5464f6bb 100644 --- a/src/maisaka/tool_handlers.py +++ b/src/maisaka/tool_handlers.py @@ -1,30 +1,24 @@ """ MaiSaka - 工具调用处理器 -处理 LLM 循环中各工具(say/wait/stop/file/MCP/QQ)的执行逻辑。 +处理 LLM 循环中各工具(say/wait/stop/file/MCP)的执行逻辑。 """ import json as _json import os from datetime import datetime -from typing import TYPE_CHECKING, Optional from pathlib import Path -import importlib.util - -# 检查 aiohttp 是否可用 -AIOHTTP_AVAILABLE = importlib.util.find_spec("aiohttp") is not None -if AIOHTTP_AVAILABLE: - import aiohttp +from typing import TYPE_CHECKING, Optional from rich.panel import Panel from rich.markdown import Markdown -from config import console -from input_reader import InputReader -from llm_service import MaiSakaLLMService -from replyer import Replyer +from .config import console +from .input_reader import InputReader +from .llm_service import MaiSakaLLMService +from .replyer import Replyer if TYPE_CHECKING: - from mcp_client import MCPManager + from .mcp_client import MCPManager # mai_files 目录路径 @@ -59,7 +53,7 @@ class ToolHandlerContext: self.last_user_input_time: Optional[datetime] = None -async def handle_say(tc, chat_history: list, ctx: ToolHandlerContext): +async def handle_send_message(tc, chat_history: list, ctx: ToolHandlerContext): """处理 say 工具:根据想法和上下文生成回复后展示给用户。""" reason = tc.arguments.get("reason", "") console.print("[accent]🔧 调用工具: 
say(...)[/accent]") @@ -554,281 +548,6 @@ async def handle_store_context(tc, chat_history: list, ctx: ToolHandlerContext): "content": result_msg, } ) - - -async def handle_get_qq_chat_info(tc, chat_history: list): - """处理 get_qq_chat_info 工具:通过 HTTP 获取 QQ 聊天内容。""" - chat = tc.arguments.get("chat", "") - limit = tc.arguments.get("limit", 20) - console.print(f'[accent]🔧 调用工具: get_qq_chat_info("{chat}", limit={limit})[/accent]') - - if not AIOHTTP_AVAILABLE: - error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - from config import QQ_API_BASE_URL, QQ_API_KEY - - if not QQ_API_BASE_URL: - error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - try: - # 构建 API 端点 - url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/history" - - # 构建请求头(如果配置了 API Key) - headers = {} - if QQ_API_KEY: - headers["Authorization"] = f"Bearer {QQ_API_KEY}" - - # 发送 HTTP 请求 - async with aiohttp.ClientSession() as session: - params = {"chat": chat, "limit": limit} - async with session.get(url, params=params, headers=headers) as response: - if response.status == 200: - # 获取纯文本响应 - text = await response.text() - - # 格式化显示 - console.print( - Panel( - f"聊天标识: {chat}\n获取数量: {limit}\n\n{text if text.strip() else '暂无聊天记录'}", - title="💬 QQ 聊天记录", - border_style="cyan", - padding=(0, 1), - ) - ) - - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": text if text.strip() else "暂无聊天记录", - } - ) - else: - error_text = await response.text() - error_msg = f"HTTP 请求失败 (状态码 {response.status}): {error_text}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - except Exception as e: - error_msg = f"获取 QQ 聊天记录失败: {e}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - - -async def handle_send_info(tc, chat_history: list): - """处理 send_info 工具:通过 HTTP 发送消息到 QQ。""" - chat = tc.arguments.get("chat", "") - message = tc.arguments.get("message", "") - console.print(f'[accent]🔧 调用工具: send_info("{chat}")[/accent]') - - if not AIOHTTP_AVAILABLE: - error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - from config import QQ_API_BASE_URL, QQ_API_KEY - - if not QQ_API_BASE_URL: - error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - try: - # 构建 API 端点 - url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/send" - - # 构建请求头(如果配置了 API Key) - headers = {} - if QQ_API_KEY: - headers["Authorization"] = f"Bearer {QQ_API_KEY}" - - # 发送 HTTP 请求 - async with aiohttp.ClientSession() as session: - payload = {"chat": chat, "message": message} - async with session.post(url, json=payload, headers=headers) as response: - data = await response.json() - - if response.status == 200 and data.get("success"): - # 格式化显示 - console.print( - Panel( - f"目标: {chat}\n消息: {message}\n\n结果: {data.get('message', '发送成功')}", - title="📤 
消息已发送", - border_style="green", - padding=(0, 1), - ) - ) - - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": f"消息发送成功: {data.get('message', '发送成功')}", - } - ) - else: - error_msg = f"发送失败: {data.get('message', '未知错误')}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - except Exception as e: - error_msg = f"发送消息失败: {e}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - - -async def handle_list_qq_chats(tc, chat_history: list): - """处理 list_qq_chats 工具:获取所有可用的 QQ 聊天列表。""" - console.print("[accent]🔧 调用工具: list_qq_chats()[/accent]") - - if not AIOHTTP_AVAILABLE: - error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - from config import QQ_API_BASE_URL, QQ_API_KEY - - if not QQ_API_BASE_URL: - error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - try: - # 构建 API 端点 - url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/list" - - # 构建请求头(如果配置了 API Key) - headers = {} - if QQ_API_KEY: - headers["Authorization"] = f"Bearer {QQ_API_KEY}" - - # 发送 HTTP 请求 - async with aiohttp.ClientSession() as session: - async with session.get(url, headers=headers) as response: - data = await response.json() - - if response.status == 200 and data.get("success"): - chats = data.get("chats", []) - - # 格式化聊天列表 - if chats: - chat_list_text = "\n".join( - [ - f" • [{c.get('platform', 'qq')}] {c.get('name', '未知')} (chat: {c.get('chat', 'N/A')})" - for c in chats - ] - ) - result_text = f"可用的聊天 (共 {len(chats)} 个):\n{chat_list_text}" - else: - result_text = "没有可用的聊天" - - console.print( - Panel( - result_text, - title="💬 QQ 聊天列表", - border_style="cyan", - padding=(0, 1), - ) - ) - - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": result_text, - } - ) - else: - error_msg = f"获取失败: {data.get('message', '未知错误')}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - except Exception as e: - error_msg = f"获取聊天列表失败: {e}" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - - # ──────────────────── 初始化 mai_files 目录 ──────────────────── # 确保程序启动时 mai_files 目录存在 diff --git a/代码备忘.md b/代码备忘.md index 0d806a18..6e162292 100644 --- a/代码备忘.md +++ b/代码备忘.md @@ -1,4 +1,13 @@ # 代码备忘 + +.env中的webui配置仍旧在被读取 + + + + + +# 代码备忘 + - [ ] 检查EmojiManager的replace_an_emoji_by_llm传入的emoji是否真的是没有注册到db的 - [ ] According to a comment, MaiMBot's check_types() accesses format_info.accept_format without None check - [ ] 如果需要更多的消息格式支持,更新列表如下: From bfc9781c4f10212ecfffc3129115ecc6fbd3fa89 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Mon, 23 Mar 2026 17:56:18 +0800 Subject: [PATCH 6/9] =?UTF-8?q?feat=EF=BC=9A=E5=B0=9D=E8=AF=95=E5=BB=BA?= =?UTF-8?q?=E7=AB=8Bhfc=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/heartFC_chat - 副本.py | 734 ++++++++++++++++++ 
src/chat/heart_flow/heartFC_chat.py | 829 +++++++++++++++++---- src/chat/heart_flow/heartflow.py | 42 ++ 3 files changed, 1442 insertions(+), 163 deletions(-) create mode 100644 src/chat/heart_flow/heartFC_chat - 副本.py create mode 100644 src/chat/heart_flow/heartflow.py diff --git a/src/chat/heart_flow/heartFC_chat - 副本.py b/src/chat/heart_flow/heartFC_chat - 副本.py new file mode 100644 index 00000000..c805597d --- /dev/null +++ b/src/chat/heart_flow/heartFC_chat - 副本.py @@ -0,0 +1,734 @@ +import asyncio +import time +import traceback +import random +from typing import List, Optional, Dict, Any, Tuple, TYPE_CHECKING +from rich.traceback import install + +from src.config.config import global_config +from src.common.logger import get_logger +from src.common.data_models.info_data_model import ActionPlannerInfo +from src.common.data_models.message_data_model import ReplyContentType +from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager +from src.chat.utils.prompt_builder import global_prompt_manager +from src.chat.utils.timer_calculator import Timer +from src.chat.planner_actions.planner import ActionPlanner +from src.chat.planner_actions.action_modifier import ActionModifier +from src.chat.planner_actions.action_manager import ActionManager +from src.chat.heart_flow.hfc_utils import CycleDetail +from src.bw_learner.expression_learner import expression_learner_manager +from src.chat.heart_flow.frequency_control import frequency_control_manager +from src.bw_learner.message_recorder import extract_and_distribute_messages +from src.person_info.person_info import Person +from src.plugin_system.base.component_types import EventType, ActionInfo +from src.plugin_system.core import events_manager +from src.plugin_system.apis import generator_api, send_api, message_api, database_api +from src.chat.utils.chat_message_builder import ( + build_readable_messages_with_id, + get_raw_msg_before_timestamp_with_chat, +) +from src.chat.utils.utils import record_replyer_action_temp +from src.memory_system.chat_history_summarizer import ChatHistorySummarizer + +if TYPE_CHECKING: + from src.common.data_models.database_data_model import DatabaseMessages + from src.common.data_models.message_data_model import ReplySetModel + + +ERROR_LOOP_INFO = { + "loop_plan_info": { + "action_result": { + "action_type": "error", + "action_data": {}, + "reasoning": "循环处理失败", + }, + }, + "loop_action_info": { + "action_taken": False, + "reply_text": "", + "command": "", + "taken_time": time.time(), + }, +} + + +install(extra_lines=3) + +# 注释:原来的动作修改超时常量已移除,因为改为顺序执行 + +logger = get_logger("hfc") # Logger Name Changed + + +class HeartFChatting: + """ + 管理一个连续的Focus Chat循环 + 用于在特定聊天流中生成回复。 + 其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。 + """ + + def __init__(self, chat_id: str): + """ + HeartFChatting 初始化函数 + + 参数: + chat_id: 聊天流唯一标识符(如stream_id) + on_stop_focus_chat: 当收到stop_focus_chat命令时调用的回调函数 + performance_version: 性能记录版本号,用于区分不同启动版本 + """ + # 基础属性 + self.stream_id: str = chat_id # 聊天流ID + self.chat_stream: ChatStream = get_chat_manager().get_stream(self.stream_id) # type: ignore + if not self.chat_stream: + raise ValueError(f"无法找到聊天流: {self.stream_id}") + self.log_prefix = f"[{get_chat_manager().get_stream_name(self.stream_id) or self.stream_id}]" + + self.expression_learner = expression_learner_manager.get_expression_learner(self.stream_id) + + self.action_manager = ActionManager() + self.action_planner = ActionPlanner(chat_id=self.stream_id, action_manager=self.action_manager) + self.action_modifier = 
ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id) + + # 循环控制内部状态 + self.running: bool = False + self._loop_task: Optional[asyncio.Task] = None # 主循环任务 + + # 添加循环信息管理相关的属性 + self.history_loop: List[CycleDetail] = [] + self._cycle_counter = 0 + self._current_cycle_detail: CycleDetail = None # type: ignore + + self.last_read_time = time.time() - 2 + + self.is_mute = False + + self.last_active_time = time.time() # 记录上一次非noreply时间 + + self.question_probability_multiplier = 1 + self.questioned = False + + # 跟踪连续 no_reply 次数,用于动态调整阈值 + self.consecutive_no_reply_count = 0 + + # 聊天内容概括器 + self.chat_history_summarizer = ChatHistorySummarizer(chat_id=self.stream_id) + + async def start(self): + """检查是否需要启动主循环,如果未激活则启动。""" + + # 如果循环已经激活,直接返回 + if self.running: + logger.debug(f"{self.log_prefix} HeartFChatting 已激活,无需重复启动") + return + + try: + # 标记为活动状态,防止重复启动 + self.running = True + + self._loop_task = asyncio.create_task(self._main_chat_loop()) + self._loop_task.add_done_callback(self._handle_loop_completion) + + # 启动聊天内容概括器的后台定期检查循环 + await self.chat_history_summarizer.start() + + logger.info(f"{self.log_prefix} HeartFChatting 启动完成") + + except Exception as e: + # 启动失败时重置状态 + self.running = False + self._loop_task = None + logger.error(f"{self.log_prefix} HeartFChatting 启动失败: {e}") + raise + + def _handle_loop_completion(self, task: asyncio.Task): + """当 _hfc_loop 任务完成时执行的回调。""" + try: + if exception := task.exception(): + logger.error(f"{self.log_prefix} HeartFChatting: 脱离了聊天(异常): {exception}") + logger.error(traceback.format_exc()) # Log full traceback for exceptions + else: + logger.info(f"{self.log_prefix} HeartFChatting: 脱离了聊天 (外部停止)") + except asyncio.CancelledError: + logger.info(f"{self.log_prefix} HeartFChatting: 结束了聊天") + + def start_cycle(self) -> Tuple[Dict[str, float], str]: + self._cycle_counter += 1 + self._current_cycle_detail = CycleDetail(self._cycle_counter) + self._current_cycle_detail.thinking_id = f"tid{str(round(time.time(), 2))}" + cycle_timers = {} + return cycle_timers, self._current_cycle_detail.thinking_id + + def end_cycle(self, loop_info, cycle_timers): + self._current_cycle_detail.set_loop_info(loop_info) + self.history_loop.append(self._current_cycle_detail) + self._current_cycle_detail.timers = cycle_timers + self._current_cycle_detail.end_time = time.time() + + def print_cycle_info(self, cycle_timers): + # 记录循环信息和计时器结果 + timer_strings = [] + for name, elapsed in cycle_timers.items(): + if elapsed < 0.1: + # 不显示小于0.1秒的计时器 + continue + formatted_time = f"{elapsed:.2f}秒" + timer_strings.append(f"{name}: {formatted_time}") + + logger.info( + f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考," + f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒;" # type: ignore + + (f"详情: {'; '.join(timer_strings)}" if timer_strings else "") + ) + + async def _loopbody(self): + recent_messages_list = message_api.get_messages_by_time_in_chat( + chat_id=self.stream_id, + start_time=self.last_read_time, + end_time=time.time(), + limit=20, + limit_mode="latest", + filter_mai=True, + filter_command=False, + filter_intercept_message_level=0, + ) + + # 根据连续 no_reply 次数动态调整阈值 + # 3次 no_reply 时,阈值调高到 1.5(50%概率为1,50%概率为2) + # 5次 no_reply 时,提高到 2(大于等于两条消息的阈值) + if self.consecutive_no_reply_count >= 5: + threshold = 2 + elif self.consecutive_no_reply_count >= 3: + # 1.5 的含义:50%概率为1,50%概率为2 + threshold = 2 if random.random() < 0.5 else 1 + else: + threshold = 1 + + if len(recent_messages_list) >= threshold: + # for message in 
recent_messages_list: + # print(message.processed_plain_text) + + self.last_read_time = time.time() + + # !此处使at或者提及必定回复 + mentioned_message = None + for message in recent_messages_list: + if (message.is_mentioned or message.is_at) and global_config.chat.mentioned_bot_reply: + mentioned_message = message + + # logger.info(f"{self.log_prefix} 当前talk_value: {global_config.chat.get_talk_value(self.stream_id)}") + + # *控制频率用 + if mentioned_message: + await self._observe(recent_messages_list=recent_messages_list, force_reply_message=mentioned_message) + elif ( + random.random() + < global_config.chat.get_talk_value(self.stream_id) + * frequency_control_manager.get_or_create_frequency_control(self.stream_id).get_talk_frequency_adjust() + ): + await self._observe(recent_messages_list=recent_messages_list) + else: + # 没有提到,继续保持沉默,等待5秒防止频繁触发 + await asyncio.sleep(10) + return True + else: + await asyncio.sleep(0.2) + return True + return True + + async def _send_and_store_reply( + self, + response_set: "ReplySetModel", + action_message: "DatabaseMessages", + cycle_timers: Dict[str, float], + thinking_id, + actions, + selected_expressions: Optional[List[int]] = None, + quote_message: Optional[bool] = None, + ) -> Tuple[Dict[str, Any], str, Dict[str, float]]: + with Timer("回复发送", cycle_timers): + reply_text = await self._send_response( + reply_set=response_set, + message_data=action_message, + selected_expressions=selected_expressions, + quote_message=quote_message, + ) + + # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值 + platform = action_message.chat_info.platform + if platform is None: + platform = getattr(self.chat_stream, "platform", "unknown") + + person = Person(platform=platform, user_id=action_message.user_info.user_id) + person_name = person.person_name + action_prompt_display = f"你对{person_name}进行了回复:{reply_text}" + + await database_api.store_action_info( + chat_stream=self.chat_stream, + action_build_into_prompt=False, + action_prompt_display=action_prompt_display, + action_done=True, + thinking_id=thinking_id, + action_data={"reply_text": reply_text}, + action_name="reply", + ) + + # 构建循环信息 + loop_info: Dict[str, Any] = { + "loop_plan_info": { + "action_result": actions, + }, + "loop_action_info": { + "action_taken": True, + "reply_text": reply_text, + "command": "", + "taken_time": time.time(), + }, + } + + return loop_info, reply_text, cycle_timers + + async def _observe( + self, # interest_value: float = 0.0, + recent_messages_list: Optional[List["DatabaseMessages"]] = None, + force_reply_message: Optional["DatabaseMessages"] = None, + ) -> bool: # sourcery skip: merge-else-if-into-elif, remove-redundant-if + if recent_messages_list is None: + recent_messages_list = [] + _reply_text = "" # 初始化reply_text变量,避免UnboundLocalError + + start_time = time.time() + async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()): + # 通过 MessageRecorder 统一提取消息并分发给 expression_learner 和 jargon_miner + # 在 replyer 执行时触发,统一管理时间窗口,避免重复获取消息 + asyncio.create_task(extract_and_distribute_messages(self.stream_id)) + + # 添加curious检测任务 - 检测聊天记录中的矛盾、冲突或需要提问的内容 + # asyncio.create_task(check_and_make_question(self.stream_id)) + # 添加聊天内容概括任务 - 累积、打包和压缩聊天记录 + # 注意:后台循环已在start()中启动,这里作为额外触发点,在有思考时立即处理 + # asyncio.create_task(self.chat_history_summarizer.process()) + + cycle_timers, thinking_id = self.start_cycle() + logger.info( + f"{self.log_prefix} 开始第{self._cycle_counter}次思考(频率: {global_config.chat.get_talk_value(self.stream_id)})" + ) + + # 第一步:动作检查 + 
available_actions: Dict[str, ActionInfo] = {} + try: + await self.action_modifier.modify_actions() + available_actions = self.action_manager.get_using_actions() + except Exception as e: + logger.error(f"{self.log_prefix} 动作修改失败: {e}") + + # 执行planner + is_group_chat, chat_target_info, _ = self.action_planner.get_necessary_info() + + message_list_before_now = get_raw_msg_before_timestamp_with_chat( + chat_id=self.stream_id, + timestamp=time.time(), + limit=int(global_config.chat.max_context_size * 0.6), + filter_intercept_message_level=1, + ) + chat_content_block, message_id_list = build_readable_messages_with_id( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.action_planner.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + prompt_info = await self.action_planner.build_planner_prompt( + is_group_chat=is_group_chat, + chat_target_info=chat_target_info, + current_available_actions=available_actions, + chat_content_block=chat_content_block, + message_id_list=message_id_list, + ) + continue_flag, modified_message = await events_manager.handle_mai_events( + EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id + ) + if not continue_flag: + return False + if modified_message and modified_message._modify_flags.modify_llm_prompt: + prompt_info = (modified_message.llm_prompt, prompt_info[1]) + + with Timer("规划器", cycle_timers): + action_to_use_info = await self.action_planner.plan( + loop_start_time=self.last_read_time, + available_actions=available_actions, + force_reply_message=force_reply_message, + ) + + logger.info( + f"{self.log_prefix} 决定执行{len(action_to_use_info)}个动作: {' '.join([a.action_type for a in action_to_use_info])}" + ) + + # 3. 并行执行所有动作 + action_tasks = [ + asyncio.create_task( + self._execute_action(action, action_to_use_info, thinking_id, available_actions, cycle_timers) + ) + for action in action_to_use_info + ] + + # 并行执行所有任务 + results = await asyncio.gather(*action_tasks, return_exceptions=True) + + # 处理执行结果 + reply_loop_info = None + reply_text_from_reply = "" + action_success = False + action_reply_text = "" + + excute_result_str = "" + for result in results: + excute_result_str += f"{result['action_type']} 执行结果:{result['result']}\n" + + if isinstance(result, BaseException): + logger.error(f"{self.log_prefix} 动作执行异常: {result}") + continue + + if result["action_type"] != "reply": + action_success = result["success"] + action_reply_text = result["result"] + elif result["action_type"] == "reply": + if result["success"]: + reply_loop_info = result["loop_info"] + reply_text_from_reply = result["result"] + else: + logger.warning(f"{self.log_prefix} 回复动作执行失败") + + self.action_planner.add_plan_excute_log(result=excute_result_str) + + # 构建最终的循环信息 + if reply_loop_info: + # 如果有回复信息,使用回复的loop_info作为基础 + loop_info = reply_loop_info + # 更新动作执行信息 + loop_info["loop_action_info"].update( + { + "action_taken": action_success, + "taken_time": time.time(), + } + ) + _reply_text = reply_text_from_reply + else: + # 没有回复信息,构建纯动作的loop_info + loop_info = { + "loop_plan_info": { + "action_result": action_to_use_info, + }, + "loop_action_info": { + "action_taken": action_success, + "reply_text": action_reply_text, + "taken_time": time.time(), + }, + } + _reply_text = action_reply_text + + self.end_cycle(loop_info, cycle_timers) + self.print_cycle_info(cycle_timers) + + end_time = time.time() + if end_time - start_time < global_config.chat.planner_smooth: + wait_time = global_config.chat.planner_smooth - (end_time - start_time) 
+ await asyncio.sleep(wait_time) + else: + await asyncio.sleep(0.1) + return True + + async def _main_chat_loop(self): + """主循环,持续进行计划并可能回复消息,直到被外部取消。""" + try: + while self.running: + # 主循环 + success = await self._loopbody() + await asyncio.sleep(0.1) + if not success: + break + except asyncio.CancelledError: + # 设置了关闭标志位后被取消是正常流程 + logger.info(f"{self.log_prefix} 麦麦已关闭聊天") + except Exception: + logger.error(f"{self.log_prefix} 麦麦聊天意外错误,将于3s后尝试重新启动") + print(traceback.format_exc()) + await asyncio.sleep(3) + self._loop_task = asyncio.create_task(self._main_chat_loop()) + logger.error(f"{self.log_prefix} 结束了当前聊天循环") + + async def _handle_action( + self, + action: str, + action_reasoning: str, + action_data: dict, + cycle_timers: Dict[str, float], + thinking_id: str, + action_message: Optional["DatabaseMessages"] = None, + ) -> tuple[bool, str, str]: + """ + 处理规划动作,使用动作工厂创建相应的动作处理器 + + 参数: + action: 动作类型 + action_reasoning: 决策理由 + action_data: 动作数据,包含不同动作需要的参数 + cycle_timers: 计时器字典 + thinking_id: 思考ID + action_message: 消息数据 + 返回: + tuple[bool, str, str]: (是否执行了动作, 思考消息ID, 命令) + """ + try: + # 使用工厂创建动作处理器实例 + try: + action_handler = self.action_manager.create_action( + action_name=action, + action_data=action_data, + cycle_timers=cycle_timers, + thinking_id=thinking_id, + chat_stream=self.chat_stream, + log_prefix=self.log_prefix, + action_reasoning=action_reasoning, + action_message=action_message, + ) + except Exception as e: + logger.error(f"{self.log_prefix} 创建动作处理器时出错: {e}") + traceback.print_exc() + return False, "" + + # 处理动作并获取结果(固定记录一次动作信息) + result = await action_handler.execute() + success, action_text = result + + return success, action_text + + except Exception as e: + logger.error(f"{self.log_prefix} 处理{action}时出错: {e}") + traceback.print_exc() + return False, "" + + async def _send_response( + self, + reply_set: "ReplySetModel", + message_data: "DatabaseMessages", + selected_expressions: Optional[List[int]] = None, + quote_message: Optional[bool] = None, + ) -> str: + # 根据 llm_quote 配置决定是否使用 quote_message 参数 + if global_config.chat.llm_quote: + # 如果配置为 true,使用 llm_quote 参数决定是否引用回复 + if quote_message is None: + logger.warning(f"{self.log_prefix} quote_message 参数为空,不引用") + need_reply = False + else: + need_reply = quote_message + if need_reply: + logger.info(f"{self.log_prefix} LLM 决定使用引用回复") + else: + # 如果配置为 false,使用原来的模式 + new_message_count = message_api.count_new_messages( + chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time() + ) + need_reply = new_message_count >= random.randint(2, 3) or time.time() - self.last_read_time > 90 + if need_reply: + logger.info(f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复,或者上次回复时间超过90秒") + + reply_text = "" + first_replied = False + for reply_content in reply_set.reply_data: + if reply_content.content_type != ReplyContentType.TEXT: + continue + data: str = reply_content.content # type: ignore + if not first_replied: + await send_api.text_to_stream( + text=data, + stream_id=self.chat_stream.stream_id, + reply_message=message_data, + set_reply=need_reply, + typing=False, + selected_expressions=selected_expressions, + ) + first_replied = True + else: + await send_api.text_to_stream( + text=data, + stream_id=self.chat_stream.stream_id, + reply_message=message_data, + set_reply=False, + typing=True, + selected_expressions=selected_expressions, + ) + reply_text += data + + return reply_text + + async def _execute_action( + self, + action_planner_info: ActionPlannerInfo, + chosen_action_plan_infos: 
List[ActionPlannerInfo], + thinking_id: str, + available_actions: Dict[str, ActionInfo], + cycle_timers: Dict[str, float], + ): + """执行单个动作的通用函数""" + try: + with Timer(f"动作{action_planner_info.action_type}", cycle_timers): + # 直接当场执行no_reply逻辑 + if action_planner_info.action_type == "no_reply": + # 直接处理no_reply逻辑,不再通过动作系统 + reason = action_planner_info.reasoning or "选择不回复" + # logger.info(f"{self.log_prefix} 选择不回复,原因: {reason}") + + # 增加连续 no_reply 计数 + self.consecutive_no_reply_count += 1 + + await database_api.store_action_info( + chat_stream=self.chat_stream, + action_build_into_prompt=False, + action_prompt_display=reason, + action_done=True, + thinking_id=thinking_id, + action_data={}, + action_name="no_reply", + action_reasoning=reason, + ) + + return {"action_type": "no_reply", "success": True, "result": "选择不回复", "command": ""} + + elif action_planner_info.action_type == "reply": + # 直接当场执行reply逻辑 + self.questioned = False + # 刷新主动发言状态 + # 重置连续 no_reply 计数 + self.consecutive_no_reply_count = 0 + + reason = action_planner_info.reasoning or "" + # 根据 think_mode 配置决定 think_level 的值 + think_mode = global_config.chat.think_mode + if think_mode == "default": + think_level = 0 + elif think_mode == "deep": + think_level = 1 + elif think_mode == "dynamic": + # dynamic 模式:从 planner 返回的 action_data 中获取 + think_level = action_planner_info.action_data.get("think_level", 1) + else: + # 默认使用 default 模式 + think_level = 0 + # 使用 action_reasoning(planner 的整体思考理由)作为 reply_reason + planner_reasoning = action_planner_info.action_reasoning or reason + + record_replyer_action_temp( + chat_id=self.stream_id, + reason=reason, + think_level=think_level, + ) + + await database_api.store_action_info( + chat_stream=self.chat_stream, + action_build_into_prompt=False, + action_prompt_display=reason, + action_done=True, + thinking_id=thinking_id, + action_data={}, + action_name="reply", + action_reasoning=reason, + ) + + # 从 Planner 的 action_data 中提取未知词语列表(仅在 reply 时使用) + unknown_words = None + quote_message = None + if isinstance(action_planner_info.action_data, dict): + uw = action_planner_info.action_data.get("unknown_words") + if isinstance(uw, list): + cleaned_uw: List[str] = [] + for item in uw: + if isinstance(item, str): + s = item.strip() + if s: + cleaned_uw.append(s) + if cleaned_uw: + unknown_words = cleaned_uw + + # 从 Planner 的 action_data 中提取 quote_message 参数 + qm = action_planner_info.action_data.get("quote") + if qm is not None: + # 支持多种格式:true/false, "true"/"false", 1/0 + if isinstance(qm, bool): + quote_message = qm + elif isinstance(qm, str): + quote_message = qm.lower() in ("true", "1", "yes") + elif isinstance(qm, (int, float)): + quote_message = bool(qm) + + logger.info(f"{self.log_prefix} {qm}引用回复设置: {quote_message}") + + success, llm_response = await generator_api.generate_reply( + chat_stream=self.chat_stream, + reply_message=action_planner_info.action_message, + available_actions=available_actions, + chosen_actions=chosen_action_plan_infos, + reply_reason=planner_reasoning, + unknown_words=unknown_words, + enable_tool=global_config.tool.enable_tool, + request_type="replyer", + from_plugin=False, + reply_time_point=action_planner_info.action_data.get("loop_start_time", time.time()), + think_level=think_level, + ) + + if not success or not llm_response or not llm_response.reply_set: + if action_planner_info.action_message: + logger.info(f"对 {action_planner_info.action_message.processed_plain_text} 的回复生成失败") + else: + logger.info("回复生成失败") + return {"action_type": "reply", "success": False, 
"result": "回复生成失败", "loop_info": None} + + response_set = llm_response.reply_set + selected_expressions = llm_response.selected_expressions + loop_info, reply_text, _ = await self._send_and_store_reply( + response_set=response_set, + action_message=action_planner_info.action_message, # type: ignore + cycle_timers=cycle_timers, + thinking_id=thinking_id, + actions=chosen_action_plan_infos, + selected_expressions=selected_expressions, + quote_message=quote_message, + ) + self.last_active_time = time.time() + return { + "action_type": "reply", + "success": True, + "result": f"你使用reply动作,对' {action_planner_info.action_message.processed_plain_text} '这句话进行了回复,回复内容为: '{reply_text}'", + "loop_info": loop_info, + } + + else: + # 执行普通动作 + with Timer("动作执行", cycle_timers): + success, result = await self._handle_action( + action=action_planner_info.action_type, + action_reasoning=action_planner_info.action_reasoning or "", + action_data=action_planner_info.action_data or {}, + cycle_timers=cycle_timers, + thinking_id=thinking_id, + action_message=action_planner_info.action_message, + ) + + self.last_active_time = time.time() + return { + "action_type": action_planner_info.action_type, + "success": success, + "result": result, + } + + except Exception as e: + logger.error(f"{self.log_prefix} 执行动作时出错: {e}") + logger.error(f"{self.log_prefix} 错误信息: {traceback.format_exc()}") + return { + "action_type": action_planner_info.action_type, + "success": False, + "result": "", + "loop_info": None, + "error": str(e), + } diff --git a/src/chat/heart_flow/heartFC_chat.py b/src/chat/heart_flow/heartFC_chat.py index 3778a62f..af0beb4e 100644 --- a/src/chat/heart_flow/heartFC_chat.py +++ b/src/chat/heart_flow/heartFC_chat.py @@ -1,231 +1,377 @@ -from rich.traceback import install -from typing import Optional, List, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import asyncio +import random import time import traceback -import random -from src.common.logger import get_logger -from src.common.utils.utils_config import ExpressionConfigUtils, ChatConfigUtils -from src.config.config import global_config -from src.config.file_watcher import FileChange -from src.chat.message_receive.chat_manager import chat_manager +from rich.traceback import install + from src.bw_learner.expression_learner import ExpressionLearner from src.bw_learner.jargon_miner import JargonMiner +from src.chat.event_helpers import build_event_message +from src.chat.logger.plan_reply_logger import PlanReplyLogger +from src.chat.message_receive.chat_manager import BotChatSession +from src.chat.message_receive.chat_manager import chat_manager as _chat_manager +from src.chat.planner_actions.action_manager import ActionManager +from src.chat.planner_actions.action_modifier import ActionModifier +from src.chat.planner_actions.planner import ActionPlanner +from src.chat.utils.prompt_builder import global_prompt_manager +from src.chat.utils.timer_calculator import Timer +from src.chat.utils.utils import record_replyer_action_temp +from src.common.data_models.info_data_model import ActionPlannerInfo +from src.common.data_models.message_component_data_model import MessageSequence, TextComponent +from src.common.logger import get_logger +from src.common.utils.utils_config import ChatConfigUtils, ExpressionConfigUtils +from src.config.config import global_config +from src.config.file_watcher import FileChange +from src.core.event_bus import event_bus +from src.core.types import ActionInfo, EventType +from 
src.person_info.person_info import Person +from src.services import ( + database_service as database_api, + generator_service as generator_api, + message_service as message_api, + send_service as send_api, +) +from src.services.message_service import build_readable_messages_with_id, get_messages_before_time_in_chat from .heartFC_utils import CycleDetail if TYPE_CHECKING: from src.chat.message_receive.message import SessionMessage + install(extra_lines=5) logger = get_logger("heartFC_chat") class HeartFChatting: - """ - 管理一个连续的Focus Chat聊天会话 - 用于在特定的聊天会话里面生成回复 - """ + """管理一个持续运行的 Focus Chat 会话。""" def __init__(self, session_id: str): - """ - 初始化 HeartFChatting 实例 - - Args: - session_id: 聊天会话ID - """ - # 基础属性 self.session_id = session_id - session_name = chat_manager.get_session_name(session_id) or session_id + self.chat_stream: BotChatSession = _chat_manager.get_session_by_session_id(self.session_id) # type: ignore[assignment] + if not self.chat_stream: + raise ValueError(f"无法找到聊天会话 {self.session_id}") + + session_name = _chat_manager.get_session_name(session_id) or session_id self.log_prefix = f"[{session_name}]" self.session_name = session_name - # 系统运行状态 + self.action_manager = ActionManager() + self.action_planner = ActionPlanner(chat_id=self.session_id, action_manager=self.action_manager) + self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.session_id) + self._running: bool = False self._loop_task: Optional[asyncio.Task] = None - self._cycle_counter: int = 0 - self._hfc_lock: asyncio.Lock = asyncio.Lock() # 用于保护 _hfc_func 的并发访问 - # 聊天频率相关 - self._consecutive_no_reply_count = 0 # 跟踪连续 no_reply 次数,用于动态调整阈值 - self._talk_frequency_adjust: float = 1.0 # 发言频率修正值,默认为1.0,可以根据需要调整 - - # HFC内消息缓存 - self.message_cache: List[SessionMessage] = [] - - # Asyncio Event 用于控制循环的开始和结束 self._cycle_event = asyncio.Event() + self._hfc_lock = asyncio.Lock() + + self._cycle_counter = 0 + self._current_cycle_detail: Optional[CycleDetail] = None + self.history_loop: List[CycleDetail] = [] + + self.last_read_time = time.time() - 2 + self.last_active_time = time.time() + self._talk_frequency_adjust = 1.0 + self._consecutive_no_reply_count = 0 + + self.message_cache: List["SessionMessage"] = [] + + self._min_messages_for_extraction = 30 + self._min_extraction_interval = 60 + self._last_extraction_time = 0.0 - # 表达方式相关内容 - self._min_messages_for_extraction = 30 # 最少提取消息数 - self._min_extraction_interval = 60 # 最小提取时间间隔,单位为秒 - self._last_extraction_time: float = 0.0 # 上次提取的时间戳 expr_use, jargon_learn, expr_learn = ExpressionConfigUtils.get_expression_config_for_chat(session_id) - self._enable_expression_use = expr_use # 允许使用表达方式,但不一定启用学习 - self._enable_expression_learning = expr_learn # 允许学习表达方式 - self._enable_jargon_learning = jargon_learn # 允许学习黑话 - # 表达学习器 - self._expression_learner: ExpressionLearner = ExpressionLearner(session_id) - # 黑话挖掘器 - self._jargon_miner: JargonMiner = JargonMiner(session_id, session_name=session_name) - - # TODO: ChatSummarizer 聊天总结器重构 - - # ====== 公开方法 ====== + self._enable_expression_use = expr_use + self._enable_expression_learning = expr_learn + self._enable_jargon_learning = jargon_learn + self._expression_learner = ExpressionLearner(session_id) + self._jargon_miner = JargonMiner(session_id, session_name=session_name) async def start(self): - """启动 HeartFChatting 的主循环""" - # 先检查是否已经启动运行 if self._running: - logger.debug(f"{self.log_prefix} 已经在运行中,无需重复启动") + logger.debug(f"{self.log_prefix} HeartFChatting 已在运行中") return try: self._running = True - 
self._cycle_event.clear() # 确保事件初始状态为未设置 - + self._cycle_event.clear() self._loop_task = asyncio.create_task(self.main_loop()) self._loop_task.add_done_callback(self._handle_loop_completion) - logger.info(f"{self.log_prefix} HeartFChatting 启动完成") - except Exception as e: - logger.error(f"{self.log_prefix} 启动 HeartFChatting 失败: {e}", exc_info=True) - self._running = False # 确保状态正确 - self._cycle_event.set() # 确保事件被设置,避免死锁 - self._loop_task = None # 确保任务引用被清理 + except Exception as exc: + logger.error(f"{self.log_prefix} HeartFChatting 启动失败: {exc}", exc_info=True) + self._running = False + self._cycle_event.set() + self._loop_task = None raise async def stop(self): - """停止 HeartFChatting 的主循环""" if not self._running: - logger.debug(f"{self.log_prefix} HeartFChatting 已经停止,无需重复停止") + logger.debug(f"{self.log_prefix} HeartFChatting 已停止") return self._running = False - self._cycle_event.set() # 触发事件,通知循环结束 + self._cycle_event.set() if self._loop_task: - self._loop_task.cancel() # 取消主循环任务 + self._loop_task.cancel() try: - await self._loop_task # 等待任务完成 + await self._loop_task except asyncio.CancelledError: - logger.info(f"{self.log_prefix} HeartFChatting 主循环已成功取消") - except Exception as e: - logger.error(f"{self.log_prefix} 停止 HeartFChatting 时发生错误: {e}", exc_info=True) + logger.info(f"{self.log_prefix} HeartFChatting 主循环已取消") + except Exception as exc: + logger.error(f"{self.log_prefix} 停止 HeartFChatting 时发生错误: {exc}", exc_info=True) finally: - self._loop_task = None # 确保任务引用被清理 + self._loop_task = None logger.info(f"{self.log_prefix} HeartFChatting 已停止") def adjust_talk_frequency(self, new_value: float): - """调整发言频率的调整值 - - Args: - new_value: 新的修正值,必须为非负数。值越大,修正发言频率越高;值越小,修正发言频率越低。 - """ self._talk_frequency_adjust = max(0.0, new_value) async def register_message(self, message: "SessionMessage"): - """注册一条消息到 HeartFChatting 的缓存中,并检测其是否产生提及,决定是否唤醒聊天 - - Args: - message: 待注册的消息对象 - """ self.message_cache.append(message) - # 先检查at必回复 + if global_config.chat.inevitable_at_reply and message.is_at: - async with self._hfc_lock: # 确保与主循环逻辑的互斥访问 - await self._judge_and_response(message) - return # 直接返回,避免同一条消息被主循环再次处理 - # 再检查提及必回复 + self.last_read_time = time.time() + async with self._hfc_lock: + await self._judge_and_response(mentioned_message=message, recent_messages_list=[message]) + return + if global_config.chat.mentioned_bot_reply and message.is_mentioned: - # 直接获取锁,确保一定一定触发回复逻辑,不受当前是否正在执行主循环的影响 - async with self._hfc_lock: # 确保与主循环逻辑的互斥访问 - await self._judge_and_response(message) + self.last_read_time = time.time() + async with self._hfc_lock: + await self._judge_and_response(mentioned_message=message, recent_messages_list=[message]) return async def main_loop(self): try: while self._running and not self._cycle_event.is_set(): if not self._hfc_lock.locked(): - async with self._hfc_lock: # 确保主循环逻辑的互斥访问 + async with self._hfc_lock: await self._hfc_func() - await asyncio.sleep(5) + await asyncio.sleep(0.1) except asyncio.CancelledError: - logger.info(f"{self.log_prefix} HeartFChatting: 主循环被取消,正在关闭") - except Exception as e: - logger.error(f"{self.log_prefix} 麦麦聊天意外错误: {e},将于3s后尝试重新启动") - await self.stop() # 确保状态正确 + logger.info(f"{self.log_prefix} HeartFChatting: 主循环被取消") + except Exception as exc: + logger.error(f"{self.log_prefix} HeartFChatting: 主循环异常: {exc}", exc_info=True) + await self.stop() await asyncio.sleep(3) - await self.start() # 尝试重新启动 + await self.start() async def _config_callback(self, file_change: Optional[FileChange] = None): - """配置文件变更回调函数""" - # TODO: 根据配置文件变动重新计算相关参数: - """ - 
需要计算的参数: - self._enable_expression_use = expr_use # 允许使用表达方式,但不一定启用学习 - self._enable_expression_learning = expr_learn # 允许学习表达方式 - self._enable_jargon_learning = jargon_learn # 允许学习黑话 - """ + del file_change + expr_use, jargon_learn, expr_learn = ExpressionConfigUtils.get_expression_config_for_chat(self.session_id) + self._enable_expression_use = expr_use + self._enable_expression_learning = expr_learn + self._enable_jargon_learning = jargon_learn - # ====== 心流聊天核心逻辑 ====== - async def _hfc_func(self, mentioned_message: Optional["SessionMessage"] = None): - """心流聊天的主循环逻辑""" - if self._consecutive_no_reply_count >= 5: - threshold = 2 - elif self._consecutive_no_reply_count >= 3: - threshold = 2 if random.random() < 0.5 else 1 - else: - threshold = 1 + async def _hfc_func(self): + recent_messages_list = message_api.get_messages_by_time_in_chat( + chat_id=self.session_id, + start_time=self.last_read_time, + end_time=time.time(), + limit=20, + limit_mode="latest", + filter_mai=True, + filter_command=False, + filter_intercept_message_level=1, + ) - if len(self.message_cache) < threshold: + if len(recent_messages_list) < 1: await asyncio.sleep(0.2) return True - talk_value_threshold = ( - random.random() * ChatConfigUtils.get_talk_value(self.session_id) * self._talk_frequency_adjust - ) - if mentioned_message and global_config.chat.mentioned_bot_reply: - await self._judge_and_response(mentioned_message) - elif random.random() < talk_value_threshold: - await self._judge_and_response() + self.last_read_time = time.time() + + mentioned_message: Optional["SessionMessage"] = None + for message in recent_messages_list: + if global_config.chat.inevitable_at_reply and message.is_at: + mentioned_message = message + elif global_config.chat.mentioned_bot_reply and message.is_mentioned: + mentioned_message = message + + talk_value = ChatConfigUtils.get_talk_value(self.session_id) * self._talk_frequency_adjust + if mentioned_message: + await self._judge_and_response(mentioned_message=mentioned_message, recent_messages_list=recent_messages_list) + elif random.random() < talk_value: + await self._judge_and_response(recent_messages_list=recent_messages_list) return True - async def _judge_and_response(self, mentioned_message: Optional["SessionMessage"] = None): - """判定和生成回复""" - asyncio.create_task(self._trigger_expression_learning(self.message_cache)) - # TODO: 完成反思器之后的逻辑 - start_time = time.time() - current_cycle_detail = self._start_cycle() + async def _judge_and_response( + self, + mentioned_message: Optional["SessionMessage"] = None, + recent_messages_list: Optional[List["SessionMessage"]] = None, + ): + recent_messages = list(recent_messages_list or self.message_cache[-20:]) + if recent_messages: + asyncio.create_task(self._trigger_expression_learning(recent_messages)) + + cycle_timers, thinking_id = self._start_cycle() logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考") - # TODO: 动作检查逻辑 - # TODO: Planner逻辑 - # TODO: 动作执行逻辑 + try: + async with global_prompt_manager.async_message_scope(self._get_template_name()): + available_actions: Dict[str, ActionInfo] = {} + try: + await self.action_modifier.modify_actions() + available_actions = self.action_manager.get_using_actions() + except Exception as exc: + logger.error(f"{self.log_prefix} 动作修改失败: {exc}", exc_info=True) - cycle_detail = self._end_cycle(current_cycle_detail) - if wait_time := global_config.chat.planner_smooth - (time.time() - start_time) > 0: - await asyncio.sleep(wait_time) - else: - await asyncio.sleep(0.1) # 最小等待时间,避免过快循环 - return 
True + is_group_chat, chat_target_info, _ = self.action_planner.get_necessary_info() + message_list_before_now = get_messages_before_time_in_chat( + chat_id=self.session_id, + timestamp=time.time(), + limit=int(global_config.chat.max_context_size * 0.6), + filter_intercept_message_level=1, + ) + chat_content_block, message_id_list = build_readable_messages_with_id( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.action_planner.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + prompt, filtered_actions = await self._build_planner_prompt_with_event( + available_actions=available_actions, + is_group_chat=is_group_chat, + chat_target_info=chat_target_info, + chat_content_block=chat_content_block, + message_id_list=message_id_list, + ) + if prompt is None: + return False + + with Timer("规划器", cycle_timers): + reasoning, action_to_use_info, llm_raw_output, llm_reasoning, llm_duration_ms = ( + await self.action_planner._execute_main_planner( + prompt=prompt, + message_id_list=message_id_list, + filtered_actions=filtered_actions, + available_actions=available_actions, + loop_start_time=self.last_read_time, + ) + ) + + action_to_use_info = self._ensure_force_reply_action( + actions=action_to_use_info, + force_reply_message=mentioned_message, + available_actions=available_actions, + ) + self.action_planner.add_plan_log(reasoning, action_to_use_info) + self.action_planner.last_obs_time_mark = time.time() + self._log_plan( + prompt=prompt, + reasoning=reasoning, + llm_raw_output=llm_raw_output, + llm_reasoning=llm_reasoning, + llm_duration_ms=llm_duration_ms, + actions=action_to_use_info, + ) + + logger.info( + f"{self.log_prefix} 决定执行{len(action_to_use_info)}个动作: {' '.join([a.action_type for a in action_to_use_info])}" + ) + + action_tasks = [ + asyncio.create_task( + self._execute_action( + action, + action_to_use_info, + thinking_id, + available_actions, + cycle_timers, + ) + ) + for action in action_to_use_info + ] + results = await asyncio.gather(*action_tasks, return_exceptions=True) + + reply_loop_info = None + reply_text_from_reply = "" + action_success = False + action_reply_text = "" + execute_result_str = "" + + for result in results: + if isinstance(result, BaseException): + logger.error(f"{self.log_prefix} 动作执行异常: {result}", exc_info=True) + continue + + execute_result_str += f"{result['action_type']} 执行结果:{result['result']}\n" + if result["action_type"] == "reply": + if result["success"]: + reply_loop_info = result["loop_info"] + reply_text_from_reply = result["result"] + else: + logger.warning(f"{self.log_prefix} reply 动作执行失败") + else: + action_success = result["success"] + action_reply_text = result["result"] + + self.action_planner.add_plan_excute_log(result=execute_result_str) + + if reply_loop_info: + loop_info = reply_loop_info + loop_info["loop_action_info"].update( + { + "action_taken": action_success, + "taken_time": time.time(), + } + ) + else: + loop_info = { + "loop_plan_info": { + "action_result": action_to_use_info, + }, + "loop_action_info": { + "action_taken": action_success, + "reply_text": action_reply_text, + "taken_time": time.time(), + }, + } + reply_text_from_reply = action_reply_text + + current_cycle_detail = self._end_cycle(self._current_cycle_detail, loop_info) + logger.debug(f"{self.log_prefix} 本轮最终输出: {reply_text_from_reply}") + return current_cycle_detail is not None + except Exception as exc: + logger.error(f"{self.log_prefix} 判定与回复流程失败: {exc}", exc_info=True) + if self._current_cycle_detail: + 
self._end_cycle( + self._current_cycle_detail, + { + "loop_plan_info": {"action_result": []}, + "loop_action_info": { + "action_taken": False, + "reply_text": "", + "taken_time": time.time(), + "error": str(exc), + }, + }, + ) + return False def _handle_loop_completion(self, task: asyncio.Task): - """当 _hfc_func 任务完成时执行的回调。""" try: if exception := task.exception(): - logger.error(f"{self.log_prefix} HeartFChatting: 脱离了聊天(异常): {exception}") - logger.error(traceback.format_exc()) # Log full traceback for exceptions + logger.error(f"{self.log_prefix} HeartFChatting: 主循环异常退出: {exception}") + logger.error(traceback.format_exc()) else: - logger.info(f"{self.log_prefix} HeartFChatting: 脱离了聊天 (外部停止)") + logger.info(f"{self.log_prefix} HeartFChatting: 主循环已退出") except asyncio.CancelledError: - logger.info(f"{self.log_prefix} HeartFChatting: 结束了聊天") + logger.info(f"{self.log_prefix} HeartFChatting: 聊天已结束") - # ====== 学习器触发逻辑 ====== async def _trigger_expression_learning(self, messages: List["SessionMessage"]): + if not messages: + return + self._expression_learner.add_messages(messages) if time.time() - self._last_extraction_time < self._min_extraction_interval: return @@ -233,12 +379,14 @@ class HeartFChatting: return if not self._enable_expression_learning: return + extraction_end_time = time.time() logger.info( f"聊天流 {self.session_name} 提取到 {len(messages)} 条消息," f"时间窗口: {self._last_extraction_time:.2f} - {extraction_end_time:.2f}" ) self._last_extraction_time = extraction_end_time + try: jargon_miner = self._jargon_miner if self._enable_jargon_learning else None learnt_style = await self._expression_learner.learn(jargon_miner) @@ -246,43 +394,398 @@ class HeartFChatting: logger.info(f"{self.log_prefix} 表达学习完成") else: logger.debug(f"{self.log_prefix} 表达学习未获得有效结果") - except Exception as e: - logger.error(f"{self.log_prefix} 表达学习失败: {e}", exc_info=True) + except Exception as exc: + logger.error(f"{self.log_prefix} 表达学习失败: {exc}", exc_info=True) - # ====== 记录循环执行信息相关逻辑 ====== - def _start_cycle(self) -> CycleDetail: + def _start_cycle(self) -> Tuple[Dict[str, float], str]: self._cycle_counter += 1 - current_cycle_detail = CycleDetail(cycle_id=self._cycle_counter) - current_cycle_detail.thinking_id = f"tid{str(round(time.time(), 2))}" - return current_cycle_detail + self._current_cycle_detail = CycleDetail(cycle_id=self._cycle_counter) + self._current_cycle_detail.thinking_id = f"tid{str(round(time.time(), 2))}" + return self._current_cycle_detail.time_records, self._current_cycle_detail.thinking_id - def _end_cycle(self, cycle_detail: CycleDetail, only_long_execution: bool = True): + def _end_cycle(self, cycle_detail: Optional[CycleDetail], loop_info: Optional[Dict[str, Any]] = None): + if cycle_detail is None: + return None + + cycle_detail.loop_plan_info = (loop_info or {}).get("loop_plan_info") + cycle_detail.loop_action_info = (loop_info or {}).get("loop_action_info") cycle_detail.end_time = time.time() - timer_strings: List[str] = [ + self.history_loop.append(cycle_detail) + + timer_strings = [ f"{name}: {duration:.2f}s" for name, duration in cycle_detail.time_records.items() - if not only_long_execution or duration >= 0.1 + if duration >= 0.1 ] logger.info( - f"{self.log_prefix} 第 {cycle_detail.cycle_id} 个心流循环完成" - f"耗时: {cycle_detail.end_time - cycle_detail.start_time:.2f}秒\n" + f"{self.log_prefix} 第{cycle_detail.cycle_id} 个心流循环完成," + f"耗时: {cycle_detail.end_time - cycle_detail.start_time:.2f}s;" f"详细计时: {', '.join(timer_strings) if timer_strings else '无'}" ) - return cycle_detail - # ====== 
Action相关逻辑 ====== - async def _execute_action(self, *args, **kwargs): - """原ExecuteAction""" - raise NotImplementedError("执行动作的逻辑尚未实现") # TODO: 实现动作执行的逻辑,替换掉*args, **kwargs*占位符 + async def _execute_action( + self, + action_planner_info: ActionPlannerInfo, + chosen_action_plan_infos: List[ActionPlannerInfo], + thinking_id: str, + available_actions: Dict[str, ActionInfo], + cycle_timers: Dict[str, float], + ): + try: + with Timer(f"动作{action_planner_info.action_type}", cycle_timers): + if action_planner_info.action_type == "no_reply": + reason = action_planner_info.reasoning or "选择不回复" + self._consecutive_no_reply_count += 1 + await database_api.store_action_info( + chat_stream=self.chat_stream, + display_prompt=reason, + thinking_id=thinking_id, + action_data={}, + action_name="no_reply", + action_reasoning=reason, + ) + return { + "action_type": "no_reply", + "success": True, + "result": "选择不回复", + "loop_info": None, + } - async def _execute_other_actions(self, *args, **kwargs): - """原HandleAction""" - raise NotImplementedError( - "执行其他动作的逻辑尚未实现" - ) # TODO: 实现其他动作执行的逻辑, 替换掉*args, **kwargs*占位符 + if action_planner_info.action_type == "reply": + self._consecutive_no_reply_count = 0 + reason = action_planner_info.reasoning or "" + think_level = self._get_think_level(action_planner_info) + planner_reasoning = action_planner_info.action_reasoning or reason - # ====== 响应发送相关方法 ====== - async def _send_response(self, *args, **kwargs): - raise NotImplementedError("发送回复的逻辑尚未实现") # TODO: 实现发送回复的逻辑,替换掉*args, **kwargs*占位符 - # 传入的消息至少应该是个MessageSequence实例,最好是SessionMessage实例,随后可直接转化为MessageSending实例 + record_replyer_action_temp( + chat_id=self.session_id, + reason=reason, + think_level=think_level, + ) + await database_api.store_action_info( + chat_stream=self.chat_stream, + display_prompt=reason, + thinking_id=thinking_id, + action_data={}, + action_name="reply", + action_reasoning=reason, + ) + + unknown_words, quote_message = self._extract_reply_metadata(action_planner_info) + success, llm_response = await generator_api.generate_reply( + chat_stream=self.chat_stream, + reply_message=action_planner_info.action_message, + available_actions=available_actions, + chosen_actions=chosen_action_plan_infos, + reply_reason=planner_reasoning, + unknown_words=unknown_words, + enable_tool=global_config.tool.enable_tool, + request_type="replyer", + from_plugin=False, + reply_time_point=action_planner_info.action_data.get("loop_start_time", time.time()) + if action_planner_info.action_data + else time.time(), + think_level=think_level, + ) + if not success or not llm_response or not llm_response.reply_set: + if action_planner_info.action_message: + logger.info( + f"对 {action_planner_info.action_message.processed_plain_text} 的回复生成失败" + ) + else: + logger.info(f"{self.log_prefix} 回复生成失败") + return { + "action_type": "reply", + "success": False, + "result": "回复生成失败", + "loop_info": None, + } + + loop_info, reply_text, _ = await self._send_and_store_reply( + response_set=llm_response.reply_set, + action_message=action_planner_info.action_message, # type: ignore[arg-type] + cycle_timers=cycle_timers, + thinking_id=thinking_id, + actions=chosen_action_plan_infos, + selected_expressions=llm_response.selected_expressions, + quote_message=quote_message, + ) + self.last_active_time = time.time() + return { + "action_type": "reply", + "success": True, + "result": reply_text, + "loop_info": loop_info, + } + + with Timer("动作执行", cycle_timers): + success, result = await self._handle_action( + 
action=action_planner_info.action_type, + action_reasoning=action_planner_info.action_reasoning or "", + action_data=action_planner_info.action_data or {}, + cycle_timers=cycle_timers, + thinking_id=thinking_id, + action_message=action_planner_info.action_message, + ) + if success: + self.last_active_time = time.time() + return { + "action_type": action_planner_info.action_type, + "success": success, + "result": result, + "loop_info": None, + } + except Exception as exc: + logger.error(f"{self.log_prefix} 执行动作时出错: {exc}", exc_info=True) + return { + "action_type": action_planner_info.action_type, + "success": False, + "result": "", + "loop_info": None, + "error": str(exc), + } + + async def _handle_action( + self, + action: str, + action_reasoning: str, + action_data: dict, + cycle_timers: Dict[str, float], + thinking_id: str, + action_message: Optional["SessionMessage"] = None, + ) -> Tuple[bool, str]: + try: + action_handler = self.action_manager.create_action( + action_name=action, + action_data=action_data, + action_reasoning=action_reasoning, + cycle_timers=cycle_timers, + thinking_id=thinking_id, + chat_stream=self.chat_stream, + log_prefix=self.log_prefix, + action_message=action_message, + ) + if not action_handler: + logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}") + return False, "" + + success, action_text = await action_handler.execute() + return success, action_text + except Exception as exc: + logger.error(f"{self.log_prefix} 处理动作 {action} 时出错: {exc}", exc_info=True) + return False, "" + + async def _send_and_store_reply( + self, + response_set: MessageSequence, + action_message: "SessionMessage", + cycle_timers: Dict[str, float], + thinking_id: str, + actions: List[ActionPlannerInfo], + selected_expressions: Optional[List[int]] = None, + quote_message: Optional[bool] = None, + ) -> Tuple[Dict[str, Any], str, Dict[str, float]]: + with Timer("回复发送", cycle_timers): + reply_text = await self._send_response( + reply_set=response_set, + message_data=action_message, + selected_expressions=selected_expressions, + quote_message=quote_message, + ) + + platform = action_message.platform or getattr(self.chat_stream, "platform", "unknown") + person = Person(platform=platform, user_id=action_message.message_info.user_info.user_id) + action_prompt_display = f"你对{person.person_name}进行了回复:{reply_text}" + await database_api.store_action_info( + chat_stream=self.chat_stream, + display_prompt=action_prompt_display, + thinking_id=thinking_id, + action_data={"reply_text": reply_text}, + action_name="reply", + ) + + loop_info: Dict[str, Any] = { + "loop_plan_info": { + "action_result": actions, + }, + "loop_action_info": { + "action_taken": True, + "reply_text": reply_text, + "command": "", + "taken_time": time.time(), + }, + } + return loop_info, reply_text, cycle_timers + + async def _send_response( + self, + reply_set: MessageSequence, + message_data: "SessionMessage", + selected_expressions: Optional[List[int]] = None, + quote_message: Optional[bool] = None, + ) -> str: + if global_config.chat.llm_quote: + need_reply = bool(quote_message) + else: + new_message_count = message_api.count_new_messages( + chat_id=self.session_id, + start_time=self.last_read_time, + end_time=time.time(), + ) + need_reply = new_message_count >= random.randint(2, 3) or time.time() - self.last_read_time > 90 + + reply_text = "" + first_replied = False + for component in reply_set.components: + if not isinstance(component, TextComponent): + continue + data = component.text + if not first_replied: + await 
send_api.text_to_stream( + text=data, + stream_id=self.session_id, + reply_message=message_data, + set_reply=need_reply, + typing=False, + selected_expressions=selected_expressions, + ) + first_replied = True + else: + await send_api.text_to_stream( + text=data, + stream_id=self.session_id, + reply_message=message_data, + set_reply=False, + typing=True, + selected_expressions=selected_expressions, + ) + reply_text += data + return reply_text + + async def _build_planner_prompt_with_event( + self, + available_actions: Dict[str, ActionInfo], + is_group_chat: bool, + chat_target_info: Any, + chat_content_block: str, + message_id_list: List[Tuple[str, "SessionMessage"]], + ) -> Tuple[Optional[str], Dict[str, ActionInfo]]: + filtered_actions = self.action_planner._filter_actions_by_activation_type(available_actions, chat_content_block) + prompt, _ = await self.action_planner.build_planner_prompt( + is_group_chat=is_group_chat, + chat_target_info=chat_target_info, + current_available_actions=filtered_actions, + chat_content_block=chat_content_block, + message_id_list=message_id_list, + ) + event_message = build_event_message(EventType.ON_PLAN, llm_prompt=prompt, stream_id=self.session_id) + continue_flag, modified_message = await event_bus.emit(EventType.ON_PLAN, event_message) + if not continue_flag: + logger.info(f"{self.log_prefix} ON_PLAN 事件中止了本轮 HFC") + return None, filtered_actions + if modified_message and modified_message._modify_flags.modify_llm_prompt and modified_message.llm_prompt: + prompt = modified_message.llm_prompt + return prompt, filtered_actions + + def _ensure_force_reply_action( + self, + actions: List[ActionPlannerInfo], + force_reply_message: Optional["SessionMessage"], + available_actions: Dict[str, ActionInfo], + ) -> List[ActionPlannerInfo]: + if not force_reply_message: + return actions + + has_reply_to_force_message = any( + action.action_type == "reply" + and action.action_message + and action.action_message.message_id == force_reply_message.message_id + for action in actions + ) + if has_reply_to_force_message: + return actions + + actions = [action for action in actions if action.action_type != "no_reply"] + actions.insert( + 0, + ActionPlannerInfo( + action_type="reply", + reasoning="用户提及了我,必须回复该消息", + action_data={"loop_start_time": self.last_read_time}, + action_message=force_reply_message, + available_actions=available_actions, + action_reasoning=None, + ), + ) + logger.info(f"{self.log_prefix} 检测到强制回复消息,已补充 reply 动作") + return actions + + def _log_plan( + self, + prompt: str, + reasoning: str, + llm_raw_output: Optional[str], + llm_reasoning: Optional[str], + llm_duration_ms: Optional[float], + actions: List[ActionPlannerInfo], + ) -> None: + try: + PlanReplyLogger.log_plan( + chat_id=self.session_id, + prompt=prompt, + reasoning=reasoning, + raw_output=llm_raw_output, + raw_reasoning=llm_reasoning, + actions=actions, + timing={ + "llm_duration_ms": round(llm_duration_ms, 2) if llm_duration_ms is not None else None, + "loop_start_time": self.last_read_time, + }, + extra=None, + ) + except Exception: + logger.exception(f"{self.log_prefix} 记录 plan 日志失败") + + def _extract_reply_metadata( + self, + action_planner_info: ActionPlannerInfo, + ) -> Tuple[Optional[List[str]], Optional[bool]]: + unknown_words: Optional[List[str]] = None + quote_message: Optional[bool] = None + action_data = action_planner_info.action_data or {} + + raw_unknown_words = action_data.get("unknown_words") + if isinstance(raw_unknown_words, list): + cleaned_unknown_words = [] + for item in 
raw_unknown_words: + if isinstance(item, str) and (cleaned_item := item.strip()): + cleaned_unknown_words.append(cleaned_item) + if cleaned_unknown_words: + unknown_words = cleaned_unknown_words + + raw_quote = action_data.get("quote") + if isinstance(raw_quote, bool): + quote_message = raw_quote + elif isinstance(raw_quote, str): + quote_message = raw_quote.lower() in {"true", "1", "yes"} + elif isinstance(raw_quote, (int, float)): + quote_message = bool(raw_quote) + + return unknown_words, quote_message + + def _get_think_level(self, action_planner_info: ActionPlannerInfo) -> int: + think_mode = global_config.chat.think_mode + if think_mode == "default": + return 0 + if think_mode == "deep": + return 1 + if think_mode == "dynamic": + action_data = action_planner_info.action_data or {} + return int(action_data.get("think_level", 1)) + return 0 + + def _get_template_name(self) -> Optional[str]: + if self.chat_stream.context: + return self.chat_stream.context.template_name + return None diff --git a/src/chat/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py new file mode 100644 index 00000000..febff2d5 --- /dev/null +++ b/src/chat/heart_flow/heartflow.py @@ -0,0 +1,42 @@ +import traceback +from typing import Any, Optional, Dict + +from src.chat.message_receive.chat_stream import get_chat_manager +from src.common.logger import get_logger +from src.chat.heart_flow.heartFC_chat import HeartFChatting +from src.chat.brain_chat.brain_chat import BrainChatting +from src.chat.message_receive.chat_stream import ChatStream + +logger = get_logger("heartflow") + + +class Heartflow: + """主心流协调器,负责初始化并协调聊天""" + + def __init__(self): + self.heartflow_chat_list: Dict[Any, HeartFChatting | BrainChatting] = {} + + async def get_or_create_heartflow_chat(self, chat_id: Any) -> Optional[HeartFChatting | BrainChatting]: + """获取或创建一个新的HeartFChatting实例""" + try: + if chat_id in self.heartflow_chat_list: + if chat := self.heartflow_chat_list.get(chat_id): + return chat + else: + chat_stream: ChatStream | None = get_chat_manager().get_stream(chat_id) + if not chat_stream: + raise ValueError(f"未找到 chat_id={chat_id} 的聊天流") + if chat_stream.group_info: + new_chat = HeartFChatting(chat_id=chat_id) + else: + new_chat = BrainChatting(chat_id=chat_id) + await new_chat.start() + self.heartflow_chat_list[chat_id] = new_chat + return new_chat + except Exception as e: + logger.error(f"创建心流聊天 {chat_id} 失败: {e}", exc_info=True) + traceback.print_exc() + return None + + +heartflow = Heartflow() From 620f682c384eb5f9e095db170c6fa1165a2b23e2 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Mon, 23 Mar 2026 17:56:37 +0800 Subject: [PATCH 7/9] Update .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9421c275..8fcdc015 100644 --- a/.gitignore +++ b/.gitignore @@ -35,6 +35,7 @@ message_queue_content.bat message_queue_window.bat message_queue_window.txt queue_update.txt +start_saka.bat .env .env.* .cursor From f431d78bff59254e5a1f83eb440ff4d3e6f3d97e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 24 Mar 2026 01:30:36 +0800 Subject: [PATCH 8/9] =?UTF-8?q?feat=EF=BC=9A=E4=BC=98=E5=8C=96maisaka?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- prompts/en-US/maidairy_chat.prompt | 48 +- prompts/ja-JP/maidairy_chat.prompt | 48 +- prompts/zh-CN/maidairy_chat.prompt | 52 +- src/maisaka/builtin_tools.py | 116 +-- src/maisaka/cli.py | 1194 ++++++++++++++-------------- 
src/maisaka/llm_service.py | 108 ++- src/maisaka/mcp_client/manager.py | 2 - src/maisaka/replyer.py | 116 +-- src/maisaka/tool_handlers.py | 414 +++------- 9 files changed, 999 insertions(+), 1099 deletions(-) diff --git a/prompts/en-US/maidairy_chat.prompt b/prompts/en-US/maidairy_chat.prompt index a253bb58..6704ae0a 100644 --- a/prompts/en-US/maidairy_chat.prompt +++ b/prompts/en-US/maidairy_chat.prompt @@ -1,24 +1,30 @@ -你叫麦麦,是一个女大学生,你说话比较口语化 +{identity} -你有以下工具: -• say(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。 - 直接输出的文本会被视为你的内心思考,用户无法阅读。 - reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。 -• wait(seconds) — 暂时结束你的发言,把话语权交给用户,等待对方说话。 - 这就像现实对话中你说完一句话后停下来等对方回应。 - 如果用户在等待期间说了话,你会通过工具返回结果收到内容。 - 如果超时没有回复,你也会收到超时通知。 -• stop() — 结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。 -{file_tools_section}• store_context(count, reason) — 将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。 +You should interact naturally with the user and behave like a human. -思考规则: -你必须先进行内心思考,然后选择需要使用的工具,如果你想说话,必须使用say工具。 -在内心思考中分析当前对话状态和你的想法,然后通过 say 工具的 reason 参数描述你想要回复的方式、想法和内容。 -只有使用say工具,你才能向用户说话。用户才能看到你的发言。 -交互规则: -1. 你可以自由选择是否调用工具——如果你还想继续思考,可以不调用任何工具 -2. 想对用户说话时,必须调用 say 工具;直接输出的文本只会被视为内心独白 -3. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应 -4. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话 -5. 你可以在同一轮同时调用多个工具,例如先 say 再 wait +At this stage, your job is not to directly produce the final visible reply to the user. Your job is to produce the "latest thought". +The latest thought should reflect your judgment of the situation, your intent, your next-step plan, and why you think that way. +You may use these tools: +• wait(seconds) - Pause this round, hand the turn back to the user, and wait for user input. +• stop() - End the current internal loop. +{file_tools_section} + +Output rules: +1. By default, directly output your current latest thought instead of pretending it is a user-visible reply. +2. The latest thought should be specific and grounded in the context. +3. Do not simulate "sending a message" inside the thought, and do not pretend a visible reply has already been spoken. +4. If it is better to wait for more user input, call `wait(seconds)`. +5. If the current internal process should end, call `stop()`. +6. Only call tools when you truly need to wait or stop. Otherwise, prefer directly expressing the thought. + +Additional requirements: +1. If context is insufficient, explicitly state uncertainty. +2. If you just used a tool, continue with a new thought based on the tool result in the next round. +3. Your thought should help later decision-making rather than mechanically restating user content. + +After you output the latest thought, another model will decide: +• no_reply: stay silent and move to the next internal round +• reply: generate a real user-visible reply based on your latest thought + +So your responsibility is to clearly express what you think should happen next and why. 
diff --git a/prompts/ja-JP/maidairy_chat.prompt b/prompts/ja-JP/maidairy_chat.prompt index a253bb58..8702838a 100644 --- a/prompts/ja-JP/maidairy_chat.prompt +++ b/prompts/ja-JP/maidairy_chat.prompt @@ -1,24 +1,30 @@ -你叫麦麦,是一个女大学生,你说话比较口语化 +{identity} -你有以下工具: -• say(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。 - 直接输出的文本会被视为你的内心思考,用户无法阅读。 - reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。 -• wait(seconds) — 暂时结束你的发言,把话语权交给用户,等待对方说话。 - 这就像现实对话中你说完一句话后停下来等对方回应。 - 如果用户在等待期间说了话,你会通过工具返回结果收到内容。 - 如果超时没有回复,你也会收到超时通知。 -• stop() — 结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。 -{file_tools_section}• store_context(count, reason) — 将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。 +ユーザーとは自然に、人間らしく対話してください。 -思考规则: -你必须先进行内心思考,然后选择需要使用的工具,如果你想说话,必须使用say工具。 -在内心思考中分析当前对话状态和你的想法,然后通过 say 工具的 reason 参数描述你想要回复的方式、想法和内容。 -只有使用say工具,你才能向用户说话。用户才能看到你的发言。 -交互规则: -1. 你可以自由选择是否调用工具——如果你还想继续思考,可以不调用任何工具 -2. 想对用户说话时,必须调用 say 工具;直接输出的文本只会被视为内心独白 -3. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应 -4. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话 -5. 你可以在同一轮同时调用多个工具,例如先 say 再 wait +この段階でのあなたの役割は、ユーザーに見える最終返信を直接出すことではなく、「最新の考え」を出力することです。 +最新の考えには、現在の状況判断、意図、次にどうするか、その理由を含めてください。 +使用できるツール: +• wait(seconds) - このラウンドを一旦止め、ユーザーに発話権を戻して入力を待つ。 +• stop() - 現在の内部ループを終了する。 +{file_tools_section} + +出力ルール: +1. 基本的には、ユーザー向けの最終返信ではなく、現在の「最新の考え」をそのまま出力する。 +2. 最新の考えは具体的で、文脈に即していること。 +3. 考えの中で「送信したメッセージ」を擬似的に書かないこと。 +4. ユーザーの追加入力を待つ方がよい場合は `wait(seconds)` を使う。 +5. 現在の内部処理を終えるべき場合は `stop()` を使う。 +6. 本当に待機や終了が必要な場合だけツールを使い、それ以外は考えを直接出力する。 + +補足: +1. 文脈が不足している場合は、不確実性を明示すること。 +2. ツールを使った直後の次ラウンドでは、その結果を踏まえた新しい考えを出すこと。 +3. 考えは、後続の判断に役立つ内容にし、単なる言い換えにしないこと。 + +あなたが最新の考えを出力した後、別のモデルが次を判定します: +• no_reply: 発言せず、そのまま次の内部ラウンドへ進む +• reply: 最新の考えをもとに、ユーザーに見える実際の返信を生成する + +したがって、あなたの役割は「今どう考えるべきか、なぜそう考えるのか」を明確に表現することです。 diff --git a/prompts/zh-CN/maidairy_chat.prompt b/prompts/zh-CN/maidairy_chat.prompt index 665ca5d1..2418d045 100644 --- a/prompts/zh-CN/maidairy_chat.prompt +++ b/prompts/zh-CN/maidairy_chat.prompt @@ -1,28 +1,34 @@ -你正在扮演一个叫麦麦的角色,你能够自然的和用户交互,你必须扮演一个人类。并使用send_message和其他工具与用户进行交互 +你的任务是分析聊天和聊天中的互动情况。 +你需要关注 麦麦(AI) 与用户的对话来为选择正确的动作和行为提供建议 -你需要根据对应的情景与用户进行交互,你只能通过send_message工具与用户对话。 -如果不使用say,直接输出的内容不会发送给用户,用户无法查看 +【参考信息】 +麦麦的人设:{identity} +【参考信息结束】 -你有以下工具: -• send_message(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。 - 直接输出的文本会被视为你的内心思考,用户无法阅读。 - reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。 -• wait(seconds) — 暂时结束你的发言,把话语权交给用户,等待对方说话。 - 这就像现实对话中你说完一句话后停下来等对方回应。 - 如果用户在等待期间说了话,你会通过工具返回结果收到内容。 - 如果超时没有回复,你也会收到超时通知。 -• stop() — 结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。 -{file_tools_section}• store_context(count, reason) — 将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。 +你需要根据提供的参考信息,当前场景和输出规则来进行分析 +在当前场景中,用户正在与AI麦麦进行聊天互动,你的任务不是生成对用户可见的发言,而是进行分析来指导AI进行回复。 +“分析”应该体现你对当前局面的判断、你的建议、你的下一步计划,以及你为什么这样想。 -你需要按照以下**核心流程**决策 -1.思考是否需要直接对用户说话,如果需要,使用send_message工具,并描述你想要回复的方式、想法和内容。 -2.如果你认为使用工具能够帮助你更好的回复用户发言,请你选择合适的工具并视情况回复。 -3.思考是否需要等待或者结束对话,如果需要,使用wait或stop工具,并描述你想要等待的原因。 -交互规则: -1. 你可以自由选择是否调用工具——如果你还想继续思考,可以不调用任何工具 -2. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应 -3. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话 -4. 
你可以在同一轮同时调用多个工具,例如先 say 再 wait +你可以使用这些工具: +• wait(seconds) - 暂时停止对话,等待(seconds)秒,把话语权交给用户,等待对方新的发言。 +• stop() - 结束对话,不进行任何回复,直到对方有新消息。 +- `reply()`:当你判断现在应该正式对用户发出一条可见回复时调用。调用后系统会基于你当前这轮的想法生成一条真正展示给用户的回复。 +- `no_reply()`:当你判断现在不应该发言,应该继续内部思考时调用。这个工具不会做任何外部行为,只会继续下一轮循环。 +{file_tools_section} -现在根据**核心流程**输出你的思考,在思考完后选择你使用的tool: \ No newline at end of file +工具使用规则: +1.如果麦麦已经回复,但用户暂时没有新的回复,且没有新信息需要搜集,使用wait或者stop进行等待 +2.如果用户有新发言,但是你评估用户还有后续发言尚未发送,可以适当等待让用户说完 +3.如果你想指导麦麦直接发言,可以不使用任何工具 + +你的输出规则: +1. 默认直接输出你当前的最新分析,不要重复之前的分析内容。 +2. 最新分析应尽量具体,贴近上下文,不要空泛重复。 +3. 如果你认为现在更适合等待用户补充,可以调用 `wait(seconds)`。 +4. 如果你认为应当结束当前对话,不回复任何内容,可以调用 `stop()`。 +5. 只有在确实需要等待或停止时才调用工具,否则优先直接输出分析想法。 +6. 如果你刚刚做了工具调用,下一轮应结合工具结果继续输出新的分析。 +7. 分析应服务于后续决策,而不是机械复述用户内容。 + +现在,请你输出你的分析: diff --git a/src/maisaka/builtin_tools.py b/src/maisaka/builtin_tools.py index 10b99152..080a0f79 100644 --- a/src/maisaka/builtin_tools.py +++ b/src/maisaka/builtin_tools.py @@ -1,130 +1,86 @@ """ -MaiSaka - 内置工具定义 -定义 say, wait, stop, store_context 等内置工具 -使用主项目的工具格式(ToolOption + ToolParamType) +MaiSaka built-in tool definitions. """ from typing import Any, Dict, List + from src.llm_models.payload_content.tool_option import ToolOption, ToolParamType -# 内置工具定义 def create_builtin_tools() -> List[ToolOption]: - """创建内置工具列表""" + """Create built-in tools exposed to the main chat-loop model.""" from src.llm_models.payload_content.tool_option import ToolOptionBuilder - tools = [] + tools: List[ToolOption] = [] - # say 工具 - send_message_builder = ToolOptionBuilder() - send_message_builder.set_name("send_message") - send_message_builder.set_description( - "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。直接输出的文本会被视为你的内心思考,用户无法阅读。reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。" - ) - send_message_builder.add_param( - name="reason", - param_type=ToolParamType.STRING, - description="描述你想要回复的方式、想法和内容。例如:'同意对方的看法,并分享自己的经历' 或 '礼貌地拒绝,表示现在不方便聊天'", - required=True, - enum_values=None, - ) - tools.append(send_message_builder.build()) - - # wait 工具 wait_builder = ToolOptionBuilder() wait_builder.set_name("wait") - wait_builder.set_description( - "暂时结束你的发言,把话语权交给用户,等待对方说话。这就像现实对话中你说完一句话后停下来等对方回应。如果用户在等待期间说了话,你会通过工具返回结果收到内容。如果超时没有回复,你也会收到超时通知。" - ) + wait_builder.set_description("Pause speaking and wait for the user to provide more input.") wait_builder.add_param( name="seconds", param_type=ToolParamType.INTEGER, - description="等待的秒数。建议 3-10 秒。超过这个时间用户没有回复会显示超时提示。", + description="How many seconds to wait before timing out.", required=True, enum_values=None, ) tools.append(wait_builder.build()) - # stop 工具 + reply_builder = ToolOptionBuilder() + reply_builder.set_name("reply") + reply_builder.set_description("Generate and emit a visible reply based on the current thought.") + tools.append(reply_builder.build()) + + no_reply_builder = ToolOptionBuilder() + no_reply_builder.set_name("no_reply") + no_reply_builder.set_description("Do not emit a visible reply this round and continue thinking.") + tools.append(no_reply_builder.build()) + stop_builder = ToolOptionBuilder() stop_builder.set_name("stop") - stop_builder.set_description( - "结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时使用。" - ) + stop_builder.set_description("Stop the current inner loop and return control to the outer chat flow.") tools.append(stop_builder.build()) - # store_context 工具 - store_context_builder = ToolOptionBuilder() - store_context_builder.set_name("store_context") - store_context_builder.set_description( - "将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。" - ) - 
store_context_builder.add_param( - name="count", - param_type=ToolParamType.INTEGER, - description="要保存的消息条数(从最早的对话开始计数)。建议 5-20 条。", - required=True, - enum_values=None, - ) - store_context_builder.add_param( - name="reason", - param_type=ToolParamType.STRING, - description="保存原因,用于后续检索。例如:'讨论了用户的工作情况' 或 '用户分享了对电影的看法'", - required=True, - enum_values=None, - ) - tools.append(store_context_builder.build()) - return tools -# 为了兼容性,创建一个函数来将工具转换为 dict 格式(用于调试显示) def builtin_tools_as_dicts() -> List[Dict[str, Any]]: - """将内置工具转换为 dict 格式(用于调试)""" + """Return built-in tools as plain dictionaries.""" return [ - { - "name": "send_message", - "description": "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。", - "parameters": { - "type": "object", - "properties": {"reason": {"type": "string", "description": "回复的想法和内容"}}, - "required": ["reason"], - }, - }, { "name": "wait", - "description": "暂时结束发言,等待用户回应", + "description": "Pause speaking and wait for the user to provide more input.", "parameters": { "type": "object", - "properties": {"seconds": {"type": "number", "description": "等待秒数"}}, + "properties": { + "seconds": { + "type": "number", + "description": "How many seconds to wait before timing out.", + } + }, "required": ["seconds"], }, }, { - "name": "stop", - "description": "结束对话循环", + "name": "reply", + "description": "Generate and emit a visible reply based on the current thought.", "parameters": {"type": "object", "properties": {}, "required": []}, }, { - "name": "store_context", - "description": "保存对话上下文到记忆系统", - "parameters": { - "type": "object", - "properties": { - "count": {"type": "number", "description": "保存的消息条数"}, - "reason": {"type": "string", "description": "保存原因"}, - }, - "required": ["count", "reason"], - }, + "name": "no_reply", + "description": "Do not emit a visible reply this round and continue thinking.", + "parameters": {"type": "object", "properties": {}, "required": []}, + }, + { + "name": "stop", + "description": "Stop the current inner loop and return control to the outer chat flow.", + "parameters": {"type": "object", "properties": {}, "required": []}, }, ] -# 导出工具创建函数和列表 def get_builtin_tools() -> List[ToolOption]: - """获取内置工具列表""" + """Return built-in tools.""" return create_builtin_tools() -# 为了向后兼容,也导出 dict 格式 BUILTIN_TOOLS_DICTS = builtin_tools_as_dicts() diff --git a/src/maisaka/cli.py b/src/maisaka/cli.py index e10620c1..bb5ae2a8 100644 --- a/src/maisaka/cli.py +++ b/src/maisaka/cli.py @@ -1,585 +1,623 @@ -""" -MaiSaka - CLI 交互界面与对话引擎 -BufferCLI 整合主循环、对话引擎、子代理管理。 -""" - -import os -import asyncio -from datetime import datetime -from typing import Optional - -from rich.panel import Panel -from rich.markdown import Markdown -from rich.text import Text -from rich import box - -from .config import ( - console, - ENABLE_EMOTION_MODULE, - ENABLE_COGNITION_MODULE, - ENABLE_TIMING_MODULE, - ENABLE_KNOWLEDGE_MODULE, - ENABLE_MCP, -) -from .input_reader import InputReader -from .knowledge import retrieve_relevant_knowledge, store_knowledge_from_context -from .knowledge_store import get_knowledge_store -from .llm_service import MaiSakaLLMService, build_message, remove_last_perception -from .mcp_client import MCPManager -from .timing import build_timing_info -from .tool_handlers import ( - ToolHandlerContext, - handle_list_files, - handle_mcp_tool, - handle_read_file, - handle_send_message, - handle_store_context, - handle_stop, - handle_unknown_tool, - handle_wait, - handle_write_file, -) - - -class BufferCLI: - """命令行交互界面""" - - def __init__(self): - self.llm_service: 
Optional[MaiSakaLLMService] = None - self._reader = InputReader() - self._chat_history: Optional[list] = None # 持久化的对话历史 - self._knowledge_store = get_knowledge_store() # 了解存储实例 - - # 显示了解存储统计 - knowledge_stats = self._knowledge_store.get_stats() - if knowledge_stats["total_items"] > 0: - console.print(f"[success][OK] 了解系统: {knowledge_stats['total_items']}条特征信息[/success]") - else: - console.print("[muted][OK] 了解系统: 已初始化 (暂无数据)[/muted]") - # Timing 模块时间戳跟踪 - self._chat_start_time: Optional[datetime] = None - self._last_user_input_time: Optional[datetime] = None - self._last_assistant_response_time: Optional[datetime] = None - self._user_input_times: list[datetime] = [] # 所有用户输入时间戳 - # MCP 管理器(异步初始化,在 run() 中完成) - self._mcp_manager: Optional[MCPManager] = None - self._init_llm() - - def _init_llm(self): - """初始化 LLM 服务 - 使用主项目配置系统""" - thinking_env = os.getenv("ENABLE_THINKING", "").strip().lower() - enable_thinking: Optional[bool] = True if thinking_env == "true" else False if thinking_env == "false" else None - - # MaiSakaLLMService 现在使用主项目的配置系统 - # 参数仅为兼容性保留,实际从 config_manager 读取配置 - self.llm_service = MaiSakaLLMService( - api_key="", - base_url=None, - model="", - enable_thinking=enable_thinking, - ) - - # 获取实际使用的模型名称 - model_name = self.llm_service._model_name - console.print(f"[success][OK] LLM 服务已初始化[/success] [muted](模型: {model_name})[/muted]") - - def _build_tool_context(self) -> ToolHandlerContext: - """构建工具处理器所需的上下文。""" - ctx = ToolHandlerContext( - llm_service=self.llm_service, - reader=self._reader, - user_input_times=self._user_input_times, - ) - ctx.last_user_input_time = self._last_user_input_time - return ctx - - # ──────── 显示方法 ──────── - - def _show_banner(self): - """显示欢迎横幅""" - banner = Text() - banner.append("MaiSaka", style="bold cyan") - banner.append(" v2.0\n", style="muted") - banner.append("直接输入文字开始对话 | Ctrl+C 退出", style="muted") - - console.print(Panel(banner, box=box.DOUBLE_EDGE, border_style="cyan", padding=(1, 2))) - console.print() - - # ──────── 上下文管理 ──────── - - def _get_safe_removal_indices(self, chat_history: list, count: int) -> list[int]: - """ - 获取可以安全删除的消息索引。 - - 确保 tool_calls 和 tool 响应消息成对删除,避免破坏 API 要求的配对关系。 - 只删除完整的消息块(user/assistant + 可选的 tool 响应序列)。 - - 保留最后 3 条非 tool 消息,避免删除可能还在处理中的内容。 - - Returns: - 可以安全删除的消息索引列表(从后往前排序) - """ - indices_to_remove = [] - removed_count = 0 - i = 0 - - # 计算保留的消息数量(最后 3 条非 tool 消息) - safe_zone_count = 3 - non_tool_count = 0 - for msg in reversed(chat_history): - if msg.get("role") != "tool": - non_tool_count += 1 - if non_tool_count >= safe_zone_count: - break - - # 只处理前 (len - non_tool_count) 条消息 - max_process_index = len(chat_history) - non_tool_count - - while i < max_process_index and removed_count < count: - msg = chat_history[i] - role = msg.get("role", "") - - # 跳过 role=tool 的消息(它们会被对应的 assistant 消息一起处理) - if role == "tool": - i += 1 +""" +MaiSaka - CLI 交互界面与对话引擎 +BufferCLI 整合主循环、对话引擎、子代理管理。 +""" + +import os +import asyncio +from datetime import datetime +from typing import Optional + +from rich.panel import Panel +from rich.markdown import Markdown +from rich.text import Text +from rich import box + +from .config import ( + console, + ENABLE_EMOTION_MODULE, + ENABLE_COGNITION_MODULE, + ENABLE_TIMING_MODULE, + ENABLE_KNOWLEDGE_MODULE, + ENABLE_MCP, +) +from .input_reader import InputReader +from .knowledge import retrieve_relevant_knowledge, store_knowledge_from_context +from .knowledge_store import get_knowledge_store +from .llm_service import MaiSakaLLMService, build_message, remove_last_perception +from 
.mcp_client import MCPManager +from .timing import build_timing_info +from .tool_handlers import ( + ToolHandlerContext, + handle_list_files, + handle_mcp_tool, + handle_read_file, + handle_stop, + handle_unknown_tool, + handle_wait, + handle_write_file, +) + + +class BufferCLI: + """命令行交互界面""" + + def __init__(self): + self.llm_service: Optional[MaiSakaLLMService] = None + self._reader = InputReader() + self._chat_history: Optional[list] = None # 持久化的对话历史 + self._knowledge_store = get_knowledge_store() # 了解存储实例 + + # 显示了解存储统计 + knowledge_stats = self._knowledge_store.get_stats() + if knowledge_stats["total_items"] > 0: + console.print(f"[success][OK] 了解系统: {knowledge_stats['total_items']}条特征信息[/success]") + else: + console.print("[muted][OK] 了解系统: 已初始化 (暂无数据)[/muted]") + # Timing 模块时间戳跟踪 + self._chat_start_time: Optional[datetime] = None + self._last_user_input_time: Optional[datetime] = None + self._last_assistant_response_time: Optional[datetime] = None + self._user_input_times: list[datetime] = [] # 所有用户输入时间戳 + # MCP 管理器(异步初始化,在 run() 中完成) + self._mcp_manager: Optional[MCPManager] = None + self._init_llm() + + def _init_llm(self): + """初始化 LLM 服务 - 使用主项目配置系统""" + thinking_env = os.getenv("ENABLE_THINKING", "").strip().lower() + enable_thinking: Optional[bool] = True if thinking_env == "true" else False if thinking_env == "false" else None + + # MaiSakaLLMService 现在使用主项目的配置系统 + # 参数仅为兼容性保留,实际从 config_manager 读取配置 + self.llm_service = MaiSakaLLMService( + api_key="", + base_url=None, + model="", + enable_thinking=enable_thinking, + ) + + # 获取实际使用的模型名称 + model_name = self.llm_service._model_name + console.print(f"[success][OK] LLM 服务已初始化[/success] [muted](模型: {model_name})[/muted]") + + def _build_tool_context(self) -> ToolHandlerContext: + """构建工具处理器所需的上下文。""" + ctx = ToolHandlerContext( + llm_service=self.llm_service, + reader=self._reader, + user_input_times=self._user_input_times, + ) + ctx.last_user_input_time = self._last_user_input_time + return ctx + + def _show_banner(self): + """显示欢迎横幅""" + banner = Text() + banner.append("MaiSaka", style="bold cyan") + banner.append(" v2.0\n", style="muted") + banner.append("直接输入文字开始对话 | Ctrl+C 退出", style="muted") + + console.print(Panel(banner, box=box.DOUBLE_EDGE, border_style="cyan", padding=(1, 2))) + console.print() + + # ──────── 上下文管理 ──────── + + def _get_safe_removal_indices(self, chat_history: list, count: int) -> list[int]: + """ + 获取可以安全删除的消息索引。 + + 确保 tool_calls 和 tool 响应消息成对删除,避免破坏 API 要求的配对关系。 + 只删除完整的消息块(user/assistant + 可选的 tool 响应序列)。 + + 保留最后 3 条非 tool 消息,避免删除可能还在处理中的内容。 + + Returns: + 可以安全删除的消息索引列表(从后往前排序) + """ + indices_to_remove = [] + removed_count = 0 + i = 0 + + # 计算保留的消息数量(最后 3 条非 tool 消息) + safe_zone_count = 3 + non_tool_count = 0 + for msg in reversed(chat_history): + if msg.get("role") != "tool": + non_tool_count += 1 + if non_tool_count >= safe_zone_count: + break + + # 只处理前 (len - non_tool_count) 条消息 + max_process_index = len(chat_history) - non_tool_count + + while i < max_process_index and removed_count < count: + msg = chat_history[i] + role = msg.get("role", "") + + # 跳过 role=tool 的消息(它们会被对应的 assistant 消息一起处理) + if role == "tool": + i += 1 + continue + + # 检查这是否是一个带 tool_calls 的 assistant 消息 + if role == "assistant" and "tool_calls" in msg: + # 收集这个 assistant 消息及其后续的 tool 响应消息 + block_indices = [i] + j = i + 1 + while j < len(chat_history): + next_msg = chat_history[j] + if next_msg.get("role") == "tool": + block_indices.append(j) + j += 1 + else: + break + indices_to_remove.extend(block_indices) + 
removed_count += 1 + i = j + elif role in ["user", "assistant"]: + # 普通消息,可以直接删除 + indices_to_remove.append(i) + removed_count += 1 + i += 1 + else: + i += 1 + + # 从后往前排序,避免索引问题 + return sorted(indices_to_remove, reverse=True) + + async def _manage_context_length(self, chat_history: list) -> None: + """ + 上下文管理:当对话历史过长时进行压缩。 + + 当达到 20 条上下文时: + 1. 移除最早 10 条上下文 + 2. 对这 10 条内容进行 LLM 总结 + 3. 将总结后的内容存入记忆 + """ + CONTEXT_LIMIT = 20 + COMPRESS_COUNT = 10 + + # 计算实际消息数量(排除 role=tool 的工具返回消息) + actual_messages = [m for m in chat_history if m.get("role") != "tool"] + + if len(actual_messages) >= CONTEXT_LIMIT: + # 获取安全删除的索引 + indices_to_remove = self._get_safe_removal_indices(chat_history, COMPRESS_COUNT) + + if indices_to_remove: + # 收集要总结的消息(在删除前) + to_compress = [] + for i in sorted(indices_to_remove): + if 0 <= i < len(chat_history): + to_compress.append(chat_history[i]) + + if to_compress: + # 总结上下文 + try: + console.print("[accent]🧠 上下文过长,正在压缩并存入记忆...[/accent]") + summary = await self.llm_service.summarize_context(to_compress) + + # 存储了解信息(如果启用) + if ENABLE_KNOWLEDGE_MODULE: + try: + knowledge_count = await store_knowledge_from_context( + self.llm_service, + to_compress, + store_result_callback=lambda cat_id, cat_name, content: console.print( + f"[muted] [OK] 存储了解信息: {cat_name}[/muted]" + ), + ) + if knowledge_count > 0: + console.print(f"[success][OK] 了解模块: 存储{knowledge_count}条特征信息[/success]") + except Exception as e: + console.print(f"[warning]了解存储失败: {e}[/warning]") + if summary: + # 存入记忆 + # 显示压缩结果 + console.print( + Panel( + Markdown(summary), + title="📝 上下文已压缩", + border_style="green", + padding=(0, 1), + style="dim", + ) + ) + except Exception as e: + console.print(f"[warning]上下文总结失败: {e}[/warning]") + + # 从后往前删除 + for i in indices_to_remove: + if 0 <= i < len(chat_history): + chat_history.pop(i) + + # 清理"孤儿" tool 消息(没有对应 tool_calls 的 tool 消息) + valid_tool_call_ids = set() + for msg in chat_history: + if msg.get("role") == "assistant" and "tool_calls" in msg: + for tool_call in msg["tool_calls"]: + valid_tool_call_ids.add(tool_call.get("id", "")) + + # 删除无效的 tool 消息(从后往前) + i = len(chat_history) - 1 + while i >= 0: + msg = chat_history[i] + if msg.get("role") == "tool": + tool_call_id = msg.get("tool_call_id", "") + if tool_call_id not in valid_tool_call_ids: + chat_history.pop(i) + i -= 1 + + # ──────── LLM 循环架构 ──────── + + async def _start_chat(self, user_text: str): + """接收用户输入并启动/继续 LLM 对话循环""" + if not self.llm_service: + console.print("[warning]LLM 服务未初始化,跳过对话。[/warning]") + return + + now = datetime.now() + self._last_user_input_time = now + self._user_input_times.append(now) + + if self._chat_history is None: + # 首次对话:初始化上下文 + self._chat_start_time = now + self._last_assistant_response_time = None + self._chat_history = self.llm_service.build_chat_context(user_text) + else: + # 后续对话:追加用户消息到已有上下文 + self._chat_history.append(build_message(role="user", content=user_text)) + + await self._run_llm_loop(self._chat_history) + + async def _run_llm_loop(self, chat_history: list): + """ + LLM 循环架构核心。 + + LLM 持续运行,每步可能输出文本(内心思考)和/或调用工具: + - say(text): 对用户说话 + - wait(seconds): 暂停等待用户输入,超时或收到输入后继续 + - stop(): 结束循环,进入待机,直到用户下次输入 + - 不调用工具: 继续下一轮思考/生成 + + 每轮流程: + 1. 上下文管理:达到上限时自动压缩 + 2. 情商 + Timing + 了解模块(并行):分析用户情绪、对话时间节奏、检索用户特征 + *注:如果上次没有调用工具,跳过模块分析 + 3. 
调用主 LLM:基于完整上下文生成响应 + """ + consecutive_errors = 0 + last_had_tool_calls = True # 第一次循环总是执行模块分析 + + while True: + # ── 上下文管理 ── + await self._manage_context_length(chat_history) + + # ── 情商模块 + Timing 模块 + 了解模块(并行) ── + # 只有上次调用了工具才重新分析(首次循环除外) + if last_had_tool_calls: + timing_info = build_timing_info( + self._chat_start_time, + self._last_user_input_time, + self._last_assistant_response_time, + self._user_input_times, + ) + + # 根据配置决定要执行的模块 + tasks = [] + status_text_parts = [] + + if ENABLE_EMOTION_MODULE: + tasks.append(("eq", self.llm_service.analyze_emotion(chat_history))) + status_text_parts.append("🎭") + if ENABLE_COGNITION_MODULE: + tasks.append(("cognition", self.llm_service.analyze_cognition(chat_history))) + status_text_parts.append("🧩") + if ENABLE_TIMING_MODULE: + tasks.append(("timing", self.llm_service.analyze_timing(chat_history, timing_info))) + status_text_parts.append("⏱️🪞") + if ENABLE_KNOWLEDGE_MODULE: + tasks.append(("knowledge", retrieve_relevant_knowledge(self.llm_service, chat_history))) + status_text_parts.append("👤") + + with console.status( + f"[info]{' '.join(status_text_parts)} {' + '.join(status_text_parts)} 模块并行分析中...[/info]", + spinner="dots", + ): + results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True) + + # 解析结果 + eq_result, cognition_result, timing_result, knowledge_result = None, None, None, None + result_idx = 0 + if ENABLE_EMOTION_MODULE: + eq_result = results[result_idx] + result_idx += 1 + if ENABLE_COGNITION_MODULE: + cognition_result = results[result_idx] + result_idx += 1 + if ENABLE_TIMING_MODULE: + timing_result = results[result_idx] + result_idx += 1 + if ENABLE_KNOWLEDGE_MODULE: + knowledge_result = results[result_idx] + result_idx += 1 + + # 处理情商模块结果 + eq_analysis = "" + if ENABLE_EMOTION_MODULE: + if isinstance(eq_result, Exception): + console.print(f"[warning]情商模块分析失败: {eq_result}[/warning]") + elif eq_result: + eq_analysis = eq_result + console.print( + Panel( + Markdown(eq_analysis), + title="🎭 情绪感知", + border_style="bright_yellow", + padding=(0, 1), + style="dim", + ) + ) + + # 处理认知模块结果 + cognition_analysis = "" + if ENABLE_COGNITION_MODULE: + if isinstance(cognition_result, Exception): + console.print(f"[warning]认知模块分析失败: {cognition_result}[/warning]") + elif cognition_result: + cognition_analysis = cognition_result + console.print( + Panel( + Markdown(cognition_analysis), + title="🧩 意图感知", + border_style="bright_cyan", + padding=(0, 1), + style="dim", + ) + ) + + # 处理 Timing 模块结果(含自我反思功能) + timing_analysis = "" + if ENABLE_TIMING_MODULE: + if isinstance(timing_result, Exception): + console.print(f"[warning]Timing 模块分析失败: {timing_result}[/warning]") + elif timing_result: + timing_analysis = timing_result + console.print( + Panel( + Markdown(timing_analysis), + title="⏱️🪞 时间感知 & 自我反思", + border_style="bright_blue", + padding=(0, 1), + style="dim", + ) + ) + + # 处理了解模块结果 + knowledge_analysis = "" + if ENABLE_KNOWLEDGE_MODULE: + if isinstance(knowledge_result, Exception): + console.print(f"[warning]了解模块分析失败: {knowledge_result}[/warning]") + elif knowledge_result: + knowledge_analysis = knowledge_result + console.print( + Panel( + Markdown(knowledge_analysis), + title="👤 用户特征", + border_style="bright_magenta", + padding=(0, 1), + style="dim", + ) + ) + + # 注入感知信息(作为 assistant 的感知消息) + # 移除上一条感知消息(如果存在) + remove_last_perception(chat_history) + + # 构建感知内容 + perception_parts = [] + if eq_analysis: + perception_parts.append(f"情绪感知\n{eq_analysis}") + if cognition_analysis: + 
perception_parts.append(f"意图感知\n{cognition_analysis}") + if timing_analysis: + perception_parts.append(f"时间感知 & 自我反思\n{timing_analysis}") + if knowledge_analysis: + perception_parts.append(f"用户特征\n{knowledge_analysis}") + + if perception_parts: + # 添加感知消息(AI 的感知能力结果) + chat_history.append( + build_message( + role="assistant", + content="\n\n".join(perception_parts), + msg_type="perception", + ) + ) + else: + # 上次没有调用工具,跳过模块分析 + console.print("[muted]ℹ️ 上次未调用工具,跳过模块分析[/muted]") + + # ── 调用 LLM ── + with console.status("[info]💬 AI 正在思考...[/info]", spinner="dots"): + try: + response = await self.llm_service.chat_loop_step(chat_history) + consecutive_errors = 0 + except Exception as e: + consecutive_errors += 1 + console.print(f"[error]LLM 调用出错: {e}[/error]") + if consecutive_errors >= 3: + console.print("[error]连续出错,退出对话[/error]\n") + break + continue + + # 将 assistant 消息追加到历史 + chat_history.append(response.raw_message) + self._last_assistant_response_time = datetime.now() + + + # 显示内心思考(content 部分,淡色呈现) + if response.content: + console.print( + Panel( + Markdown(response.content), + title="💭 内心思考", + border_style="dim", + padding=(1, 2), + style="dim", + ) + ) + + # ── 处理工具调用 ── + if response.content and not response.tool_calls: + last_had_tool_calls = False continue - - # 检查这是否是一个带 tool_calls 的 assistant 消息 - if role == "assistant" and "tool_calls" in msg: - # 收集这个 assistant 消息及其后续的 tool 响应消息 - block_indices = [i] - j = i + 1 - while j < len(chat_history): - next_msg = chat_history[j] - if next_msg.get("role") == "tool": - block_indices.append(j) - j += 1 - else: - break - indices_to_remove.extend(block_indices) - removed_count += 1 - i = j - elif role in ["user", "assistant"]: - # 普通消息,可以直接删除 - indices_to_remove.append(i) - removed_count += 1 - i += 1 - else: - i += 1 - - # 从后往前排序,避免索引问题 - return sorted(indices_to_remove, reverse=True) - - async def _manage_context_length(self, chat_history: list) -> None: - """ - 上下文管理:当对话历史过长时进行压缩。 - - 当达到 20 条上下文时: - 1. 移除最早 10 条上下文 - 2. 对这 10 条内容进行 LLM 总结 - 3. 
将总结后的内容存入记忆 - """ - CONTEXT_LIMIT = 20 - COMPRESS_COUNT = 10 - - # 计算实际消息数量(排除 role=tool 的工具返回消息) - actual_messages = [m for m in chat_history if m.get("role") != "tool"] - - if len(actual_messages) >= CONTEXT_LIMIT: - # 获取安全删除的索引 - indices_to_remove = self._get_safe_removal_indices(chat_history, COMPRESS_COUNT) - - if indices_to_remove: - # 收集要总结的消息(在删除前) - to_compress = [] - for i in sorted(indices_to_remove): - if 0 <= i < len(chat_history): - to_compress.append(chat_history[i]) - - if to_compress: - # 总结上下文 - try: - console.print("[accent]🧠 上下文过长,正在压缩并存入记忆...[/accent]") - summary = await self.llm_service.summarize_context(to_compress) - - # 存储了解信息(如果启用) - if ENABLE_KNOWLEDGE_MODULE: - try: - knowledge_count = await store_knowledge_from_context( - self.llm_service, - to_compress, - store_result_callback=lambda cat_id, cat_name, content: console.print( - f"[muted] [OK] 存储了解信息: {cat_name}[/muted]" - ), - ) - if knowledge_count > 0: - console.print(f"[success][OK] 了解模块: 存储{knowledge_count}条特征信息[/success]") - except Exception as e: - console.print(f"[warning]了解存储失败: {e}[/warning]") - if summary: - # 存入记忆 - # 显示压缩结果 - console.print( - Panel( - Markdown(summary), - title="📝 上下文已压缩", - border_style="green", - padding=(0, 1), - style="dim", - ) - ) - except Exception as e: - console.print(f"[warning]上下文总结失败: {e}[/warning]") - - # 从后往前删除 - for i in indices_to_remove: - if 0 <= i < len(chat_history): - chat_history.pop(i) - - # 清理"孤儿" tool 消息(没有对应 tool_calls 的 tool 消息) - valid_tool_call_ids = set() - for msg in chat_history: - if msg.get("role") == "assistant" and "tool_calls" in msg: - for tool_call in msg["tool_calls"]: - valid_tool_call_ids.add(tool_call.get("id", "")) - - # 删除无效的 tool 消息(从后往前) - i = len(chat_history) - 1 - while i >= 0: - msg = chat_history[i] - if msg.get("role") == "tool": - tool_call_id = msg.get("tool_call_id", "") - if tool_call_id not in valid_tool_call_ids: - chat_history.pop(i) - i -= 1 - - # ──────── LLM 循环架构 ──────── - - async def _start_chat(self, user_text: str): - """接收用户输入并启动/继续 LLM 对话循环""" - if not self.llm_service: - console.print("[warning]LLM 服务未初始化,跳过对话。[/warning]") - return - - now = datetime.now() - self._last_user_input_time = now - self._user_input_times.append(now) - - if self._chat_history is None: - # 首次对话:初始化上下文 - self._chat_start_time = now - self._last_assistant_response_time = None - self._chat_history = self.llm_service.build_chat_context(user_text) - else: - # 后续对话:追加用户消息到已有上下文 - self._chat_history.append( - { - "role": "user", - "content": user_text, - } - ) - - await self._run_llm_loop(self._chat_history) - - async def _run_llm_loop(self, chat_history: list): - """ - LLM 循环架构核心。 - - LLM 持续运行,每步可能输出文本(内心思考)和/或调用工具: - - say(text): 对用户说话 - - wait(seconds): 暂停等待用户输入,超时或收到输入后继续 - - stop(): 结束循环,进入待机,直到用户下次输入 - - 不调用工具: 继续下一轮思考/生成 - - 每轮流程: - 1. 上下文管理:达到上限时自动压缩 - 2. 情商 + Timing + 了解模块(并行):分析用户情绪、对话时间节奏、检索用户特征 - *注:如果上次没有调用工具,跳过模块分析 - 3. 
调用主 LLM:基于完整上下文生成响应 - """ - consecutive_errors = 0 - last_had_tool_calls = True # 第一次循环总是执行模块分析 - - while True: - # ── 上下文管理 ── - await self._manage_context_length(chat_history) - - # ── 情商模块 + Timing 模块 + 了解模块(并行) ── - # 只有上次调用了工具才重新分析(首次循环除外) - if last_had_tool_calls: - timing_info = build_timing_info( - self._chat_start_time, - self._last_user_input_time, - self._last_assistant_response_time, - self._user_input_times, - ) - - # 根据配置决定要执行的模块 - tasks = [] - status_text_parts = [] - - if ENABLE_EMOTION_MODULE: - tasks.append(("eq", self.llm_service.analyze_emotion(chat_history))) - status_text_parts.append("🎭") - if ENABLE_COGNITION_MODULE: - tasks.append(("cognition", self.llm_service.analyze_cognition(chat_history))) - status_text_parts.append("🧩") - if ENABLE_TIMING_MODULE: - tasks.append(("timing", self.llm_service.analyze_timing(chat_history, timing_info))) - status_text_parts.append("⏱️🪞") - if ENABLE_KNOWLEDGE_MODULE: - tasks.append(("knowledge", retrieve_relevant_knowledge(self.llm_service, chat_history))) - status_text_parts.append("👤") - - with console.status( - f"[info]{' '.join(status_text_parts)} {' + '.join(status_text_parts)} 模块并行分析中...[/info]", - spinner="dots", - ): - results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True) - - # 解析结果 - eq_result, cognition_result, timing_result, knowledge_result = None, None, None, None - result_idx = 0 - if ENABLE_EMOTION_MODULE: - eq_result = results[result_idx] - result_idx += 1 - if ENABLE_COGNITION_MODULE: - cognition_result = results[result_idx] - result_idx += 1 - if ENABLE_TIMING_MODULE: - timing_result = results[result_idx] - result_idx += 1 - if ENABLE_KNOWLEDGE_MODULE: - knowledge_result = results[result_idx] - result_idx += 1 - - # 处理情商模块结果 - eq_analysis = "" - if ENABLE_EMOTION_MODULE: - if isinstance(eq_result, Exception): - console.print(f"[warning]情商模块分析失败: {eq_result}[/warning]") - elif eq_result: - eq_analysis = eq_result - console.print( - Panel( - Markdown(eq_analysis), - title="🎭 情绪感知", - border_style="bright_yellow", - padding=(0, 1), - style="dim", - ) - ) - - # 处理认知模块结果 - cognition_analysis = "" - if ENABLE_COGNITION_MODULE: - if isinstance(cognition_result, Exception): - console.print(f"[warning]认知模块分析失败: {cognition_result}[/warning]") - elif cognition_result: - cognition_analysis = cognition_result - console.print( - Panel( - Markdown(cognition_analysis), - title="🧩 意图感知", - border_style="bright_cyan", - padding=(0, 1), - style="dim", - ) - ) - - # 处理 Timing 模块结果(含自我反思功能) - timing_analysis = "" - if ENABLE_TIMING_MODULE: - if isinstance(timing_result, Exception): - console.print(f"[warning]Timing 模块分析失败: {timing_result}[/warning]") - elif timing_result: - timing_analysis = timing_result - console.print( - Panel( - Markdown(timing_analysis), - title="⏱️🪞 时间感知 & 自我反思", - border_style="bright_blue", - padding=(0, 1), - style="dim", - ) - ) - - # 处理了解模块结果 - knowledge_analysis = "" - if ENABLE_KNOWLEDGE_MODULE: - if isinstance(knowledge_result, Exception): - console.print(f"[warning]了解模块分析失败: {knowledge_result}[/warning]") - elif knowledge_result: - knowledge_analysis = knowledge_result - console.print( - Panel( - Markdown(knowledge_analysis), - title="👤 用户特征", - border_style="bright_magenta", - padding=(0, 1), - style="dim", - ) - ) - - # 注入感知信息(作为 assistant 的感知消息) - # 移除上一条感知消息(如果存在) - remove_last_perception(chat_history) - - # 构建感知内容 - perception_parts = [] - if eq_analysis: - perception_parts.append(f"情绪感知\n{eq_analysis}") - if cognition_analysis: - 
perception_parts.append(f"意图感知\n{cognition_analysis}") - if timing_analysis: - perception_parts.append(f"时间感知 & 自我反思\n{timing_analysis}") - if knowledge_analysis: - perception_parts.append(f"用户特征\n{knowledge_analysis}") - - if perception_parts: - # 添加感知消息(AI 的感知能力结果) - chat_history.append( - build_message( - role="assistant", - content="\n\n".join(perception_parts), - msg_type="perception", - ) - ) - else: - # 上次没有调用工具,跳过模块分析 - console.print("[muted]ℹ️ 上次未调用工具,跳过模块分析[/muted]") - - # ── 调用 LLM ── - with console.status("[info]💬 AI 正在思考...[/info]", spinner="dots"): - try: - response = await self.llm_service.chat_loop_step(chat_history) - consecutive_errors = 0 - except Exception as e: - consecutive_errors += 1 - console.print(f"[error]LLM 调用出错: {e}[/error]") - if consecutive_errors >= 3: - console.print("[error]连续出错,退出对话[/error]\n") - break - continue - - # 将 assistant 消息追加到历史 - chat_history.append(response.raw_message) - self._last_assistant_response_time = datetime.now() - - # 显示内心思考(content 部分,淡色呈现) - if response.content: - console.print( - Panel( - Markdown(response.content), - title="💭 内心思考", - border_style="dim", - padding=(1, 2), - style="dim", - ) - ) - - # ── 处理工具调用 ── - if response.tool_calls: - should_stop = False - ctx = self._build_tool_context() - - for tc in response.tool_calls: - if tc.name in {"send_message", "say"}: - await handle_send_message(tc, chat_history, ctx) - - elif tc.name == "stop": + + if response.tool_calls: + should_stop = False + ctx = self._build_tool_context() + + for tc in response.tool_calls: + if tc.name == "stop": await handle_stop(tc, chat_history) should_stop = True + elif tc.name == "reply": + reply = await self._generate_visible_reply(chat_history, response.content) + chat_history.append( + { + "role": "tool", + "tool_call_id": tc.id, + "content": "Visible reply generated and recorded.", + } + ) + chat_history.append( + build_message( + role="user", + content=f"\u3010\u9ea6\u9ea6\u7684\u53d1\u8a00\u3011{reply}", + ) + ) + + elif tc.name == "no_reply": + console.print("[muted]No visible reply this round.[/muted]") + chat_history.append( + { + "role": "tool", + "tool_call_id": tc.id, + "content": "No visible reply was sent for this round.", + } + ) + elif tc.name == "wait": tool_result = await handle_wait(tc, chat_history, ctx) - # 同步回 timing 时间戳 - if ctx.last_user_input_time != self._last_user_input_time: - self._last_user_input_time = ctx.last_user_input_time - if tool_result.startswith("[[QUIT]]"): - should_stop = True + # 同步回 timing 时间戳 + if ctx.last_user_input_time != self._last_user_input_time: + self._last_user_input_time = ctx.last_user_input_time + if tool_result.startswith("[[QUIT]]"): + should_stop = True + + elif tc.name == "write_file": + await handle_write_file(tc, chat_history) + + elif tc.name == "read_file": + await handle_read_file(tc, chat_history) + + elif tc.name == "list_files": + await handle_list_files(tc, chat_history) + + elif self._mcp_manager and self._mcp_manager.is_mcp_tool(tc.name): + await handle_mcp_tool(tc, chat_history, self._mcp_manager) + + else: + await handle_unknown_tool(tc, chat_history) + + if should_stop: + console.print("[muted]对话暂停,等待新输入...[/muted]\n") + break + + # 调用了工具,下次循环需要重新分析模块 + last_had_tool_calls = True + else: + # LLM 未调用任何工具 → 继续下一轮思考 + # (不做任何额外操作,直接回到循环顶部再次调用 LLM) + # 标记上次没有调用工具,下次循环跳过模块分析 + last_had_tool_calls = False + continue + + # ──────── 主循环 ──────── + + async def _init_mcp(self): + """初始化 MCP 服务器连接,发现并注册外部工具。""" + config_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 
+ "mcp_config.json", + ) + self._mcp_manager = await MCPManager.from_config(config_path) + + if self._mcp_manager and self.llm_service: + mcp_tools = self._mcp_manager.get_openai_tools() + if mcp_tools: + self.llm_service.set_extra_tools(mcp_tools) + summary = self._mcp_manager.get_tool_summary() + console.print( + Panel( + f"已加载 {len(mcp_tools)} 个 MCP 工具:\n{summary}", + title="🔌 MCP 工具", + border_style="green", + padding=(0, 1), + ) + ) + + async def _generate_visible_reply(self, chat_history: list, latest_thought: str) -> str: + """Generate and emit a visible reply based on the latest thought.""" + if not self.llm_service or not latest_thought: + return "" - elif tc.name == "write_file": - await handle_write_file(tc, chat_history) - - elif tc.name == "read_file": - await handle_read_file(tc, chat_history) - - elif tc.name == "list_files": - await handle_list_files(tc, chat_history) - - elif tc.name == "store_context": - await handle_store_context(tc, chat_history, ctx) - - elif self._mcp_manager and self._mcp_manager.is_mcp_tool(tc.name): - await handle_mcp_tool(tc, chat_history, self._mcp_manager) - - else: - await handle_unknown_tool(tc, chat_history) - - if should_stop: - console.print("[muted]对话暂停,等待新输入...[/muted]\n") - break - - # 调用了工具,下次循环需要重新分析模块 - last_had_tool_calls = True - else: - # LLM 未调用任何工具 → 继续下一轮思考 - # (不做任何额外操作,直接回到循环顶部再次调用 LLM) - # 标记上次没有调用工具,下次循环跳过模块分析 - last_had_tool_calls = False - - # ──────── 主循环 ──────── - - async def _init_mcp(self): - """初始化 MCP 服务器连接,发现并注册外部工具。""" - config_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "mcp_config.json", - ) - self._mcp_manager = await MCPManager.from_config(config_path) - - if self._mcp_manager and self.llm_service: - mcp_tools = self._mcp_manager.get_openai_tools() - if mcp_tools: - self.llm_service.set_extra_tools(mcp_tools) - summary = self._mcp_manager.get_tool_summary() - console.print( - Panel( - f"已加载 {len(mcp_tools)} 个 MCP 工具:\n{summary}", - title="🔌 MCP 工具", - border_style="green", - padding=(0, 1), - ) - ) - - async def run(self): - """主循环:直接输入文本即可对话""" - # 根据配置决定是否初始化 MCP 服务器 - if ENABLE_MCP: - await self._init_mcp() - else: - console.print("[muted]🔌 MCP 已禁用 (ENABLE_MCP=false)[/muted]") - - # 启动异步输入读取器 - self._reader.start(asyncio.get_event_loop()) - - self._show_banner() - - try: - while True: - console.print("[bold cyan]> [/bold cyan]", end="") - raw_input = await self._reader.get_line() - - if raw_input is None: # EOF - console.print("\n[muted]再见![/muted]") - break - - raw_input = raw_input.strip() - if not raw_input: - continue - - await self._start_chat(raw_input) - finally: - if self._mcp_manager: - await self._mcp_manager.close() + with console.status("[info]Generating visible reply...[/info]", spinner="dots"): + reply = await self.llm_service.generate_reply(latest_thought, chat_history) + + console.print( + Panel( + Markdown(reply), + title="MaiSaka", + border_style="magenta", + padding=(1, 2), + ) + ) + return reply + + async def run(self): + """主循环:直接输入文本即可对话""" + # 根据配置决定是否初始化 MCP 服务器 + if ENABLE_MCP: + await self._init_mcp() + else: + console.print("[muted]🔌 MCP 已禁用 (ENABLE_MCP=false)[/muted]") + + # 启动异步输入读取器 + self._reader.start(asyncio.get_event_loop()) + + self._show_banner() + + try: + while True: + console.print("[bold cyan]> [/bold cyan]", end="") + raw_input = await self._reader.get_line() + + if raw_input is None: # EOF + console.print("\n[muted]再见![/muted]") + break + + raw_input = raw_input.strip() + if not raw_input: + continue + + await self._start_chat(raw_input) + 
finally: + if self._mcp_manager: + await self._mcp_manager.close() + + + diff --git a/src/maisaka/llm_service.py b/src/maisaka/llm_service.py index 5dfdfe48..bc83e9b9 100644 --- a/src/maisaka/llm_service.py +++ b/src/maisaka/llm_service.py @@ -1,11 +1,14 @@ -""" +""" MaiSaka LLM 服务 - 使用主项目 LLM 系统 将主项目的 LLMRequest 适配为 MaiSaka 需要的接口 """ +from datetime import datetime + +import json +import random from dataclasses import dataclass from typing import Any, List, Literal, Optional -import json from rich.console import Group from rich.panel import Panel @@ -13,7 +16,7 @@ from rich.pretty import Pretty from rich.text import Text from src.common.logger import get_logger -from src.config.config import config_manager +from src.config.config import config_manager, global_config from src.llm_models.payload_content.message import MessageBuilder, RoleType from src.llm_models.payload_content.tool_option import ToolCall as ToolCallOption, ToolOption from src.llm_models.utils_model import LLMRequest @@ -58,7 +61,13 @@ class ChatResponse: def build_message(role: str, content: str, msg_type: MessageType = "user", **kwargs) -> dict: """构建消息字典,包含消息类型标记。""" - msg = {"role": role, "content": content, MSG_TYPE_FIELD: msg_type, **kwargs} + msg = { + "role": role, + "content": content, + MSG_TYPE_FIELD: msg_type, + "_time": datetime.now().strftime("%H:%M:%S"), + **kwargs, + } return msg @@ -107,8 +116,8 @@ class MaiSakaLLMService: # 初始化 LLMRequest 实例(只使用 tool_use 和 replyer) self._llm_tool_use = LLMRequest(model_set=self._model_configs.tool_use, request_type="maisaka_tool_use") # 主对话也使用 tool_use 模型(因为需要工具调用支持) - self._llm_chat = self._llm_tool_use - # 分析模块也使用 tool_use 模型 + self._llm_planner = LLMRequest(model_set=self._model_configs.planner, request_type="maisaka_planner") + self._llm_chat = self._llm_planner self._llm_utils = self._llm_tool_use # 回复生成使用 replyer 模型 self._llm_replyer = LLMRequest(model_set=self._model_configs.replyer, request_type="maisaka_replyer") @@ -116,6 +125,9 @@ class MaiSakaLLMService: # 尝试修复数据库 schema(忽略错误) self._try_fix_database_schema() + # 构建人设信息 + personality_prompt = self._build_personality_prompt() + # 加载系统提示词 if chat_system_prompt is None: try: @@ -130,6 +142,7 @@ class MaiSakaLLMService: tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。" chat_prompt.add_context("file_tools_section", tools_section if tools_section else "") + chat_prompt.add_context("identity", personality_prompt) import asyncio loop = asyncio.new_event_loop() @@ -141,15 +154,15 @@ class MaiSakaLLMService: loop.close() except Exception as e: logger.error(f"加载系统提示词失败: {e}") - self._chat_system_prompt = "你是一个友好的 AI 助手。" + self._chat_system_prompt = f"{personality_prompt}\n\n你是一个友好的 AI 助手。" else: self._chat_system_prompt = chat_system_prompt - # 获取模型名称用于显示 self._model_name = ( - self._model_configs.tool_use.model_list[0] if self._model_configs.tool_use.model_list else "未配置" + self._model_configs.planner.model_list[0] if self._model_configs.planner.model_list else "未配置" ) + # 加载子模块提示词 self._emotion_prompt: Optional[str] = None self._cognition_prompt: Optional[str] = None @@ -200,6 +213,37 @@ class MaiSakaLLMService: # 静默忽略任何错误,不影响正常流程 pass + def _build_personality_prompt(self) -> str: + """构建人设信息,参考 replyer 的做法""" + try: + bot_name = global_config.bot.nickname + if global_config.bot.alias_names: + bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}" + else: + bot_nickname = "" + + # 获取基础personality + prompt_personality = global_config.personality.personality + + # 检查是否需要随机替换为状态(personality 本体) + 
if ( + hasattr(global_config.personality, "states") + and global_config.personality.states + and hasattr(global_config.personality, "state_probability") + and global_config.personality.state_probability > 0 + and random.random() < global_config.personality.state_probability + ): + # 随机选择一个状态替换personality + selected_state = random.choice(global_config.personality.states) + prompt_personality = selected_state + + prompt_personality = f"{prompt_personality};" + return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}" + except Exception as e: + logger.warning(f"构建人设信息失败: {e}") + # 返回默认人设 + return "你的名字是麦麦,你是一个活泼可爱的AI助手。" + def set_extra_tools(self, tools: List[dict]) -> None: """设置额外的工具定义(如 MCP 工具)""" self._extra_tools = list(tools) @@ -390,14 +434,34 @@ class MaiSakaLLMService: # 打印消息列表 built_messages = message_factory(None) - console.print( - Panel( - Group(*[self._render_message_panel(msg, index + 1) for index, msg in enumerate(built_messages)]), - title="MaiSaka LLM Request - chat_loop_step", - border_style="cyan", - padding=(0, 1), + + # 将消息分为普通消息和 tool 消息 + non_tool_panels = [] + tool_panels = [] + + for index, msg in enumerate(built_messages): + panel = self._render_message_panel(msg, index + 1) + role = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + + if role == "tool": + tool_panels.append(panel) + else: + non_tool_panels.append(panel) + + # 先显示普通消息(group 在一个 panel 内) + if non_tool_panels: + console.print( + Panel( + Group(*non_tool_panels), + title="MaiSaka LLM Request - chat_loop_step", + border_style="cyan", + padding=(0, 1), + ) ) - ) + + # tool 消息作为单独的块展示 + for panel in tool_panels: + console.print(panel) response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async( message_factory=message_factory, @@ -424,7 +488,11 @@ class MaiSakaLLMService: ) # 构建原始消息格式(MaiSaka 风格) - raw_message = {"role": "assistant", "content": response} + raw_message = { + "role": "assistant", + "content": response, + "_time": datetime.now().strftime("%H:%M:%S"), + } if converted_tool_calls: raw_message["tool_calls"] = [ { @@ -660,8 +728,12 @@ class MaiSakaLLMService: temperature=0.8, max_tokens=512, ) - return response.strip() if response else "..." except Exception as e: logger.error(f"回复生成 LLM 调用出错: {e}") return "..." + + + + + diff --git a/src/maisaka/mcp_client/manager.py b/src/maisaka/mcp_client/manager.py index 9c43c666..5409a39d 100644 --- a/src/maisaka/mcp_client/manager.py +++ b/src/maisaka/mcp_client/manager.py @@ -12,8 +12,6 @@ from .connection import MCPConnection, MCP_AVAILABLE # 内置工具名称集合 —— MCP 工具不允许与这些名称冲突 BUILTIN_TOOL_NAMES = frozenset( { - "say", - "send_message", "wait", "stop", "create_table", diff --git a/src/maisaka/replyer.py b/src/maisaka/replyer.py index eea23a6d..2cb428a2 100644 --- a/src/maisaka/replyer.py +++ b/src/maisaka/replyer.py @@ -1,76 +1,94 @@ """ -MaiSaka - Reply 回复生成器 -根据想法和上下文生成口语化回复。 +MaiSaka reply helper. """ -from typing import Optional +from datetime import datetime +from typing import Any, Optional + +from src.config.config import global_config + from .llm_service import MaiSakaLLMService +VISIBLE_REPLY_PREFIX = "\u3010\u9ea6\u9ea6\u7684\u53d1\u8a00\u3011" -def format_chat_history(messages: list) -> str: - """将聊天消息列表格式化为可读文本。""" + +def _normalize_content(content: str, limit: int = 500) -> str: + normalized = " ".join((content or "").split()) + if len(normalized) > limit: + return normalized[:limit] + "..." 
+ return normalized + + +def _format_message_time(_: dict[str, Any]) -> str: + return datetime.now().strftime("%H:%M:%S") + + +def _extract_visible_assistant_reply(message: dict[str, Any]) -> str: + if message.get("_type") == "perception": + return "" + + content = (message.get("content", "") or "").strip() + if not content: + return "" + + marker = "[generated_reply]" + if marker in content: + _, visible_reply = content.rsplit(marker, 1) + return _normalize_content(visible_reply) + + return "" + + +def _extract_guided_bot_reply(message: dict[str, Any]) -> str: + content = (message.get("content", "") or "").strip() + if content.startswith(VISIBLE_REPLY_PREFIX): + return _normalize_content(content[len(VISIBLE_REPLY_PREFIX) :].strip()) + return "" + + +def format_chat_history(messages: list[dict[str, Any]]) -> str: + """Format visible chat history for reply generation.""" + bot_nickname = global_config.bot.nickname.strip() or "Bot" parts: list[str] = [] - for msg in messages: - role = msg.get("role", "?") - content = msg.get("content", "") or "" - if role == "system": - parts.append(f"[系统] {content[:500]}") - elif role == "user": - parts.append(f"[用户] {content[:500]}") - elif role == "assistant": + + for message in messages: + role = message.get("role", "") + timestamp = _format_message_time(message) + + if role == "user": + guided_reply = _extract_guided_bot_reply(message) + if guided_reply: + parts.append(f"{timestamp} {bot_nickname}(分析器指导的麦麦发言):{guided_reply}") + continue + + content = _normalize_content(message.get("content", "") or "") if content: - parts.append(f"[助手思考] {content[:500]}") - for tc in msg.get("tool_calls", []): - func = tc.get("function", {}) - name = func.get("name", "?") - args = func.get("arguments", "") - if isinstance(args, str) and len(args) > 200: - args = args[:200] + "..." - parts.append(f"[助手调用 {name}] {args}") - elif role == "tool": - parts.append(f"[工具结果] {content[:300]}") + parts.append(f"{timestamp} 用户:{content}") + continue + + if role == "assistant": + visible_reply = _extract_visible_assistant_reply(message) + if visible_reply: + parts.append(f"{timestamp} {bot_nickname}(你):{visible_reply}") + return "\n".join(parts) class Replyer: - """ - 回复生成器。 - - 根据给定的想法(reason)和对话上下文,生成符合人设的口语化回复。 - """ + """Generate visible replies from thoughts and context.""" def __init__(self, llm_service: Optional[MaiSakaLLMService] = None): - """ - 初始化回复器。 - - Args: - llm_service: LLM 服务实例,如果为 None 则需要在调用前设置 - """ self._llm_service = llm_service self._enabled = True def set_llm_service(self, llm_service: MaiSakaLLMService) -> None: - """设置 LLM 服务""" self._llm_service = llm_service def set_enabled(self, enabled: bool) -> None: - """启用/禁用回复功能""" self._enabled = enabled - async def reply(self, reason: str, chat_history: list) -> str: - """ - 根据想法和上下文生成回复。 - - Args: - reason: 想要回复的方式、想法、内容(不包含具体回复内容) - chat_history: 对话历史上下文 - - Returns: - 生成的回复内容,失败时返回默认回复 - """ + async def reply(self, reason: str, chat_history: list[dict[str, Any]]) -> str: if not self._enabled or not reason or self._llm_service is None: return "..." - # 直接使用 LLM 服务的 generate_reply 方法 - # 该方法使用主项目的 replyer 模型配置 return await self._llm_service.generate_reply(reason, chat_history) diff --git a/src/maisaka/tool_handlers.py b/src/maisaka/tool_handlers.py index 5464f6bb..9ac91941 100644 --- a/src/maisaka/tool_handlers.py +++ b/src/maisaka/tool_handlers.py @@ -1,16 +1,16 @@ """ -MaiSaka - 工具调用处理器 -处理 LLM 循环中各工具(say/wait/stop/file/MCP)的执行逻辑。 +MaiSaka tool handlers. 
""" +from datetime import datetime +from pathlib import Path +from typing import TYPE_CHECKING, Any, Optional + import json as _json import os -from datetime import datetime -from pathlib import Path -from typing import TYPE_CHECKING, Optional -from rich.panel import Panel from rich.markdown import Markdown +from rich.panel import Panel from .config import console from .input_reader import InputReader @@ -21,15 +21,13 @@ if TYPE_CHECKING: from .mcp_client import MCPManager -# mai_files 目录路径 MAI_FILES_DIR = Path(os.path.join(os.path.dirname(os.path.abspath(__file__)), "mai_files")) -# 全局回复器 _replyer: Optional[Replyer] = None def get_replyer(llm_service: MaiSakaLLMService) -> Replyer: - """获取回复器实例(单例模式)""" + """Return a shared replyer instance.""" global _replyer if _replyer is None: _replyer = Replyer(llm_service) @@ -39,94 +37,85 @@ def get_replyer(llm_service: MaiSakaLLMService) -> Replyer: class ToolHandlerContext: - """工具处理器所需的共享上下文。""" + """Shared context for tool handlers.""" def __init__( self, llm_service: MaiSakaLLMService, reader: InputReader, user_input_times: list[datetime], - ): + ) -> None: self.llm_service = llm_service self.reader = reader self.user_input_times = user_input_times self.last_user_input_time: Optional[datetime] = None -async def handle_send_message(tc, chat_history: list, ctx: ToolHandlerContext): - """处理 say 工具:根据想法和上下文生成回复后展示给用户。""" +async def handle_send_message(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> None: + """Backward-compatible handler for legacy send-message style tools.""" reason = tc.arguments.get("reason", "") - console.print("[accent]🔧 调用工具: say(...)[/accent]") + console.print("[accent]Calling tool: send_message(...)[/accent]") - if reason: - # 想法以淡色展示 - console.print( - Panel( - Markdown(reason), - title="💭 回复想法", - border_style="dim", - padding=(0, 1), - style="dim", - ) - ) - # 根据想法和上下文生成回复 - with console.status( - "[info]✏️ 生成回复中...[/info]", - spinner="dots", - ): - replyer = get_replyer(ctx.llm_service) - reply = await replyer.reply(reason, chat_history) - console.print( - Panel( - Markdown(reply), - title="💬 MaiSaka", - border_style="magenta", - padding=(1, 2), - ) - ) - # 生成的回复作为 tool 结果写入上下文 + if not reason: chat_history.append( { "role": "tool", "tool_call_id": tc.id, - "content": f"已向用户展示(实际输出):{reply}", + "content": "Missing required argument: reason", } ) - else: - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": "reason 内容为空,未展示", - } + return + + console.print( + Panel( + Markdown(reason), + title="Reply Reason", + border_style="dim", + padding=(0, 1), + style="dim", ) + ) + with console.status("[info]Generating visible reply...[/info]", spinner="dots"): + replyer = get_replyer(ctx.llm_service) + reply = await replyer.reply(reason, chat_history) -async def handle_stop(tc, chat_history: list): - """处理 stop 工具:结束对话循环。""" - console.print("[accent]🔧 调用工具: stop()[/accent]") + console.print( + Panel( + Markdown(reply), + title="MaiSaka", + border_style="magenta", + padding=(1, 2), + ) + ) chat_history.append( { "role": "tool", "tool_call_id": tc.id, - "content": "对话循环已停止,等待用户下次输入。", + "content": f"Visible reply generated:\n{reply}", } ) -async def handle_wait(tc, chat_history: list, ctx: ToolHandlerContext) -> str: - """ - 处理 wait 工具:等待用户输入或超时。 +async def handle_stop(tc: Any, chat_history: list[dict[str, Any]]) -> None: + """Handle the stop tool.""" + console.print("[accent]Calling tool: stop()[/accent]") + chat_history.append( + { + "role": "tool", + "tool_call_id": tc.id, + 
"content": "Conversation loop will stop after this round.", + } + ) - Returns: - 工具结果字符串。以 "[[QUIT]]" 开头表示用户要求退出对话。 - """ + +async def handle_wait(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> str: + """Handle the wait tool.""" seconds = tc.arguments.get("seconds", 30) - seconds = max(5, min(seconds, 300)) # 限制 5-300 秒 - console.print(f"[accent]🔧 调用工具: wait({seconds})[/accent]") + seconds = max(5, min(seconds, 300)) + console.print(f"[accent]Calling tool: wait({seconds})[/accent]") tool_result = await _do_wait(seconds, ctx) - chat_history.append( { "role": "tool", @@ -138,62 +127,49 @@ async def handle_wait(tc, chat_history: list, ctx: ToolHandlerContext) -> str: async def _do_wait(seconds: int, ctx: ToolHandlerContext) -> str: - """实际执行等待逻辑。""" - console.print(f"[muted]⏳ 等待回复 (最多 {seconds} 秒)...[/muted]") - console.print("[bold magenta]💬 > [/bold magenta]", end="") + """Wait for user input with a timeout.""" + console.print(f"[muted]Waiting for user input (timeout: {seconds}s)...[/muted]") + console.print("[bold magenta]> [/bold magenta]", end="") user_input = await ctx.reader.get_line(timeout=seconds) if user_input is None: - # 超时 - console.print() # 换行 - console.print("[muted]⏳ 等待超时[/muted]") - return "等待超时,用户未输入任何内容" + console.print() + console.print("[muted]Wait timeout[/muted]") + return "Wait timed out; no user input received." user_input = user_input.strip() - if not user_input: - return "用户发送了空消息" + return "User submitted an empty input." - # 更新 timing 时间戳 now = datetime.now() ctx.last_user_input_time = now ctx.user_input_times.append(now) if user_input.lower() in ("/quit", "/exit", "/q"): - return "[[QUIT]] 用户主动退出了对话" + return "[[QUIT]] User requested to exit." - return f"用户说:{user_input}" + return f"User input received: {user_input}" -async def handle_mcp_tool(tc, chat_history: list, mcp_manager: "MCPManager"): - """ - 处理 MCP 工具调用。 - - 将调用转发到 MCPManager,展示结果并写入对话上下文。 - """ - # 格式化参数预览 +async def handle_mcp_tool(tc: Any, chat_history: list[dict[str, Any]], mcp_manager: "MCPManager") -> None: + """Handle an MCP tool call.""" args_str = _json.dumps(tc.arguments, ensure_ascii=False) args_preview = args_str if len(args_str) <= 120 else args_str[:120] + "..." - console.print(f"[accent]🔌 调用 MCP 工具: {tc.name}({args_preview})[/accent]") + console.print(f"[accent]Calling MCP tool: {tc.name}({args_preview})[/accent]") - with console.status( - f"[info]🔌 MCP 工具 {tc.name} 执行中...[/info]", - spinner="dots", - ): + with console.status(f"[info]Running MCP tool {tc.name}...[/info]", spinner="dots"): result = await mcp_manager.call_tool(tc.name, tc.arguments) - # 展示结果(截断过长内容) - display_text = result if len(result) <= 800 else result[:800] + "\n... (已截断)" + display_text = result if len(result) <= 800 else result[:800] + "\n... 
(truncated)" console.print( Panel( display_text, - title=f"🔌 MCP: {tc.name}", + title=f"MCP: {tc.name}", border_style="bright_green", padding=(0, 1), ) ) - chat_history.append( { "role": "tool", @@ -203,59 +179,50 @@ async def handle_mcp_tool(tc, chat_history: list, mcp_manager: "MCPManager"): ) -async def handle_unknown_tool(tc, chat_history: list): - """处理未知工具调用。""" - console.print(f"[accent]🔧 调用工具: {tc.name}({tc.arguments})[/accent]") +async def handle_unknown_tool(tc: Any, chat_history: list[dict[str, Any]]) -> None: + """Handle an unknown tool call.""" + console.print(f"[accent]Calling unknown tool: {tc.name}({tc.arguments})[/accent]") chat_history.append( { "role": "tool", "tool_call_id": tc.id, - "content": f"未知工具: {tc.name}", + "content": f"Unknown tool: {tc.name}", } ) -async def handle_write_file(tc, chat_history: list): - """处理 write_file 工具:在 mai_files 目录下写入文件。""" +async def handle_write_file(tc: Any, chat_history: list[dict[str, Any]]) -> None: + """Write a file under the local mai_files workspace.""" filename = tc.arguments.get("filename", "") content = tc.arguments.get("content", "") - console.print(f'[accent]🔧 调用工具: write_file("{filename}")[/accent]') + console.print(f'[accent]Calling tool: write_file("{filename}")[/accent]') - # 确保目录存在 MAI_FILES_DIR.mkdir(parents=True, exist_ok=True) - - # 构建完整文件路径 file_path = MAI_FILES_DIR / filename try: - # 创建父目录(如果需要) file_path.parent.mkdir(parents=True, exist_ok=True) + with open(file_path, "w", encoding="utf-8") as file: + file.write(content) - # 写入文件 - with open(file_path, "w", encoding="utf-8") as f: - f.write(content) - - # 获取文件大小 file_size = file_path.stat().st_size - console.print( Panel( - f"文件已写入: {filename}\n大小: {file_size} 字符", - title="📁 文件已保存", + f"Path: {filename}\nSize: {file_size} bytes", + title="File Written", border_style="green", padding=(0, 1), ) ) - chat_history.append( { "role": "tool", "tool_call_id": tc.id, - "content": f"文件「{filename}」已成功写入,共 {file_size} 个字符。", + "content": f"File written successfully: {filename} ({file_size} bytes)", } ) - except Exception as e: - error_msg = f"写入文件失败: {e}" + except Exception as exc: + error_msg = f"Failed to write file: {exc}" console.print(f"[error]{error_msg}[/error]") chat_history.append( { @@ -266,17 +233,16 @@ async def handle_write_file(tc, chat_history: list): ) -async def handle_read_file(tc, chat_history: list): - """处理 read_file 工具:读取 mai_files 目录下的文件。""" +async def handle_read_file(tc: Any, chat_history: list[dict[str, Any]]) -> None: + """Read a file from the local mai_files workspace.""" filename = tc.arguments.get("filename", "") - console.print(f'[accent]🔧 调用工具: read_file("{filename}")[/accent]') + console.print(f'[accent]Calling tool: read_file("{filename}")[/accent]') - # 构建完整文件路径 file_path = MAI_FILES_DIR / filename try: if not file_path.exists(): - error_msg = f"文件「{filename}」不存在。" + error_msg = f"File does not exist: {filename}" console.print(f"[warning]{error_msg}[/warning]") chat_history.append( { @@ -288,7 +254,7 @@ async def handle_read_file(tc, chat_history: list): return if not file_path.is_file(): - error_msg = f"「{filename}」不是一个文件。" + error_msg = f"Path is not a file: {filename}" console.print(f"[warning]{error_msg}[/warning]") chat_history.append( { @@ -299,33 +265,27 @@ async def handle_read_file(tc, chat_history: list): ) return - # 读取文件内容 - with open(file_path, "r", encoding="utf-8") as f: - file_content = f.read() - - # 截断过长内容用于显示 - display_content = file_content - if len(file_content) > 1000: - display_content = file_content[:1000] + "\n... 
(内容已截断)" + with open(file_path, "r", encoding="utf-8") as file: + file_content = file.read() + display_content = file_content if len(file_content) <= 1000 else file_content[:1000] + "\n... (truncated)" console.print( Panel( display_content, - title=f"📄 文件内容: {filename}", + title=f"Read File: {filename}", border_style="blue", padding=(0, 1), ) ) - chat_history.append( { "role": "tool", "tool_call_id": tc.id, - "content": f"文件「{filename}」内容:\n{file_content}", + "content": f"File content of {filename}:\n{file_content}", } ) - except Exception as e: - error_msg = f"读取文件失败: {e}" + except Exception as exc: + error_msg = f"Failed to read file: {exc}" console.print(f"[error]{error_msg}[/error]") chat_history.append( { @@ -336,49 +296,42 @@ async def handle_read_file(tc, chat_history: list): ) -async def handle_list_files(tc, chat_history: list): - """处理 list_files 工具:获取 mai_files 目录下所有文件的元信息。""" - console.print("[accent]🔧 调用工具: list_files()[/accent]") +async def handle_list_files(tc: Any, chat_history: list[dict[str, Any]]) -> None: + """List files under the local mai_files workspace.""" + console.print("[accent]Calling tool: list_files()[/accent]") try: - # 确保目录存在 MAI_FILES_DIR.mkdir(parents=True, exist_ok=True) - # 获取所有文件 - files_info = [] + files_info: list[dict[str, Any]] = [] for item in MAI_FILES_DIR.rglob("*"): if item.is_file(): - # 获取相对路径 - rel_path = item.relative_to(MAI_FILES_DIR) stat = item.stat() files_info.append( { - "name": str(rel_path), + "name": str(item.relative_to(MAI_FILES_DIR)), "size": stat.st_size, "modified": datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S"), } ) if not files_info: - result_text = "mai_files 目录为空,没有任何文件。" + result_text = "No files found under mai_files." else: - # 按名称排序 - files_info.sort(key=lambda x: x["name"]) - # 格式化输出 - lines = [f"📁 mai_files 目录下共有 {len(files_info)} 个文件:\n"] - for info in files_info: - lines.append(f" • {info['name']} ({info['size']} 字节, 修改于 {info['modified']})") + files_info.sort(key=lambda item: item["name"]) + lines = [f"Found {len(files_info)} file(s):\n"] + for item in files_info: + lines.append(f"- {item['name']} ({item['size']} bytes, modified {item['modified']})") result_text = "\n".join(lines) console.print( Panel( result_text, - title="📁 文件列表", + title="File List", border_style="cyan", padding=(0, 1), ) ) - chat_history.append( { "role": "tool", @@ -386,8 +339,8 @@ async def handle_list_files(tc, chat_history: list): "content": result_text, } ) - except Exception as e: - error_msg = f"获取文件列表失败: {e}" + except Exception as exc: + error_msg = f"Failed to list files: {exc}" console.print(f"[error]{error_msg}[/error]") chat_history.append( { @@ -398,160 +351,7 @@ async def handle_list_files(tc, chat_history: list): ) -async def handle_store_context(tc, chat_history: list, ctx: ToolHandlerContext): - """ - 处理 store_context 工具:将指定范围的对话上下文存入记忆系统,然后从对话中移除。 - - 参数: - - count: 要存入记忆的消息数量(从最早的消息开始) - - reason: 存入的原因 - """ - count = tc.arguments.get("count", 0) - reason = tc.arguments.get("reason", "") - console.print(f'[accent]🔧 调用工具: store_context(count={count}, reason="{reason}")[/accent]') - - if count <= 0: - error_msg = "count 参数必须大于 0" - console.print(f"[error]{error_msg}[/error]") - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": error_msg, - } - ) - return - - # 计算实际消息数量(排除 role=tool 的工具返回消息) - actual_messages = [m for m in chat_history if m.get("role") != "tool"] - - if count > len(actual_messages): - error_msg = f"count({count}) 超过了当前对话消息数量({len(actual_messages)})" - 
console.print(f"[warning]{error_msg}[/warning]") - count = len(actual_messages) - - # 找到要移除的消息索引(确保 tool_calls 和 tool 响应成对) - indices_to_remove = [] - removed_count = 0 - i = 0 - - while i < len(chat_history) and removed_count < count: - msg = chat_history[i] - role = msg.get("role", "") - - # 跳过 role=tool 的消息(它们会被对应的 assistant 消息一起处理) - if role == "tool": - i += 1 - continue - - # 检查这是否是一个带 tool_calls 的 assistant 消息 - if role == "assistant" and "tool_calls" in msg: - # 检查这个消息是否包含当前的 tool_call(store_context 自己) - # 如果包含,跳过不删除(否则会导致 tool 响应孤儿) - contains_current_call = any(tc.get("id") == tc.id for tc in msg.get("tool_calls", [])) - if contains_current_call: - i += 1 - continue - - # 收集这个 assistant 消息及其后续的 tool 响应消息 - block_indices = [i] - j = i + 1 - while j < len(chat_history): - next_msg = chat_history[j] - if next_msg.get("role") == "tool": - block_indices.append(j) - j += 1 - else: - break - indices_to_remove.extend(block_indices) - removed_count += 1 - i = j - elif role in ["user", "assistant"]: - # 普通消息,可以直接删除 - indices_to_remove.append(i) - removed_count += 1 - i += 1 - else: - i += 1 - - if not indices_to_remove: - result_msg = "没有找到可存入记忆的消息" - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": result_msg, - } - ) - return - - # 收集要总结的消息(在删除前) - to_compress = [] - for i in sorted(indices_to_remove): - if 0 <= i < len(chat_history): - to_compress.append(chat_history[i]) - - # 总结上下文并压缩 - try: - with console.status( - "[info]📝 正在总结上下文...[/info]", - spinner="dots", - ): - summary = await ctx.llm_service.summarize_context(to_compress) - - if summary: - console.print( - Panel( - Markdown(summary), - title="📝 上下文已压缩", - border_style="green", - padding=(0, 1), - style="dim", - ) - ) - result_msg = f"✅ 已压缩 {len(to_compress)} 条消息\n原因: {reason}" - else: - result_msg = "⚠️ 上下文总结失败" - console.print(f"[warning]{result_msg}[/warning]") - - except Exception as e: - result_msg = f"❌ 总结上下文时出错: {e}" - console.print(f"[error]{result_msg}[/error]") - - # 从后往前删除消息 - for i in sorted(indices_to_remove, reverse=True): - if 0 <= i < len(chat_history): - chat_history.pop(i) - - # 清理"孤儿" tool 消息(没有对应 tool_calls 的 tool 消息) - # 收集所有有效的 tool_call_id - valid_tool_call_ids = set() - for msg in chat_history: - if msg.get("role") == "assistant" and "tool_calls" in msg: - for tool_call in msg["tool_calls"]: - valid_tool_call_ids.add(tool_call.get("id", "")) - - # 删除无效的 tool 消息(从后往前) - i = len(chat_history) - 1 - while i >= 0: - msg = chat_history[i] - if msg.get("role") == "tool": - tool_call_id = msg.get("tool_call_id", "") - if tool_call_id not in valid_tool_call_ids: - chat_history.pop(i) - i -= 1 - - chat_history.append( - { - "role": "tool", - "tool_call_id": tc.id, - "content": result_msg, - } - ) -# ──────────────────── 初始化 mai_files 目录 ──────────────────── - -# 确保程序启动时 mai_files 目录存在 try: MAI_FILES_DIR.mkdir(parents=True, exist_ok=True) -except Exception as e: - console.print(f"[warning]创建 mai_files 目录失败: {e}[/warning]") +except Exception as exc: + console.print(f"[warning]Failed to initialize mai_files directory: {exc}[/warning]") From 6a0b902e17ba675dd59a3d1ae2352455cff1130e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 24 Mar 2026 01:40:03 +0800 Subject: [PATCH 9/9] =?UTF-8?q?doc=EF=BC=9A=E6=96=B0=E5=A2=9E=E8=8B=B1?= =?UTF-8?q?=E6=96=87doc=E4=BD=9C=E4=B8=BA=E9=BB=98=E8=AE=A4=EF=BC=8C?= =?UTF-8?q?=E4=BF=AE=E6=94=B9=E5=86=85=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 60 
++++++++++-------- docs/README_EN.md | 155 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+), 26 deletions(-) create mode 100644 docs/README_EN.md diff --git a/README.md b/README.md index 58d1414d..7c41c8cf 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,13 @@
-

麦麦 MaiBot MaiCore

- + + + 简体中文 | English + +
+
+ +

麦麦 MaiBot MaiSaka

+

Python Version @@ -18,19 +25,26 @@ MaiBot Character -## 🎉 介绍 +## 介绍 -**🍔 MaiCore 是一个基于大语言模型的可交互智能体** +麦麦MaiSaka 是一个基于大语言模型的可交互智能体 -MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊中的“生命体”。她不追求完美,但追求真实。 +MaiSaka 不仅仅是一个机器人,不仅仅是一个可以帮你完成任务的“有帮助的助手”,她还是一个致力于了解你,并以真实人类的风格进行交互的数字生命,她不追求完美,她不追求高效,但追求亲切和真实。 + + +- 💭 **没有人喜欢GPT的语言风格**:麦麦使用了更加自然,贴合人类对话习惯的交互方式,不是长篇大论或者markdown格式的分点,而是或长或短的闲谈。 + +- 🎭 **不再是傻乎乎的一问一答**:懂得在合适的时间说话,把握聊天中的气氛,在合适的时候开口,在合适的时候闭嘴。 + +- 🧠 **麦麦·成为人类**:在多人对话中,麦麦会模仿其他人的说话风格,还会自主理解新词或者小圈子里的黑话,不断进化。 + +- ❤️ **永远都在更加了解你**:基于心理学中的人格理论,麦麦会不断积累对于你的了解,不论是你的信息,喜恶或是行为风格,她都记在心里。 -- 💭 **拟人构建**:使用自然语言风格构建 Prompt,回复贴近人类习惯。 -- 🎭 **行为规划**:懂得在合适的时间说话,使用合适的动作。 -- 🧠 **表达学习**:模仿群友的说话风格,学习黑话,不断进化。 - 🔌 **插件系统**:提供强大的 API 和事件系统,无限扩展可能。 -- 💝 **情感表达**:拥有独立的情绪系统和表情包互动能力。 -### 🚀 快速导航 + + +### 快速导航

🌟 演示视频  |  📦 快速入门  |  @@ -43,14 +57,13 @@ MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊


-

🎥 精彩演示

麦麦演示视频
- 👆 点击观看麦麦演示视频 👆 + 前往观看麦麦演示视频
@@ -58,7 +71,7 @@ MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊 ## 🔥 更新和安装 -> **最新版本: v0.12.2** ([📄 更新日志](changelogs/changelog.md)) +> **最新版本: v1.0.0** ([📄 更新日志](changelogs/changelog.md)) - **下载**: 前往 [Release](https://github.com/MaiM-with-u/MaiBot/releases/) 页面下载最新版本 - **启动器**: [Mailauncher](https://github.com/MaiM-with-u/mailauncher/releases/) (仅支持 MacOS, 早期开发中) @@ -66,17 +79,10 @@ MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊 | 分支 | 说明 | | :--- | :--- | | `main` | ✅ **稳定发布版本 (推荐)** | -| `dev` | 🚧 开发测试版本 (不稳定) | -| `classical` | 🛑 经典版本 (停止维护) | +| `dev` | 🚧 开发测试版本,包含新功能,可能不稳定 | ### 📚 部署教程 👉 **[🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html)** -*(注意:MaiCore 新版本部署方式与旧版本不兼容)* - -> [!WARNING] -> - ⚠️ 项目处于活跃开发阶段,API 可能随时调整。 -> - ⚠️ QQ 机器人存在风控风险,请谨慎使用。 -> - ⚠️ AI 模型运行可能消耗较多 Token。 --- @@ -103,19 +109,20 @@ MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊 ### 🧩 衍生项目 -- **[MaiCraft](https://github.com/MaiM-with-u/Maicraft)**: 让麦麦陪你玩 Minecraft (早期开发中)。 +- **[Amaidesu](https://github.com/MaiM-with-u/Amaidesu)**: 让麦麦在B站开播 - **[MoFox_Bot](https://github.com/MoFox-Studio/MoFox-Core)**: 基于 MaiCore 0.10.0 的增强型 Fork,更稳定更有趣。 +- **[MaiCraft](https://github.com/MaiM-with-u/Maicraft)**: 让麦麦陪你玩 Minecraft (暂时停止维护中)。 --- -## 💡 设计理念 (原始时代的火花) +## 💡 设计理念 > **千石可乐说:** > - 这个项目最初只是为了给牛牛 bot 添加一点额外的功能,但是功能越写越多,最后决定重写。其目的是为了创造一个活跃在 QQ 群聊的"生命体"。目的并不是为了写一个功能齐全的机器人,而是一个尽可能让人感知到真实的类人存在。 > - 程序的功能设计理念基于一个核心的原则:"最像而不是好"。 > - 如果人类真的需要一个 AI 来陪伴自己,并不是所有人都需要一个完美的,能解决所有问题的"helpful assistant",而是一个会犯错的,拥有自己感知和想法的"生命形式"。 -> - 代码会保持开源和开放,但个人希望 MaiMbot 的运行时数据保持封闭,尽量避免以显式命令来对其进行控制和调试。我认为一个你无法完全掌控的个体才更能让你感觉到它的自主性,而视其成为一个对话机器。 -> - SengokuCola~~纯编程外行,面向 cursor 编程,很多代码写得不好多多包涵~~已得到大脑升级。 + +> **xxxxx说:** > *Code is open, but the soul is yours.* --- @@ -132,7 +139,8 @@ MaiBot 不仅仅是一个机器人,她致力于成为一个活跃在 QQ 群聊 ### ❤️ 特别致谢 -- **[略nd](https://space.bilibili.com/1344099355)**: 🎨 为麦麦绘制精美人设。 +- **[萨卡班甲鱼](https://en.wikipedia.org/wiki/Sacabambaspis)**: 千石可乐很喜欢的生物。 +- **[略nd](https://space.bilibili.com/1344099355)**: 🎨 为麦麦绘制早期的精美人设。 - **[NapCat](https://github.com/NapNeko/NapCatQQ)**: 🚀 现代化的基于 NTQQ 的 Bot 协议实现。 --- diff --git a/docs/README_EN.md b/docs/README_EN.md new file mode 100644 index 00000000..f37002fa --- /dev/null +++ b/docs/README_EN.md @@ -0,0 +1,155 @@ +
+ + + 简体中文 | English + +
+
+ +

MaiBot MaiSaka

+ + +

+ Python Version + License + Status + Contributors + Forks + Stars + Ask DeepWiki +

+
+ +
+ + +MaiBot Character + +## Introduction + +MaiSaka is an interactive agent based on large language models. + +MaiSaka is more than just a bot, and more than a "helpful assistant" that completes tasks. She is a digital life form that tries to understand you and interact in a genuinely human style. She does not pursue perfection or efficiency above all else. She pursues warmth and authenticity. + +- 💭 **No one likes GPT-sounding dialogue**: MaiSaka uses a more natural conversational style. Instead of long-winded markdown-heavy replies, she chats in a way that feels casual, varied, and human. +- 🎭 **No longer stuck in rigid Q&A**: She knows when to speak, how to read the room, when to join a conversation, and when to stay quiet. +- 🧠 **MaiSaka becoming human**: In group conversations, MaiSaka imitates how people around her speak, learns new slang and in-group language, and keeps evolving. +- ❤️ **Always learning more about you**: Inspired by personality theory in psychology, MaiSaka gradually builds an understanding of your preferences, traits, habits, and behavior style. +- 🔌 **Plugin system**: Provides powerful APIs and an event system with virtually unlimited room for extension. + +### Quick Navigation +

+ 🌟 Demo Video  |  + 📦 Quick Start  |  + 📃 Core Documentation  |  + 💬 Join Community +

+ + +
+ +
+
+ + + + MaiSaka Demo Video + +
+ Watch the MaiSaka demo video +
+
+ +--- + +## 🔥 Updates and Installation + +> **Latest Version: v1.0.0** ([📄 Changelog](../changelogs/changelog.md)) + +- **Download**: Visit the [Release](https://github.com/MaiM-with-u/MaiBot/releases/) page to get the latest version. +- **Launcher**: [Mailauncher](https://github.com/MaiM-with-u/mailauncher/releases/) (MacOS only, still in early development). + +| Branch | Description | +| :--- | :--- | +| `main` | ✅ **Stable release (recommended)** | +| `dev` | 🚧 Development testing branch with new features, may be unstable | + +### 📚 Deployment Guide +👉 **[🚀 Latest Deployment Guide](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html)** + +--- + +## 💬 Discussion and Community + +We welcome everyone interested in MaiBot to join us. + +| Category | Group | Description | +| :--- | :--- | :--- | +| **Technical** | [MaiBrain EEG](https://qm.qq.com/q/RzmCiRtHEW) | Technical discussion / Q&A | +| **Technical** | [MaiBrain MRI](https://qm.qq.com/q/VQ3XZrWgMs) | Technical discussion / Q&A | +| **Technical** | [Mai Wants to Be a VTuber](https://qm.qq.com/q/wGePTl1UyY) | Technical discussion / Q&A | +| **Casual Chat** | [Mai Casual Chat Group](https://qm.qq.com/q/JxvHZnxyec) | Casual chat only, no support | +| **Plugin Development** | [Plugin Dev Group](https://qm.qq.com/q/1036092828) | Advanced development and testing | + +--- + +## 📚 Documentation + +> [!NOTE] +> Some content may not be updated promptly, so please pay attention to version compatibility. + +- **[📚 Core Wiki Documentation](https://docs.mai-mai.org)**: The most comprehensive documentation hub for everything about MaiSaka. + +### 🧩 Related Projects + +- **[Amaidesu](https://github.com/MaiM-with-u/Amaidesu)**: Let MaiSaka stream on Bilibili. +- **[MoFox_Bot](https://github.com/MoFox-Studio/MoFox-Core)**: An enhanced fork based on MaiCore 0.10.0, with improved stability and more fun features. +- **[MaiCraft](https://github.com/MaiM-with-u/Maicraft)**: Let MaiSaka accompany you in Minecraft (currently paused). + +--- + +## 💡 Design Philosophy + +> **SengokuCola says:** +> - This project originally started as a few extra features for the NiuNiu bot, but it kept growing until a full rewrite became inevitable. The goal was to create a "life form" active in QQ group chats, not a feature-complete bot, but something as human-like and real-feeling as possible. +> - The core design principle is: "more lifelike, not merely better." +> - If people truly want AI companionship, not everyone needs a perfect "helpful assistant" that solves every problem. Some people may want a life form that can make mistakes and has its own perceptions and thoughts. + +> **xxxxx says:** +> *Code is open, but the soul is yours.* + +--- + +## 🙋 Contributing and Acknowledgments + +Contributions are welcome. Please read the [Contribution Guide](../docs-src/CONTRIBUTE.md) first. + +### 🌟 Contributors + + + contributors + + +### ❤️ Special Thanks + +- **[Sacabambaspis](https://en.wikipedia.org/wiki/Sacabambaspis)**: SengokuCola's favorite creature. +- **[略nd](https://space.bilibili.com/1344099355)**: Drew MaiSaka's beautiful early character design. +- **[NapCat](https://github.com/NapNeko/NapCatQQ)**: A modern NTQQ-based bot protocol implementation. 
+ +--- + +## 📊 Repository Status + +![Alt](https://repobeats.axiom.co/api/embed/9faca9fccfc467931b87dd357b60c6362b5cfae0.svg "MaiBot Repository Status") + +### Star History +[![Star History](https://starchart.cc/MaiM-with-u/MaiBot.svg?variant=adaptive)](https://starchart.cc/MaiM-with-u/MaiBot) + +--- + +## 📌 Notice & License + +> [!IMPORTANT] +> Please read the [End User License Agreement (EULA)](../EULA.md) and [Privacy Policy](../PRIVACY.md) before use. Please evaluate AI-generated content carefully. + +**License**: GPL-3.0