feat: 优化 MaiSaka

This commit is contained in:
SengokuCola
2026-03-24 01:30:36 +08:00
parent 620f682c38
commit f431d78bff
9 changed files with 999 additions and 1099 deletions

View File

@@ -1,130 +1,86 @@
"""
MaiSaka - 内置工具定义
定义 say, wait, stop, store_context 等内置工具
使用主项目的工具格式ToolOption + ToolParamType
MaiSaka built-in tool definitions.
"""
from typing import Any, Dict, List
from src.llm_models.payload_content.tool_option import ToolOption, ToolParamType
# 内置工具定义
def create_builtin_tools() -> List[ToolOption]:
    """Create built-in tools exposed to the main chat-loop model.

    Returns:
        A list of ToolOption objects: send_message, wait, reply,
        no_reply, stop and store_context.
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from src.llm_models.payload_content.tool_option import ToolOptionBuilder

    tools: List[ToolOption] = []

    # send_message: every user-visible utterance must be routed through it;
    # plain model output is treated as inner thought.
    send_message_builder = ToolOptionBuilder()
    send_message_builder.set_name("send_message")
    send_message_builder.set_description(
        "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。直接输出的文本会被视为你的内心思考用户无法阅读。reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。"
    )
    send_message_builder.add_param(
        name="reason",
        param_type=ToolParamType.STRING,
        description="描述你想要回复的方式、想法和内容。例如:'同意对方的看法,并分享自己的经历''礼貌地拒绝,表示现在不方便聊天'",
        required=True,
        enum_values=None,
    )
    tools.append(send_message_builder.build())

    # wait: pause and hand the turn back to the user.
    wait_builder = ToolOptionBuilder()
    wait_builder.set_name("wait")
    wait_builder.set_description("Pause speaking and wait for the user to provide more input.")
    wait_builder.add_param(
        name="seconds",
        param_type=ToolParamType.INTEGER,
        description="How many seconds to wait before timing out.",
        required=True,
        enum_values=None,
    )
    tools.append(wait_builder.build())

    # reply: emit a visible reply based on the current thought.
    reply_builder = ToolOptionBuilder()
    reply_builder.set_name("reply")
    reply_builder.set_description("Generate and emit a visible reply based on the current thought.")
    tools.append(reply_builder.build())

    # no_reply: skip this round's visible reply and keep thinking.
    no_reply_builder = ToolOptionBuilder()
    no_reply_builder.set_name("no_reply")
    no_reply_builder.set_description("Do not emit a visible reply this round and continue thinking.")
    tools.append(no_reply_builder.build())

    # stop: leave the inner loop and return to the outer chat flow.
    stop_builder = ToolOptionBuilder()
    stop_builder.set_name("stop")
    stop_builder.set_description("Stop the current inner loop and return control to the outer chat flow.")
    tools.append(stop_builder.build())

    # store_context: summarize a slice of the conversation into memory
    # and drop it from the live context.
    store_context_builder = ToolOptionBuilder()
    store_context_builder.set_name("store_context")
    store_context_builder.set_description(
        "将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。"
    )
    store_context_builder.add_param(
        name="count",
        param_type=ToolParamType.INTEGER,
        description="要保存的消息条数(从最早的对话开始计数)。建议 5-20 条。",
        required=True,
        enum_values=None,
    )
    store_context_builder.add_param(
        name="reason",
        param_type=ToolParamType.STRING,
        description="保存原因,用于后续检索。例如:'讨论了用户的工作情况''用户分享了对电影的看法'",
        required=True,
        enum_values=None,
    )
    tools.append(store_context_builder.build())

    return tools
# 为了兼容性,创建一个函数来将工具转换为 dict 格式(用于调试显示)
def builtin_tools_as_dicts() -> List[Dict[str, Any]]:
    """Return built-in tools as plain dictionaries (for debugging display)."""
    return [
        {
            "name": "send_message",
            "description": "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。",
            "parameters": {
                "type": "object",
                "properties": {"reason": {"type": "string", "description": "回复的想法和内容"}},
                "required": ["reason"],
            },
        },
        {
            "name": "wait",
            "description": "Pause speaking and wait for the user to provide more input.",
            "parameters": {
                "type": "object",
                "properties": {
                    "seconds": {
                        "type": "number",
                        "description": "How many seconds to wait before timing out.",
                    }
                },
                "required": ["seconds"],
            },
        },
        {
            "name": "reply",
            "description": "Generate and emit a visible reply based on the current thought.",
            "parameters": {"type": "object", "properties": {}, "required": []},
        },
        {
            "name": "no_reply",
            "description": "Do not emit a visible reply this round and continue thinking.",
            "parameters": {"type": "object", "properties": {}, "required": []},
        },
        {
            "name": "stop",
            "description": "Stop the current inner loop and return control to the outer chat flow.",
            "parameters": {"type": "object", "properties": {}, "required": []},
        },
    ]
# Exported helper for obtaining the built-in tool list.
def get_builtin_tools() -> List[ToolOption]:
    """Return built-in tools."""
    return create_builtin_tools()
# Backward compatibility: also export the dict-format tool list, computed once at import.
BUILTIN_TOOLS_DICTS = builtin_tools_as_dicts()

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,14 @@
"""
"""
MaiSaka LLM 服务 - 使用主项目 LLM 系统
将主项目的 LLMRequest 适配为 MaiSaka 需要的接口
"""
from datetime import datetime
import json
import random
from dataclasses import dataclass
from typing import Any, List, Literal, Optional
import json
from rich.console import Group
from rich.panel import Panel
@@ -13,7 +16,7 @@ from rich.pretty import Pretty
from rich.text import Text
from src.common.logger import get_logger
from src.config.config import config_manager
from src.config.config import config_manager, global_config
from src.llm_models.payload_content.message import MessageBuilder, RoleType
from src.llm_models.payload_content.tool_option import ToolCall as ToolCallOption, ToolOption
from src.llm_models.utils_model import LLMRequest
@@ -58,7 +61,13 @@ class ChatResponse:
def build_message(role: str, content: str, msg_type: MessageType = "user", **kwargs) -> dict:
    """Build a message dict tagged with its type and a wall-clock timestamp.

    Args:
        role: Chat role ("user", "assistant", "system", "tool", ...).
        content: Message text.
        msg_type: Internal message-type tag stored under MSG_TYPE_FIELD.
        **kwargs: Extra fields merged into the dict (may override "_time").

    Returns:
        The assembled message dict; "_time" is set to the current HH:MM:SS.
    """
    return {
        "role": role,
        "content": content,
        MSG_TYPE_FIELD: msg_type,
        "_time": datetime.now().strftime("%H:%M:%S"),
        **kwargs,
    }
@@ -107,8 +116,8 @@ class MaiSakaLLMService:
# 初始化 LLMRequest 实例(只使用 tool_use 和 replyer
self._llm_tool_use = LLMRequest(model_set=self._model_configs.tool_use, request_type="maisaka_tool_use")
# 主对话也使用 tool_use 模型(因为需要工具调用支持)
self._llm_chat = self._llm_tool_use
# 分析模块也使用 tool_use 模型
self._llm_planner = LLMRequest(model_set=self._model_configs.planner, request_type="maisaka_planner")
self._llm_chat = self._llm_planner
self._llm_utils = self._llm_tool_use
# 回复生成使用 replyer 模型
self._llm_replyer = LLMRequest(model_set=self._model_configs.replyer, request_type="maisaka_replyer")
@@ -116,6 +125,9 @@ class MaiSakaLLMService:
# 尝试修复数据库 schema忽略错误
self._try_fix_database_schema()
# 构建人设信息
personality_prompt = self._build_personality_prompt()
# 加载系统提示词
if chat_system_prompt is None:
try:
@@ -130,6 +142,7 @@ class MaiSakaLLMService:
tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。"
chat_prompt.add_context("file_tools_section", tools_section if tools_section else "")
chat_prompt.add_context("identity", personality_prompt)
import asyncio
loop = asyncio.new_event_loop()
@@ -141,15 +154,15 @@ class MaiSakaLLMService:
loop.close()
except Exception as e:
logger.error(f"加载系统提示词失败: {e}")
self._chat_system_prompt = "你是一个友好的 AI 助手。"
self._chat_system_prompt = f"{personality_prompt}\n\n你是一个友好的 AI 助手。"
else:
self._chat_system_prompt = chat_system_prompt
# 获取模型名称用于显示
self._model_name = (
self._model_configs.tool_use.model_list[0] if self._model_configs.tool_use.model_list else "未配置"
self._model_configs.planner.model_list[0] if self._model_configs.planner.model_list else "未配置"
)
# 加载子模块提示词
self._emotion_prompt: Optional[str] = None
self._cognition_prompt: Optional[str] = None
@@ -200,6 +213,37 @@ class MaiSakaLLMService:
# 静默忽略任何错误,不影响正常流程
pass
def _build_personality_prompt(self) -> str:
    """Build the persona prompt string, mirroring the replyer's approach.

    Returns:
        A sentence describing the bot's name, aliases and personality;
        a hard-coded default persona if config access fails.
    """
    try:
        bot_name = global_config.bot.nickname
        if global_config.bot.alias_names:
            bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
        else:
            bot_nickname = ""
        # Base personality text from config.
        prompt_personality = global_config.personality.personality
        # Optionally swap in a random "state" personality, gated by
        # state_probability (hasattr-guarded for older configs).
        if (
            hasattr(global_config.personality, "states")
            and global_config.personality.states
            and hasattr(global_config.personality, "state_probability")
            and global_config.personality.state_probability > 0
            and random.random() < global_config.personality.state_probability
        ):
            # Randomly pick one state to replace the base personality.
            selected_state = random.choice(global_config.personality.states)
            prompt_personality = selected_state
        prompt_personality = f"{prompt_personality};"
        return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
    except Exception as e:
        logger.warning(f"构建人设信息失败: {e}")
        # Fall back to the default persona on any config error.
        return "你的名字是麦麦你是一个活泼可爱的AI助手。"
def set_extra_tools(self, tools: List[dict]) -> None:
"""设置额外的工具定义(如 MCP 工具)"""
self._extra_tools = list(tools)
@@ -390,14 +434,34 @@ class MaiSakaLLMService:
# 打印消息列表
built_messages = message_factory(None)
console.print(
Panel(
Group(*[self._render_message_panel(msg, index + 1) for index, msg in enumerate(built_messages)]),
title="MaiSaka LLM Request - chat_loop_step",
border_style="cyan",
padding=(0, 1),
# 将消息分为普通消息和 tool 消息
non_tool_panels = []
tool_panels = []
for index, msg in enumerate(built_messages):
panel = self._render_message_panel(msg, index + 1)
role = msg.role.value if hasattr(msg.role, "value") else str(msg.role)
if role == "tool":
tool_panels.append(panel)
else:
non_tool_panels.append(panel)
# 先显示普通消息group 在一个 panel 内)
if non_tool_panels:
console.print(
Panel(
Group(*non_tool_panels),
title="MaiSaka LLM Request - chat_loop_step",
border_style="cyan",
padding=(0, 1),
)
)
)
# tool 消息作为单独的块展示
for panel in tool_panels:
console.print(panel)
response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async(
message_factory=message_factory,
@@ -424,7 +488,11 @@ class MaiSakaLLMService:
)
# 构建原始消息格式MaiSaka 风格)
raw_message = {"role": "assistant", "content": response}
raw_message = {
"role": "assistant",
"content": response,
"_time": datetime.now().strftime("%H:%M:%S"),
}
if converted_tool_calls:
raw_message["tool_calls"] = [
{
@@ -660,8 +728,12 @@ class MaiSakaLLMService:
temperature=0.8,
max_tokens=512,
)
return response.strip() if response else "..."
except Exception as e:
logger.error(f"回复生成 LLM 调用出错: {e}")
return "..."

View File

@@ -12,8 +12,6 @@ from .connection import MCPConnection, MCP_AVAILABLE
# 内置工具名称集合 —— MCP 工具不允许与这些名称冲突
BUILTIN_TOOL_NAMES = frozenset(
{
"say",
"send_message",
"wait",
"stop",
"create_table",

View File

@@ -1,76 +1,94 @@
"""
MaiSaka - Reply 回复生成器
根据想法和上下文生成口语化回复。
MaiSaka reply helper.
"""
from typing import Optional
from datetime import datetime
from typing import Any, Optional
from src.config.config import global_config
from .llm_service import MaiSakaLLMService
VISIBLE_REPLY_PREFIX = "\u3010\u9ea6\u9ea6\u7684\u53d1\u8a00\u3011"
def format_chat_history(messages: list) -> str:
"""将聊天消息列表格式化为可读文本。"""
def _normalize_content(content: str, limit: int = 500) -> str:
normalized = " ".join((content or "").split())
if len(normalized) > limit:
return normalized[:limit] + "..."
return normalized
def _format_message_time(_: dict[str, Any]) -> str:
return datetime.now().strftime("%H:%M:%S")
def _extract_visible_assistant_reply(message: dict[str, Any]) -> str:
    """Return normalized text after the last [generated_reply] marker, or ""."""
    marker = "[generated_reply]"
    # Perception messages never carry a visible reply.
    if message.get("_type") == "perception":
        return ""
    text = (message.get("content", "") or "").strip()
    if text and marker in text:
        tail = text.rsplit(marker, 1)[1]
        return _normalize_content(tail)
    return ""
def _extract_guided_bot_reply(message: dict[str, Any]) -> str:
    """Return the reply text after VISIBLE_REPLY_PREFIX, or "" when absent."""
    text = (message.get("content", "") or "").strip()
    if not text.startswith(VISIBLE_REPLY_PREFIX):
        return ""
    return _normalize_content(text[len(VISIBLE_REPLY_PREFIX) :].strip())
def format_chat_history(messages: list[dict[str, Any]]) -> str:
    """Format visible chat history for reply generation.

    Only user-visible content is kept: plain user messages, analyzer-guided
    bot utterances embedded in user-role messages, and the visible part of
    assistant replies (after the [generated_reply] marker).
    """
    bot_nickname = global_config.bot.nickname.strip() or "Bot"
    parts: list[str] = []
    for message in messages:
        role = message.get("role", "")
        timestamp = _format_message_time(message)
        if role == "user":
            # Analyzer-guided utterances are attributed to the bot, not the user.
            guided_reply = _extract_guided_bot_reply(message)
            if guided_reply:
                parts.append(f"{timestamp} {bot_nickname}(分析器指导的麦麦发言):{guided_reply}")
                continue
            content = _normalize_content(message.get("content", "") or "")
            if content:
                parts.append(f"{timestamp} 用户:{content}")
            continue
        if role == "assistant":
            visible_reply = _extract_visible_assistant_reply(message)
            if visible_reply:
                parts.append(f"{timestamp} {bot_nickname}(你):{visible_reply}")
    return "\n".join(parts)
class Replyer:
    """Generate visible replies from thoughts and context."""

    def __init__(self, llm_service: Optional[MaiSakaLLMService] = None):
        # LLM backend used for reply generation; may be injected later.
        self._llm_service = llm_service
        # When disabled, reply() short-circuits to a placeholder.
        self._enabled = True

    def set_llm_service(self, llm_service: MaiSakaLLMService) -> None:
        """Inject the LLM service used for reply generation."""
        self._llm_service = llm_service

    def set_enabled(self, enabled: bool) -> None:
        """Enable or disable reply generation."""
        self._enabled = enabled

    async def reply(self, reason: str, chat_history: list[dict[str, Any]]) -> str:
        """Generate a visible reply for *reason* given *chat_history*.

        Returns:
            The generated reply, or "..." when disabled, when the reason is
            empty, or when no LLM service has been configured.
        """
        if not self._enabled or not reason or self._llm_service is None:
            return "..."
        # Delegate to the main project's replyer-model-backed generator.
        return await self._llm_service.generate_reply(reason, chat_history)

View File

@@ -1,16 +1,16 @@
"""
MaiSaka - 工具调用处理器
处理 LLM 循环中各工具say/wait/stop/file/MCP的执行逻辑。
MaiSaka tool handlers.
"""
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional
import json as _json
import os
from datetime import datetime
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from rich.panel import Panel
from rich.markdown import Markdown
from rich.panel import Panel
from .config import console
from .input_reader import InputReader
if TYPE_CHECKING:
    from .mcp_client import MCPManager

# Path to the local mai_files workspace directory (next to this module).
MAI_FILES_DIR = Path(os.path.join(os.path.dirname(os.path.abspath(__file__)), "mai_files"))

# Module-level singleton replyer.
_replyer: Optional[Replyer] = None


def get_replyer(llm_service: MaiSakaLLMService) -> Replyer:
    """Return a shared replyer instance (lazily created singleton)."""
    global _replyer
    if _replyer is None:
        _replyer = Replyer(llm_service)
    return _replyer
class ToolHandlerContext:
    """Shared context for tool handlers."""

    def __init__(
        self,
        llm_service: MaiSakaLLMService,
        reader: InputReader,
        user_input_times: list[datetime],
    ) -> None:
        self.llm_service = llm_service
        self.reader = reader
        # Shared (mutable) list of user-input timestamps; appended to by _do_wait.
        self.user_input_times = user_input_times
        # Timestamp of the most recent user input; None until the first arrives.
        self.last_user_input_time: Optional[datetime] = None
async def handle_send_message(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> None:
    """Backward-compatible handler for legacy send-message style tools.

    Shows the model's reply intent, generates the visible reply via the
    shared Replyer, prints it, and records the result as a tool message.
    """
    reason = tc.arguments.get("reason", "")
    console.print("[accent]Calling tool: send_message(...)[/accent]")
    if not reason:
        # The model must supply a reason; surface the error back to it.
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": "Missing required argument: reason",
            }
        )
        return
    # Show the reply intent dimmed (inner thought, not the visible reply).
    console.print(
        Panel(
            Markdown(reason),
            title="Reply Reason",
            border_style="dim",
            padding=(0, 1),
            style="dim",
        )
    )
    with console.status("[info]Generating visible reply...[/info]", spinner="dots"):
        replyer = get_replyer(ctx.llm_service)
        reply = await replyer.reply(reason, chat_history)
    console.print(
        Panel(
            Markdown(reply),
            title="MaiSaka",
            border_style="magenta",
            padding=(1, 2),
        )
    )
    # Feed the generated reply back into the context as the tool result.
    chat_history.append(
        {
            "role": "tool",
            "tool_call_id": tc.id,
            "content": f"Visible reply generated:\n{reply}",
        }
    )


async def handle_stop(tc: Any, chat_history: list[dict[str, Any]]) -> None:
    """Handle the stop tool: end the inner conversation loop."""
    console.print("[accent]Calling tool: stop()[/accent]")
    chat_history.append(
        {
            "role": "tool",
            "tool_call_id": tc.id,
            "content": "Conversation loop will stop after this round.",
        }
    )
async def handle_wait(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> str:
    """Handle the wait tool.

    Returns:
        The tool result string; a "[[QUIT]]" prefix means the user asked to exit.
    """
    seconds = tc.arguments.get("seconds", 30)
    # Clamp to 5-300 seconds so the model cannot request absurd waits.
    seconds = max(5, min(seconds, 300))
    console.print(f"[accent]Calling tool: wait({seconds})[/accent]")
    tool_result = await _do_wait(seconds, ctx)
    chat_history.append(
        {
            "role": "tool",
            # NOTE(review): the tail of this append was outside the visible diff
            # hunk; reconstructed as tool_call_id + result content — confirm in VCS.
            "tool_call_id": tc.id,
            "content": tool_result,
        }
    )
    return tool_result
async def _do_wait(seconds: int, ctx: ToolHandlerContext) -> str:
    """Wait for user input with a timeout.

    Returns:
        A human-readable result string; "[[QUIT]]"-prefixed when the user
        typed /quit, /exit or /q.
    """
    console.print(f"[muted]Waiting for user input (timeout: {seconds}s)...[/muted]")
    console.print("[bold magenta]> [/bold magenta]", end="")
    user_input = await ctx.reader.get_line(timeout=seconds)
    if user_input is None:
        console.print()  # newline after the dangling prompt
        console.print("[muted]Wait timeout[/muted]")
        return "Wait timed out; no user input received."
    user_input = user_input.strip()
    if not user_input:
        return "User submitted an empty input."
    # Record timing so callers can track user activity.
    now = datetime.now()
    ctx.last_user_input_time = now
    ctx.user_input_times.append(now)
    if user_input.lower() in ("/quit", "/exit", "/q"):
        return "[[QUIT]] User requested to exit."
    return f"User input received: {user_input}"
async def handle_mcp_tool(tc: Any, chat_history: list[dict[str, Any]], mcp_manager: "MCPManager") -> None:
    """Handle an MCP tool call: forward to MCPManager, display, record result."""
    args_str = _json.dumps(tc.arguments, ensure_ascii=False)
    # Truncate the argument preview for the console line.
    args_preview = args_str if len(args_str) <= 120 else args_str[:120] + "..."
    console.print(f"[accent]Calling MCP tool: {tc.name}({args_preview})[/accent]")
    with console.status(f"[info]Running MCP tool {tc.name}...[/info]", spinner="dots"):
        result = await mcp_manager.call_tool(tc.name, tc.arguments)
    # Truncate long output for display only; the full result goes to history.
    display_text = result if len(result) <= 800 else result[:800] + "\n... (truncated)"
    console.print(
        Panel(
            display_text,
            title=f"MCP: {tc.name}",
            border_style="bright_green",
            padding=(0, 1),
        )
    )
    chat_history.append(
        {
            "role": "tool",
            # NOTE(review): the middle of this append was outside the visible diff
            # hunk; reconstructed as tool_call_id + full result — confirm in VCS.
            "tool_call_id": tc.id,
            "content": result,
        }
    )
async def handle_unknown_tool(tc: Any, chat_history: list[dict[str, Any]]) -> None:
    """Handle an unknown tool call by reporting it back to the model."""
    console.print(f"[accent]Calling unknown tool: {tc.name}({tc.arguments})[/accent]")
    chat_history.append(
        {
            "role": "tool",
            "tool_call_id": tc.id,
            "content": f"Unknown tool: {tc.name}",
        }
    )
async def handle_write_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
    """Write a file under the local mai_files workspace.

    NOTE(review): *filename* comes from the model and is joined onto
    MAI_FILES_DIR without traversal checks — a "../" filename can escape
    the workspace. Consider validating/resolving before writing.
    """
    filename = tc.arguments.get("filename", "")
    content = tc.arguments.get("content", "")
    # NOTE(review): the extraction mangled the f-string placeholder to a
    # literal "(unknown)"; restored to {filename} — confirm against VCS.
    console.print(f'[accent]Calling tool: write_file("{filename}")[/accent]')
    # Ensure the workspace exists before resolving the target path.
    MAI_FILES_DIR.mkdir(parents=True, exist_ok=True)
    file_path = MAI_FILES_DIR / filename
    try:
        # Create intermediate directories for nested filenames.
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, "w", encoding="utf-8") as fh:
            fh.write(content)
        file_size = file_path.stat().st_size
        console.print(
            Panel(
                f"Path: {file_path}\nSize: {file_size} bytes",
                title="File Written",
                border_style="green",
                padding=(0, 1),
            )
        )
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": f"File written successfully: {filename} ({file_size} bytes)",
            }
        )
    except Exception as exc:
        error_msg = f"Failed to write file: {exc}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
async def handle_read_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
    """Read a file from the local mai_files workspace and record its content."""
    filename = tc.arguments.get("filename", "")
    # NOTE(review): the extraction mangled the f-string placeholder to a
    # literal "(unknown)"; restored to {filename} — confirm against VCS.
    console.print(f'[accent]Calling tool: read_file("{filename}")[/accent]')
    file_path = MAI_FILES_DIR / filename
    try:
        if not file_path.exists():
            error_msg = f"File does not exist: {filename}"
            console.print(f"[warning]{error_msg}[/warning]")
            chat_history.append(
                {
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": error_msg,
                }
            )
            return
        if not file_path.is_file():
            error_msg = f"Path is not a file: {filename}"
            console.print(f"[warning]{error_msg}[/warning]")
            chat_history.append(
                {
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": error_msg,
                }
            )
            return
        with open(file_path, "r", encoding="utf-8") as fh:
            file_content = fh.read()
        # Truncate only the console display; the model sees the full content.
        display_content = file_content if len(file_content) <= 1000 else file_content[:1000] + "\n... (truncated)"
        console.print(
            Panel(
                display_content,
                title=f"Read File: {filename}",
                border_style="blue",
                padding=(0, 1),
            )
        )
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": f"File content of {filename}:\n{file_content}",
            }
        )
    except Exception as exc:
        error_msg = f"Failed to read file: {exc}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
async def handle_list_files(tc: Any, chat_history: list[dict[str, Any]]) -> None:
    """List files under the local mai_files workspace with size and mtime."""
    console.print("[accent]Calling tool: list_files()[/accent]")
    try:
        MAI_FILES_DIR.mkdir(parents=True, exist_ok=True)
        files_info: list[dict[str, Any]] = []
        for item in MAI_FILES_DIR.rglob("*"):
            if item.is_file():
                stat = item.stat()
                files_info.append(
                    {
                        "name": str(item.relative_to(MAI_FILES_DIR)),
                        "size": stat.st_size,
                        "modified": datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S"),
                    }
                )
        if not files_info:
            result_text = "No files found under mai_files."
        else:
            files_info.sort(key=lambda entry: entry["name"])
            lines = [f"Found {len(files_info)} file(s):\n"]
            for entry in files_info:
                lines.append(f"- {entry['name']} ({entry['size']} bytes, modified {entry['modified']})")
            result_text = "\n".join(lines)
        console.print(
            Panel(
                result_text,
                title="File List",
                border_style="cyan",
                padding=(0, 1),
            )
        )
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": result_text,
            }
        )
    except Exception as exc:
        error_msg = f"Failed to list files: {exc}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
async def handle_store_context(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> None:
    """Handle the store_context tool: summarize old messages, then drop them.

    Tool arguments:
        count: number of (non-tool) messages to compress, from the oldest.
        reason: why the messages are being stored; used for later retrieval.
    """
    count = tc.arguments.get("count", 0)
    reason = tc.arguments.get("reason", "")
    console.print(f'[accent]🔧 调用工具: store_context(count={count}, reason="{reason}")[/accent]')

    if count <= 0:
        error_msg = "count 参数必须大于 0"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return

    # Count real messages; tool responses are removed with their caller.
    actual_messages = [m for m in chat_history if m.get("role") != "tool"]
    if count > len(actual_messages):
        error_msg = f"count({count}) 超过了当前对话消息数量({len(actual_messages)})"
        console.print(f"[warning]{error_msg}[/warning]")
        count = len(actual_messages)

    # Collect indices to remove, keeping each assistant tool_calls message
    # paired with its following tool-response messages.
    indices_to_remove: list[int] = []
    removed_count = 0
    i = 0
    while i < len(chat_history) and removed_count < count:
        msg = chat_history[i]
        role = msg.get("role", "")
        if role == "tool":
            # Handled together with the assistant message that produced it.
            i += 1
            continue
        if role == "assistant" and "tool_calls" in msg:
            # BUG FIX: the original comprehension shadowed `tc`
            # (`for tc in msg.get("tool_calls", ...)`), so each call's id was
            # compared with itself and every assistant block was skipped.
            contains_current_call = any(call.get("id") == tc.id for call in msg.get("tool_calls", []))
            if contains_current_call:
                # Never remove the block containing this store_context call,
                # or its own tool response would be orphaned.
                i += 1
                continue
            # Gather the assistant message plus its trailing tool responses.
            block_indices = [i]
            j = i + 1
            while j < len(chat_history) and chat_history[j].get("role") == "tool":
                block_indices.append(j)
                j += 1
            indices_to_remove.extend(block_indices)
            removed_count += 1
            i = j
        elif role in ["user", "assistant"]:
            # Plain message: removable on its own.
            indices_to_remove.append(i)
            removed_count += 1
            i += 1
        else:
            i += 1

    if not indices_to_remove:
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": "没有找到可存入记忆的消息",
            }
        )
        return

    # Snapshot the messages to summarize before mutating chat_history.
    to_compress = [chat_history[idx] for idx in sorted(indices_to_remove) if 0 <= idx < len(chat_history)]

    try:
        with console.status(
            "[info]📝 正在总结上下文...[/info]",
            spinner="dots",
        ):
            summary = await ctx.llm_service.summarize_context(to_compress)
        if summary:
            console.print(
                Panel(
                    Markdown(summary),
                    title="📝 上下文已压缩",
                    border_style="green",
                    padding=(0, 1),
                    style="dim",
                )
            )
            result_msg = f"✅ 已压缩 {len(to_compress)} 条消息\n原因: {reason}"
        else:
            result_msg = "⚠️ 上下文总结失败"
            console.print(f"[warning]{result_msg}[/warning]")
    except Exception as e:
        result_msg = f"❌ 总结上下文时出错: {e}"
        console.print(f"[error]{result_msg}[/error]")

    # Delete from the back so earlier indices stay valid.
    for idx in sorted(indices_to_remove, reverse=True):
        if 0 <= idx < len(chat_history):
            chat_history.pop(idx)

    # Drop orphaned tool responses whose assistant tool_calls were removed.
    valid_tool_call_ids: set[str] = set()
    for msg in chat_history:
        if msg.get("role") == "assistant" and "tool_calls" in msg:
            for tool_call in msg["tool_calls"]:
                valid_tool_call_ids.add(tool_call.get("id", ""))
    idx = len(chat_history) - 1
    while idx >= 0:
        msg = chat_history[idx]
        if msg.get("role") == "tool" and msg.get("tool_call_id", "") not in valid_tool_call_ids:
            chat_history.pop(idx)
        idx -= 1

    chat_history.append(
        {
            "role": "tool",
            "tool_call_id": tc.id,
            "content": result_msg,
        }
    )
# ──────────────────── Initialize the mai_files workspace ────────────────────
# Ensure the directory exists at import time; failure is non-fatal.
try:
    MAI_FILES_DIR.mkdir(parents=True, exist_ok=True)
except Exception as exc:
    console.print(f"[warning]Failed to initialize mai_files directory: {exc}[/warning]")