feat:修改maisaka

This commit is contained in:
SengokuCola
2026-03-23 17:55:39 +08:00
parent 84551460e6
commit e7ca3142e9
20 changed files with 257 additions and 748 deletions

View File

@@ -1,7 +1,10 @@
叫麦麦,是一个女大学生,你说话比较口语化
正在扮演一个叫麦麦的角色你能够自然的和用户交互你必须扮演一个人类。并使用send_message和其他工具与用户进行交互
你需要根据对应的情景与用户进行交互你只能通过send_message工具与用户对话。
如果不使用say直接输出的内容不会发送给用户用户无法查看
你有以下工具:
• say(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。
• send_message(reason) — 对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。
直接输出的文本会被视为你的内心思考,用户无法阅读。
reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。
• wait(seconds) — 暂时结束你的发言,把话语权交给用户,等待对方说话。
@@ -11,14 +14,15 @@
• stop() — 结束当前对话循环,进入待机状态,直到用户下次输入新内容时再唤醒你。
{file_tools_section}• store_context(count, reason) — 将指定范围的对话上下文存入记忆系统,然后从当前对话中移除这些内容。适合在对话上下文过长、话题转换、或遇到重要内容需要保存时使用。
思考规则:
你必须先进行内心思考然后选择需要使用的工具如果你想说话必须使用say工具
在内心思考中分析当前对话状态和你的想法,然后通过 say 工具的 reason 参数描述你想要回复的方式、想法和内容
只有使用say工具你才能向用户说话。用户才能看到你的发言
你需要按照以下**核心流程**决策
1.思考是否需要直接对用户说话如果需要使用send_message工具并描述你想要回复的方式、想法和内容
2.如果你认为使用工具能够帮助你更好的回复用户发言,请你选择合适的工具并视情况回复
3.思考是否需要等待或者结束对话如果需要使用wait或stop工具并描述你想要等待的原因
交互规则:
1. 你可以自由选择是否调用工具——如果你还想继续思考,可以不调用任何工具
2. 想对用户说话时,必须调用 say 工具;直接输出的文本只会被视为内心独白
3. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应
4. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话
5. 你可以在同一轮同时调用多个工具,例如先 say 再 wait
2. 当你说完想说的话、想把话语权交给用户时,调用 wait 暂时结束发言,等待对方回应
3. 当对话自然结束、用户表示不想继续聊、或连续多次等待超时用户没有回复时,调用 stop 结束对话
4. 你可以在同一轮同时调用多个工具,例如先 say 再 wait
现在根据**核心流程**输出你的思考在思考完后选择你使用的tool

11
saka.py
View File

@@ -23,21 +23,24 @@ if str(_maisaka_path) not in sys.path:
sys.path.insert(0, str(_maisaka_path))
from src.prompt.prompt_manager import prompt_manager
from config import console
from cli import BufferCLI
from src.maisaka.cli import BufferCLI
from src.maisaka.config import console
def main():
cli = None
# 加载所有提示词文件
prompt_manager.load_prompts()
cli = BufferCLI()
try:
cli = BufferCLI()
asyncio.run(cli.run())
except KeyboardInterrupt:
console.print("\n[muted]程序已终止[/muted]")
finally:
cli._debug_viewer.close()
if cli and hasattr(cli, "_debug_viewer"):
cli._debug_viewer.close()
if __name__ == "__main__":

View File

@@ -52,7 +52,7 @@ class ActionModifier:
all_actions = self.action_manager.get_using_actions()
message_list_before_now_half = get_messages_before_time_in_chat(
chat_id=self.chat_stream.stream_id,
chat_id=self.chat_stream.session_id,
timestamp=time.time(),
limit=min(int(global_config.chat.max_context_size * 0.33), 10),
filter_intercept_message_level=1,

View File

@@ -55,7 +55,7 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config"
BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.1.0"
CONFIG_VERSION: str = "8.1.2"
MODEL_CONFIG_VERSION: str = "1.12.0"
logger = get_logger("config")

View File

@@ -1582,33 +1582,23 @@ class MaiSakaConfig(ConfigBase):
)
"""启用文件列表工具"""
enable_qq_tools: bool = Field(
show_analyze_cognition_prompt: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "users",
"x-icon": "terminal",
},
)
"""启用 QQ 工具(获取聊天记录、发送消息等)"""
"""是否在 CLI 中显示 analyze_cognition 的 Prompt"""
qq_api_base_url: str = Field(
default="",
show_analyze_timing_prompt: bool = Field(
default=False,
json_schema_extra={
"x-widget": "input",
"x-icon": "server",
"x-widget": "switch",
"x-icon": "terminal",
},
)
"""QQ API 基地址"""
qq_api_key: str = Field(
default="",
json_schema_extra={
"x-widget": "input",
"x-icon": "key",
},
)
"""QQ API 密钥"""
"""是否在 CLI 中显示 analyze_timing 的 Prompt"""
class PluginRuntimeConfig(ConfigBase):
"""插件运行时配置类"""

View File

@@ -4,7 +4,7 @@ MaiSaka - 内置工具定义
使用主项目的工具格式ToolOption + ToolParamType
"""
from typing import List, Dict, Any
from typing import Any, Dict, List
from src.llm_models.payload_content.tool_option import ToolOption, ToolParamType
@@ -16,19 +16,19 @@ def create_builtin_tools() -> List[ToolOption]:
tools = []
# say 工具
say_builder = ToolOptionBuilder()
say_builder.set_name("say")
say_builder.set_description(
send_message_builder = ToolOptionBuilder()
send_message_builder.set_name("send_message")
send_message_builder.set_description(
"对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。直接输出的文本会被视为你的内心思考用户无法阅读。reason 参数描述你想要回复的方式、想法和内容,系统会根据你的想法和对话上下文生成具体的回复。"
)
say_builder.add_param(
send_message_builder.add_param(
name="reason",
param_type=ToolParamType.STRING,
description="描述你想要回复的方式、想法和内容。例如:'同意对方的看法,并分享自己的经历''礼貌地拒绝,表示现在不方便聊天'",
required=True,
enum_values=None,
)
tools.append(say_builder.build())
tools.append(send_message_builder.build())
# wait 工具
wait_builder = ToolOptionBuilder()
@@ -83,7 +83,7 @@ def builtin_tools_as_dicts() -> List[Dict[str, Any]]:
"""将内置工具转换为 dict 格式(用于调试)"""
return [
{
"name": "say",
"name": "send_message",
"description": "对用户说话。你所有想让用户看到的正式发言都必须通过此工具输出。",
"parameters": {
"type": "object",

View File

@@ -13,7 +13,7 @@ from rich.markdown import Markdown
from rich.text import Text
from rich import box
from config import (
from .config import (
console,
ENABLE_EMOTION_MODULE,
ENABLE_COGNITION_MODULE,
@@ -21,26 +21,23 @@ from config import (
ENABLE_KNOWLEDGE_MODULE,
ENABLE_MCP,
)
from input_reader import InputReader
from timing import build_timing_info
from knowledge import store_knowledge_from_context, retrieve_relevant_knowledge
from knowledge_store import get_knowledge_store
from llm_service import MaiSakaLLMService, build_message, remove_last_perception
from mcp_client import MCPManager
from tool_handlers import (
from .input_reader import InputReader
from .knowledge import retrieve_relevant_knowledge, store_knowledge_from_context
from .knowledge_store import get_knowledge_store
from .llm_service import MaiSakaLLMService, build_message, remove_last_perception
from .mcp_client import MCPManager
from .timing import build_timing_info
from .tool_handlers import (
ToolHandlerContext,
handle_say,
handle_list_files,
handle_mcp_tool,
handle_read_file,
handle_send_message,
handle_store_context,
handle_stop,
handle_unknown_tool,
handle_wait,
handle_write_file,
handle_read_file,
handle_list_files,
handle_store_context,
handle_mcp_tool,
handle_unknown_tool,
handle_get_qq_chat_info,
handle_send_info,
handle_list_qq_chats,
)
@@ -487,8 +484,8 @@ class BufferCLI:
ctx = self._build_tool_context()
for tc in response.tool_calls:
if tc.name == "say":
await handle_say(tc, chat_history, ctx)
if tc.name in {"send_message", "say"}:
await handle_send_message(tc, chat_history, ctx)
elif tc.name == "stop":
await handle_stop(tc, chat_history)
@@ -514,15 +511,6 @@ class BufferCLI:
elif tc.name == "store_context":
await handle_store_context(tc, chat_history, ctx)
elif tc.name == "get_qq_chat_info":
await handle_get_qq_chat_info(tc, chat_history)
elif tc.name == "send_info":
await handle_send_info(tc, chat_history)
elif tc.name == "list_qq_chats":
await handle_list_qq_chats(tc, chat_history)
elif self._mcp_manager and self._mcp_manager.is_mcp_tool(tc.name):
await handle_mcp_tool(tc, chat_history, self._mcp_manager)

View File

@@ -3,51 +3,31 @@ MaiSaka - 全局配置
从主项目配置系统读取配置、Rich Console 实例、主题定义。
"""
import sys
from pathlib import Path
import sys
from rich.console import Console
from rich.theme import Theme
from src.config.config import global_config
# 添加项目根目录到路径以导入主配置
_root = Path(__file__).parent.parent.parent.absolute()
if str(_root) not in sys.path:
sys.path.insert(0, str(_root))
# ──────────────────── 从主配置读取 ────────────────────
def _get_maisaka_config():
"""获取 MaiSaka 配置"""
try:
from src.config.config import config_manager
return config_manager.config.maisaka
except Exception:
# 如果配置加载失败,返回默认值
from src.config.official_configs import MaiSakaConfig
return MaiSakaConfig()
_maisaka_config = _get_maisaka_config()
# ──────────────────── 模块开关配置 ────────────────────
ENABLE_EMOTION_MODULE = global_config.maisaka.enable_emotion_module
ENABLE_COGNITION_MODULE = global_config.maisaka.enable_cognition_module
ENABLE_TIMING_MODULE = global_config.maisaka.enable_timing_module
ENABLE_KNOWLEDGE_MODULE = global_config.maisaka.enable_knowledge_module
ENABLE_MCP = global_config.maisaka.enable_mcp
ENABLE_WRITE_FILE = global_config.maisaka.enable_write_file
ENABLE_READ_FILE = global_config.maisaka.enable_read_file
ENABLE_LIST_FILES = global_config.maisaka.enable_list_files
SHOW_ANALYZE_COGNITION_PROMPT = global_config.maisaka.show_analyze_cognition_prompt
SHOW_ANALYZE_TIMING_PROMPT = global_config.maisaka.show_analyze_timing_prompt
ENABLE_EMOTION_MODULE = _maisaka_config.enable_emotion_module
ENABLE_COGNITION_MODULE = _maisaka_config.enable_cognition_module
# Timing 模块已包含自我反思功能
ENABLE_TIMING_MODULE = _maisaka_config.enable_timing_module
ENABLE_KNOWLEDGE_MODULE = _maisaka_config.enable_knowledge_module
ENABLE_MCP = _maisaka_config.enable_mcp
ENABLE_WRITE_FILE = _maisaka_config.enable_write_file
ENABLE_READ_FILE = _maisaka_config.enable_read_file
ENABLE_LIST_FILES = _maisaka_config.enable_list_files
# ──────────────────── QQ 工具配置 ────────────────────
ENABLE_QQ_TOOLS = _maisaka_config.enable_qq_tools
QQ_API_BASE_URL = _maisaka_config.qq_api_base_url
QQ_API_KEY = _maisaka_config.qq_api_key
# ──────────────────── Rich 主题 & Console ────────────────────

View File

@@ -1,93 +0,0 @@
"""
MaiSaka - Debug Viewer 客户端
在独立命令行窗口中显示每次 LLM 调用的完整 Prompt。
通过 TCP socket 将数据发送给 debug_viewer.py 子进程。
"""
import json
import os
import socket
import struct
import subprocess
import sys
import time
from typing import Optional
from config import console
class DebugViewer:
    """Streams each LLM call's full prompt to a separate console window.

    A child process running ``debug_viewer.py`` is spawned in its own
    console window; payloads are shipped to it over a localhost TCP socket
    as a 4-byte big-endian length prefix followed by a UTF-8 JSON body.
    """

    def __init__(self, port: int = 19876):
        # TCP port the viewer subprocess listens on (localhost only).
        self._port = port
        self._conn: Optional[socket.socket] = None
        self._process: Optional[subprocess.Popen] = None

    def start(self):
        """Spawn the viewer subprocess and connect to it over TCP."""
        viewer_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), "debug_viewer.py")
        try:
            self._process = subprocess.Popen(
                [sys.executable, viewer_script, str(self._port)],
                # On Windows this opens the viewer in its own console;
                # elsewhere the flag does not exist and 0 is a no-op.
                creationflags=getattr(subprocess, "CREATE_NEW_CONSOLE", 0),
            )
        except Exception as e:
            console.print(f"[warning]⚠️ 无法启动调试窗口: {e}[/warning]")
            return
        # The child needs a moment to start listening; retry the connect
        # up to 20 times with a short pause before each attempt.
        for _ in range(20):
            try:
                time.sleep(0.3)
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect(("127.0.0.1", self._port))
                self._conn = sock
                console.print(f"[success]✓ 调试窗口已启动[/success] [muted](port {self._port})[/muted]")
                return
            except ConnectionRefusedError:
                sock.close()
        console.print("[warning]⚠️ 无法连接到调试窗口(超时)[/warning]")

    def send(self, label: str, messages: list, tools: Optional[list] = None, response: Optional[dict] = None):
        """Ship one LLM call's full prompt and response to the viewer window."""
        if not self._conn:
            return
        # Only forward completed calls; skipping the request-only phase
        # avoids displaying every call twice (pending + finished).
        if response is None:
            return
        payload = {"label": label, "messages": messages}
        if tools:
            payload["tools"] = tools
        payload["response"] = response
        try:
            body = json.dumps(payload, ensure_ascii=False).encode("utf-8")
            prefix = struct.pack(">I", len(body))
            self._conn.sendall(prefix + body)
        except Exception:
            # The viewer window was probably closed; drop the link silently.
            self._conn = None

    def close(self):
        """Tear down the socket connection and terminate the subprocess."""
        if self._conn:
            try:
                self._conn.close()
            except Exception:
                pass
            self._conn = None
        if self._process:
            try:
                self._process.terminate()
            except Exception:
                pass
            self._process = None

View File

@@ -1,194 +0,0 @@
"""
MaiSaka Debug Viewer — 在独立命令行窗口中显示每次 LLM 调用的完整 Prompt。
由主进程自动启动,通过 TCP socket 接收数据。
"""
import socket
import struct
import json
import sys
from rich.console import Console
from rich.panel import Panel
from rich import box
console = Console()
# Per-role display decoration: maps a message role name to the (icon, Rich
# style) pair used when rendering that message's header in the viewer.
ROLE_STYLES = {
    "system": ("📋", "bold blue"),
    "user": ("👤", "bold green"),
    "assistant": ("🤖", "bold magenta"),
    "tool": ("🔧", "bold yellow"),
}
def recv_exact(conn: socket.socket, n: int) -> bytes | None:
"""精确接收 n 字节数据。"""
data = b""
while len(data) < n:
chunk = conn.recv(n - len(data))
if not chunk:
return None
data += chunk
return data
def format_message(idx: int, msg: dict) -> str:
    """Render a single chat message as Rich markup for terminal display."""
    try:
        raw_role = msg.get("role")
        role = str(raw_role) if raw_role else "?"
        raw_content = msg.get("content")
        content = str(raw_content) if raw_content else ""
        tool_calls = msg.get("tool_calls", []) or []
        raw_id = msg.get("tool_call_id")
        tool_call_id = str(raw_id) if raw_id else ""

        icon, style = ROLE_STYLES.get(role, ("", "white"))
        # Header: icon, message index and role, plus tool_call_id if present.
        header = f"[{style}]{icon} [{idx}] {role}[/{style}]"
        if tool_call_id:
            header += f" [dim](tool_call_id: {tool_call_id})[/dim]"
        lines = [header]

        # Body text, truncated to 3000 chars to keep the window readable.
        if content:
            if len(content) <= 3000:
                lines.append(content)
            else:
                lines.append(content[:3000] + f"\n[dim]... (截断, 共 {len(content)} 字符)[/dim]")

        # One summary line per well-formed tool call; malformed entries
        # (non-dict call or non-dict "function") are silently skipped.
        if isinstance(tool_calls, list):
            for call in tool_calls:
                if not isinstance(call, dict):
                    continue
                fn = call.get("function", {})
                if not isinstance(fn, dict):
                    continue
                fn_name = fn.get("name", "?")
                fn_args = fn.get("arguments", "")
                if isinstance(fn_args, str) and len(fn_args) > 500:
                    fn_args = fn_args[:500] + "..."
                lines.append(f" [yellow]→ tool_call: {fn_name}({fn_args})[/yellow]")

        return "\n".join(lines)
    except Exception:
        # Never let one malformed message crash the whole viewer loop.
        return f"[red]消息 [{idx}] 格式化错误[/red]"
def main():
    """Accept one connection from the main process and render every
    length-prefixed JSON payload (prompt + optional response) it sends.

    Protocol: 4-byte big-endian length prefix, then a UTF-8 JSON body with
    keys ``label``, ``messages`` and optionally ``tools`` / ``response``.
    """
    # Port may be overridden via argv[1]; default matches the client side.
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 19876
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(("127.0.0.1", port))
    server.listen(1)
    console.print(
        Panel(
            f"[bold cyan]MaiSaka Debug Viewer[/bold cyan]\n[dim]监听端口: {port} 等待主进程连接...[/dim]",
            box=box.DOUBLE_EDGE,
            border_style="cyan",
        )
    )
    # Single-client server: block until the main process connects.
    conn, _ = server.accept()
    console.print("[green]✓ 已连接到主进程[/green]\n")
    call_count = 0
    try:
        while True:
            # Read the 4-byte big-endian length prefix.
            length_bytes = recv_exact(conn, 4)
            if not length_bytes:
                break
            length = struct.unpack(">I", length_bytes)[0]
            # Read the JSON payload body of exactly that length.
            payload_bytes = recv_exact(conn, length)
            if not payload_bytes:
                break
            call_count += 1
            try:
                payload = json.loads(payload_bytes.decode("utf-8"))
            except json.JSONDecodeError as e:
                console.print(f"\n[red]JSON 解析错误: {e}[/red]")
                console.print(f"[dim]原始数据: {payload_bytes[:200]}...[/dim]")
                continue
            try:
                label = payload.get("label", "LLM Call")
                messages = payload.get("messages", [])
                tools = payload.get("tools")
                response = payload.get("response")
                # ── Title bar ──
                # NOTE(review): the repeated separator glyph inside '' appears
                # to have been lost in text extraction — confirm the intended
                # character (likely a box-drawing rule).
                console.print(f"\n{'' * 90}")
                console.print(
                    f"[bold yellow]#{call_count} {label}[/bold yellow] [dim]({len(messages)} messages)[/dim]"
                )
                console.print(f"{'' * 90}")
                # ── One formatted block per message, with dim dividers ──
                for i, msg in enumerate(messages):
                    console.print(format_message(i, msg))
                    if i < len(messages) - 1:
                        console.print("[dim]─ ─ ─[/dim]")
                # ── Names of the tools offered to the LLM on this call ──
                if tools:
                    tool_names = [t.get("function", {}).get("name", "?") for t in tools]
                    console.print(f"\n[dim]可用工具: {', '.join(tool_names)}[/dim]")
            except Exception as e:
                console.print(f"\n[red]数据处理错误: {e}[/red]")
                console.print(f"[dim]Payload: {payload}[/dim]")
                continue
            # ── LLM response: content panel plus any tool calls ──
            if response:
                try:
                    console.print("\n[bold cyan]📤 LLM 响应:[/bold cyan]")
                    resp_content = response.get("content", "")
                    if resp_content:
                        display = (
                            resp_content
                            if len(str(resp_content)) <= 3000
                            else (
                                str(resp_content)[:3000] + f"\n[dim]... (截断, 共 {len(str(resp_content))} 字符)[/dim]"
                            )
                        )
                        console.print(Panel(display, border_style="cyan", padding=(0, 1)))
                    resp_tool_calls = response.get("tool_calls", [])
                    if resp_tool_calls:
                        for tc in resp_tool_calls:
                            func = tc.get("function", {})
                            name = func.get("name", "?")
                            args = func.get("arguments", "")
                            if isinstance(args, str) and len(args) > 300:
                                args = args[:300] + "..."
                            console.print(f" [cyan]→ tool_call: {name}({args})[/cyan]")
                except Exception as e:
                    console.print(f"\n[red]响应解析错误: {e}[/red]")
                    console.print(f"[dim]原始数据: {response}[/dim]")
            console.print(f"[dim]{'' * 90}[/dim]")
    except (ConnectionResetError, ConnectionAbortedError):
        # Main process went away; shut down quietly.
        pass
    finally:
        conn.close()
        server.close()
        console.print("\n[red]连接已断开[/red]")
        # Keep the console window open until the user dismisses it.
        input("按 Enter 关闭窗口...")


if __name__ == "__main__":
    main()

View File

@@ -1,62 +1,56 @@
"""
MaiSaka - 异步输入读取器
基于后台线程的异步标准输入读取,通过 asyncio.Queue 传递给异步代码
将阻塞的标准输入读取放到后台线程中,供 asyncio 循环安全消费
"""
import sys
import asyncio
import sys
import threading
from typing import Optional
class InputReader:
"""
基于后台线程的异步标准输入读取器。
"""后台读取标准输入,并通过 asyncio.Queue 向主循环投递结果。"""
使用单一守护线程持续读取 stdin通过 asyncio.Queue 传递给异步代码。
保证整个应用只有一个线程读 stdin避免多线程竞争。
支持带超时的读取,用于 LLM wait 工具。
"""
def __init__(self):
self._queue: asyncio.Queue = asyncio.Queue()
def __init__(self) -> None:
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._queue: asyncio.Queue[Optional[str]] = asyncio.Queue()
self._thread: Optional[threading.Thread] = None
self._stop_event = threading.Event()
def start(self, loop: asyncio.AbstractEventLoop):
"""启动后台读取线程(仅首次调用生效)"""
if self._thread is not None:
def start(self, loop: asyncio.AbstractEventLoop) -> None:
"""启动后台输入线程。重复调用时忽略。"""
if self._thread and self._thread.is_alive():
return
self._loop = loop
self._thread = threading.Thread(target=self._read_loop, daemon=True)
self._stop_event.clear()
self._thread = threading.Thread(target=self._read_loop, name="maisaka-input-reader", daemon=True)
self._thread.start()
def _read_loop(self):
"""后台线程:持续从 stdin 读取行"""
try:
while True:
line = sys.stdin.readline()
if not line: # EOF
self._loop.call_soon_threadsafe(self._queue.put_nowait, None)
break
stripped = line.rstrip("\n").rstrip("\r")
self._loop.call_soon_threadsafe(self._queue.put_nowait, stripped)
except Exception:
pass
def _read_loop(self) -> None:
"""后台线程中阻塞读取 stdin"""
while not self._stop_event.is_set():
line = sys.stdin.readline()
if self._loop is None:
return
async def get_line(self, timeout: Optional[float] = None) -> Optional[str]:
"""
异步获取下一行输入。
if line == "":
self._loop.call_soon_threadsafe(self._queue.put_nowait, None)
return
Args:
timeout: 超时秒数None 表示无限等待
self._loop.call_soon_threadsafe(self._queue.put_nowait, line.rstrip("\r\n"))
Returns:
输入的字符串,超时或 EOF 返回 None
"""
try:
if timeout is not None:
return await asyncio.wait_for(self._queue.get(), timeout=timeout)
async def get_line(self, timeout: Optional[int] = None) -> Optional[str]:
"""异步获取一行输入;设置 timeout 时支持超时返回。"""
if timeout is None:
return await self._queue.get()
try:
return await asyncio.wait_for(self._queue.get(), timeout=timeout)
except asyncio.TimeoutError:
return None
def close(self) -> None:
"""请求后台线程停止。"""
self._stop_event.set()

View File

@@ -4,7 +4,7 @@ MaiSaka - 了解模块
"""
from typing import List
from knowledge_store import get_knowledge_store, KNOWLEDGE_CATEGORIES
from .knowledge_store import KNOWLEDGE_CATEGORIES, get_knowledge_store
def build_knowledge_summary() -> str:

View File

@@ -3,19 +3,24 @@ MaiSaka LLM 服务 - 使用主项目 LLM 系统
将主项目的 LLMRequest 适配为 MaiSaka 需要的接口
"""
import json
from dataclasses import dataclass
from typing import List, Optional, Literal
from typing import Any, List, Literal, Optional
import json
from rich.console import Group
from rich.panel import Panel
from rich.pretty import Pretty
from rich.text import Text
from src.common.logger import get_logger
from src.config.config import config_manager
from src.llm_models.utils_model import LLMRequest
from src.prompt.prompt_manager import prompt_manager
from src.llm_models.payload_content.message import MessageBuilder, RoleType
from src.llm_models.payload_content.tool_option import ToolCall as ToolCallOption, ToolOption
from builtin_tools import get_builtin_tools
import config
from src.llm_models.utils_model import LLMRequest
from src.prompt.prompt_manager import prompt_manager
from . import config
from .config import console
from .builtin_tools import get_builtin_tools
logger = get_logger("maisaka_llm")
@@ -123,10 +128,6 @@ class MaiSakaLLMService:
tools_section += "\n• read_file(filename) — 读取 mai_files 目录下的文件内容。"
if config.ENABLE_LIST_FILES:
tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。"
if config.ENABLE_QQ_TOOLS:
tools_section += "\n• get_qq_chat_info(chat, limit) — 获取指定 QQ 聊天的聊天记录。"
tools_section += "\n• send_info(chat, message) — 发送消息到指定的 QQ 聊天。"
tools_section += "\n• list_qq_chats() — 获取所有可用的 QQ 聊天列表。"
chat_prompt.add_context("file_tools_section", tools_section if tools_section else "")
import asyncio
@@ -203,6 +204,108 @@ class MaiSakaLLMService:
"""设置额外的工具定义(如 MCP 工具)"""
self._extra_tools = list(tools)
@staticmethod
def _get_role_badge_style(role: str) -> str:
"""为不同 role 返回不同的标签样式。"""
if role == "system":
return "bold white on blue"
if role == "user":
return "bold black on green"
if role == "assistant":
return "bold black on yellow"
if role == "tool":
return "bold white on magenta"
return "bold white on bright_black"
@staticmethod
def _render_message_content(content: Any) -> object:
"""把消息内容转成适合 Rich 输出的 renderable。"""
if isinstance(content, str):
return Text(content)
if isinstance(content, list):
parts: list[object] = []
for item in content:
if isinstance(item, dict) and item.get("type") == "text" and isinstance(item.get("text"), str):
parts.append(Text(item["text"]))
else:
parts.append(Pretty(item, expand_all=True))
return Group(*parts) if parts else Text("")
if content is None:
return Text("")
return Pretty(content, expand_all=True)
@staticmethod
def _format_tool_call_for_display(tool_call: Any) -> dict[str, Any]:
"""将 tool call 转成适合 CLI 展示的结构。"""
if isinstance(tool_call, dict):
function_info = tool_call.get("function", {})
return {
"id": tool_call.get("id"),
"name": function_info.get("name", tool_call.get("name")),
"arguments": function_info.get("arguments", tool_call.get("arguments")),
}
return {
"id": getattr(tool_call, "call_id", getattr(tool_call, "id", None)),
"name": getattr(tool_call, "func_name", getattr(tool_call, "name", None)),
"arguments": getattr(tool_call, "args", getattr(tool_call, "arguments", None)),
}
    def _render_message_panel(self, message: Any, index: int) -> Panel:
        """Render one prompt message (dict or message object) as a Rich Panel."""
        # Accept both plain-dict messages and message objects.
        if isinstance(message, dict):
            raw_role = message.get("role", "unknown")
            content = message.get("content")
            tool_calls = message.get("tool_calls")
            tool_call_id = message.get("tool_call_id")
        else:
            raw_role = getattr(message, "role", "unknown")
            content = getattr(message, "content", None)
            tool_calls = getattr(message, "tool_calls", None)
            tool_call_id = getattr(message, "tool_call_id", None)
        # Role enums expose .value; anything else is stringified.
        role = raw_role.value if hasattr(raw_role, "value") else str(raw_role)
        title = Text.assemble(
            Text(f" {role.upper()} ", style=self._get_role_badge_style(role)),
            Text(f" #{index}", style="muted"),
        )
        parts: list[object] = []
        # Message body (skipped when empty in any representation).
        if content not in (None, "", []):
            parts.append(Text(" message ", style="bold cyan"))
            parts.append(self._render_message_content(content))
        # Outgoing tool calls, normalized for display.
        if tool_calls:
            parts.append(Text(" tool_calls ", style="bold magenta"))
            parts.append(
                Pretty(
                    [self._format_tool_call_for_display(tool_call) for tool_call in tool_calls],
                    expand_all=True,
                )
            )
        # Correlation id for tool-result messages.
        if tool_call_id:
            parts.append(
                Text.assemble(
                    Text(" tool_call_id ", style="bold magenta"),
                    Text(" "),
                    Text(str(tool_call_id), style="magenta"),
                )
            )
        if not parts:
            # Keep the panel visibly non-empty for blank messages.
            parts.append(Text("[empty message]", style="muted"))
        return Panel(
            Group(*parts),
            title=title,
            border_style="dim",
            padding=(0, 1),
        )
@staticmethod
def _tool_option_to_dict(tool: "ToolOption") -> dict:
"""将 ToolOption 对象转换为主项目期望的 dict 格式
@@ -287,11 +390,14 @@ class MaiSakaLLMService:
# 打印消息列表
built_messages = message_factory(None)
print("\n" + "=" * 60)
print("MaiSaka LLM Request - chat_loop_step:")
for msg in built_messages:
print(f" {msg}")
print("=" * 60 + "\n")
console.print(
Panel(
Group(*[self._render_message_panel(msg, index + 1) for index, msg in enumerate(built_messages)]),
title="MaiSaka LLM Request - chat_loop_step",
border_style="cyan",
padding=(0, 1),
)
)
response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async(
message_factory=message_factory,
@@ -423,10 +529,11 @@ class MaiSakaLLMService:
prompt = "\n".join(prompt_parts)
print("\n" + "=" * 60)
print("MaiSaka LLM Request - analyze_cognition:")
print(f" {prompt}")
print("=" * 60 + "\n")
if config.SHOW_ANALYZE_COGNITION_PROMPT:
print("\n" + "=" * 60)
print("MaiSaka LLM Request - analyze_cognition:")
print(f" {prompt}")
print("=" * 60 + "\n")
try:
response, _ = await self._llm_utils.generate_response_async(
@@ -458,10 +565,11 @@ class MaiSakaLLMService:
prompt = "\n".join(prompt_parts)
print("\n" + "=" * 60)
print("MaiSaka LLM Request - analyze_timing:")
print(f" {prompt}")
print("=" * 60 + "\n")
if config.SHOW_ANALYZE_TIMING_PROMPT:
print("\n" + "=" * 60)
print("MaiSaka LLM Request - analyze_timing:")
print(f" {prompt}")
print("=" * 60 + "\n")
try:
response, _ = await self._llm_utils.generate_response_async(
@@ -518,7 +626,7 @@ class MaiSakaLLMService:
可供 Replyer 类直接调用
"""
from datetime import datetime
from replyer import format_chat_history
from .replyer import format_chat_history
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

View File

@@ -4,7 +4,7 @@ MaiSaka - MCP (Model Context Protocol) 客户端包
提供 MCPManager 用于管理 MCP 服务器连接、发现工具、调用工具。
用法:
from mcp_client import MCPManager
from .manager import MCPManager
manager = await MCPManager.from_config("mcp_config.json")
if manager:

View File

@@ -26,7 +26,7 @@ import os
from dataclasses import dataclass, field
from typing import Optional
from config import console
from ..config import console
@dataclass

View File

@@ -6,7 +6,7 @@ MaiSaka - 单个 MCP 服务器连接管理
from contextlib import AsyncExitStack
from typing import Any, Optional
from config import console
from ..config import console
from .config import MCPServerConfig
# ──────────────────── MCP SDK 可选导入 ────────────────────

View File

@@ -5,7 +5,7 @@ MaiSaka - MCP 管理器
from typing import Optional
from config import console
from ..config import console
from .config import MCPServerConfig, load_mcp_config
from .connection import MCPConnection, MCP_AVAILABLE
@@ -13,6 +13,7 @@ from .connection import MCPConnection, MCP_AVAILABLE
BUILTIN_TOOL_NAMES = frozenset(
{
"say",
"send_message",
"wait",
"stop",
"create_table",

View File

@@ -4,7 +4,7 @@ MaiSaka - Reply 回复生成器
"""
from typing import Optional
from llm_service import MaiSakaLLMService
from .llm_service import MaiSakaLLMService
def format_chat_history(messages: list) -> str:

View File

@@ -1,30 +1,24 @@
"""
MaiSaka - 工具调用处理器
处理 LLM 循环中各工具say/wait/stop/file/MCP/QQ)的执行逻辑。
处理 LLM 循环中各工具say/wait/stop/file/MCP的执行逻辑。
"""
import json as _json
import os
from datetime import datetime
from typing import TYPE_CHECKING, Optional
from pathlib import Path
import importlib.util
# 检查 aiohttp 是否可用
AIOHTTP_AVAILABLE = importlib.util.find_spec("aiohttp") is not None
if AIOHTTP_AVAILABLE:
import aiohttp
from typing import TYPE_CHECKING, Optional
from rich.panel import Panel
from rich.markdown import Markdown
from config import console
from input_reader import InputReader
from llm_service import MaiSakaLLMService
from replyer import Replyer
from .config import console
from .input_reader import InputReader
from .llm_service import MaiSakaLLMService
from .replyer import Replyer
if TYPE_CHECKING:
from mcp_client import MCPManager
from .mcp_client import MCPManager
# mai_files 目录路径
@@ -59,7 +53,7 @@ class ToolHandlerContext:
self.last_user_input_time: Optional[datetime] = None
async def handle_say(tc, chat_history: list, ctx: ToolHandlerContext):
async def handle_send_message(tc, chat_history: list, ctx: ToolHandlerContext):
"""处理 say 工具:根据想法和上下文生成回复后展示给用户。"""
reason = tc.arguments.get("reason", "")
console.print("[accent]🔧 调用工具: say(...)[/accent]")
@@ -554,281 +548,6 @@ async def handle_store_context(tc, chat_history: list, ctx: ToolHandlerContext):
"content": result_msg,
}
)
async def handle_get_qq_chat_info(tc, chat_history: list):
    """Handle the get_qq_chat_info tool: fetch QQ chat history over HTTP.

    Appends a tool-result entry to *chat_history* in every path, so the
    LLM loop always sees a response for this tool call.
    """
    chat = tc.arguments.get("chat", "")
    limit = tc.arguments.get("limit", 20)
    console.print(f'[accent]🔧 调用工具: get_qq_chat_info("{chat}", limit={limit})[/accent]')
    # aiohttp is an optional dependency; fail softly with a hint.
    if not AIOHTTP_AVAILABLE:
        error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    # Imported lazily so the module loads even when QQ tools are unused.
    from config import QQ_API_BASE_URL, QQ_API_KEY
    if not QQ_API_BASE_URL:
        error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    try:
        # Build the API endpoint.
        url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/history"
        # Build request headers (bearer auth only when a key is configured).
        headers = {}
        if QQ_API_KEY:
            headers["Authorization"] = f"Bearer {QQ_API_KEY}"
        # Issue the HTTP request.
        async with aiohttp.ClientSession() as session:
            params = {"chat": chat, "limit": limit}
            async with session.get(url, params=params, headers=headers) as response:
                if response.status == 200:
                    # The endpoint returns plain text, not JSON.
                    text = await response.text()
                    # Pretty display in the CLI.
                    console.print(
                        Panel(
                            f"聊天标识: {chat}\n获取数量: {limit}\n\n{text if text.strip() else '暂无聊天记录'}",
                            title="💬 QQ 聊天记录",
                            border_style="cyan",
                            padding=(0, 1),
                        )
                    )
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": text if text.strip() else "暂无聊天记录",
                        }
                    )
                else:
                    error_text = await response.text()
                    error_msg = f"HTTP 请求失败 (状态码 {response.status}): {error_text}"
                    console.print(f"[error]{error_msg}[/error]")
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": error_msg,
                        }
                    )
    except Exception as e:
        # Network/parse failures are reported back to the LLM, not raised.
        error_msg = f"获取 QQ 聊天记录失败: {e}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
async def handle_send_info(tc, chat_history: list):
    """Handle the send_info tool: send a message to a QQ chat over HTTP.

    Appends a tool-result entry to *chat_history* in every path.
    """
    chat = tc.arguments.get("chat", "")
    message = tc.arguments.get("message", "")
    console.print(f'[accent]🔧 调用工具: send_info("{chat}")[/accent]')
    # aiohttp is an optional dependency; fail softly with a hint.
    if not AIOHTTP_AVAILABLE:
        error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    # Imported lazily so the module loads even when QQ tools are unused.
    from config import QQ_API_BASE_URL, QQ_API_KEY
    if not QQ_API_BASE_URL:
        error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    try:
        # Build the API endpoint.
        url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/send"
        # Build request headers (bearer auth only when a key is configured).
        headers = {}
        if QQ_API_KEY:
            headers["Authorization"] = f"Bearer {QQ_API_KEY}"
        # Issue the HTTP request.
        async with aiohttp.ClientSession() as session:
            payload = {"chat": chat, "message": message}
            async with session.post(url, json=payload, headers=headers) as response:
                data = await response.json()
                # Success requires both HTTP 200 and success=true in the body.
                if response.status == 200 and data.get("success"):
                    # Pretty display in the CLI.
                    console.print(
                        Panel(
                            f"目标: {chat}\n消息: {message}\n\n结果: {data.get('message', '发送成功')}",
                            title="📤 消息已发送",
                            border_style="green",
                            padding=(0, 1),
                        )
                    )
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": f"消息发送成功: {data.get('message', '发送成功')}",
                        }
                    )
                else:
                    error_msg = f"发送失败: {data.get('message', '未知错误')}"
                    console.print(f"[error]{error_msg}[/error]")
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": error_msg,
                        }
                    )
    except Exception as e:
        # Network/parse failures are reported back to the LLM, not raised.
        error_msg = f"发送消息失败: {e}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
async def handle_list_qq_chats(tc, chat_history: list):
    """Handle the list_qq_chats tool: fetch the list of available QQ chats.

    Appends a tool-result entry to *chat_history* in every path.
    """
    console.print("[accent]🔧 调用工具: list_qq_chats()[/accent]")
    # aiohttp is an optional dependency; fail softly with a hint.
    if not AIOHTTP_AVAILABLE:
        error_msg = "aiohttp 模块未安装,请运行: pip install aiohttp"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    # Imported lazily so the module loads even when QQ tools are unused.
    from config import QQ_API_BASE_URL, QQ_API_KEY
    if not QQ_API_BASE_URL:
        error_msg = "QQ_API_BASE_URL 未配置,请在 .env 中设置"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
        return
    try:
        # Build the API endpoint.
        url = f"{QQ_API_BASE_URL.rstrip('/')}/api/external/chat/list"
        # Build request headers (bearer auth only when a key is configured).
        headers = {}
        if QQ_API_KEY:
            headers["Authorization"] = f"Bearer {QQ_API_KEY}"
        # Issue the HTTP request.
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=headers) as response:
                data = await response.json()
                if response.status == 200 and data.get("success"):
                    chats = data.get("chats", [])
                    # Format the chat list as one bullet line per chat.
                    if chats:
                        chat_list_text = "\n".join(
                            [
                                f" • [{c.get('platform', 'qq')}] {c.get('name', '未知')} (chat: {c.get('chat', 'N/A')})"
                                for c in chats
                            ]
                        )
                        result_text = f"可用的聊天 (共 {len(chats)} 个):\n{chat_list_text}"
                    else:
                        result_text = "没有可用的聊天"
                    console.print(
                        Panel(
                            result_text,
                            title="💬 QQ 聊天列表",
                            border_style="cyan",
                            padding=(0, 1),
                        )
                    )
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": result_text,
                        }
                    )
                else:
                    error_msg = f"获取失败: {data.get('message', '未知错误')}"
                    console.print(f"[error]{error_msg}[/error]")
                    chat_history.append(
                        {
                            "role": "tool",
                            "tool_call_id": tc.id,
                            "content": error_msg,
                        }
                    )
    except Exception as e:
        # Network/parse failures are reported back to the LLM, not raised.
        error_msg = f"获取聊天列表失败: {e}"
        console.print(f"[error]{error_msg}[/error]")
        chat_history.append(
            {
                "role": "tool",
                "tool_call_id": tc.id,
                "content": error_msg,
            }
        )
# ──────────────────── 初始化 mai_files 目录 ────────────────────
# 确保程序启动时 mai_files 目录存在

View File

@@ -1,4 +1,13 @@
# 代码备忘
.env中的webui配置仍旧在被读取
# 代码备忘
- [ ] 检查EmojiManager的replace_an_emoji_by_llm传入的emoji是否真的是没有注册到db的
- [ ] According to a comment, MaiMBot's check_types() accesses format_info.accept_format without None check
- [ ] 如果需要更多的消息格式支持,更新列表如下: