ref: refactor maisaka builtin tool logic and split it into separate files

SengokuCola committed 2026-04-03 14:51:05 +08:00
parent 6e6aa0b13a
commit 6c720e0403
19 changed files with 1075 additions and 2384 deletions


@@ -688,39 +688,41 @@ class DefaultReplyer:
return None
def get_chat_prompt_for_chat(self, chat_id: str) -> str:
"""
根据聊天流ID获取匹配的额外prompt(仅匹配group类型)
Args:
chat_id: 聊天流ID(哈希值)
Returns:
str: 匹配的额外prompt内容,如果没有匹配则返回空字符串
"""
if not global_config.experimental.chat_prompts:
"""根据聊天流 ID 获取匹配的额外 prompt。"""
if not global_config.chat.chat_prompts:
return ""
for chat_prompt_str in global_config.experimental.chat_prompts:
if not isinstance(chat_prompt_str, str):
for chat_prompt_item in global_config.chat.chat_prompts:
if hasattr(chat_prompt_item, "rule_type") and hasattr(chat_prompt_item, "prompt"):
if str(chat_prompt_item.rule_type or "").strip() != "group":
continue
config_chat_id = self._build_chat_uid(
str(chat_prompt_item.platform or "").strip(),
str(chat_prompt_item.item_id or "").strip(),
True,
)
prompt_content = str(chat_prompt_item.prompt or "").strip()
if config_chat_id == chat_id and prompt_content:
logger.debug(f"匹配到群聊 prompt 配置chat_id: {chat_id}, prompt: {prompt_content[:50]}...")
return prompt_content
continue
# 解析配置字符串检查类型是否为group
parts = chat_prompt_str.split(":", 3)
if len(parts) != 4:
if not isinstance(chat_prompt_item, str):
continue
stream_type = parts[2]
# 只匹配group类型
if stream_type != "group":
# 兼容旧格式的 platform:id:type:prompt 配置字符串。
parts = chat_prompt_item.split(":", 3)
if len(parts) != 4 or parts[2] != "group":
continue
result = self._parse_chat_prompt_config_to_chat_id(chat_prompt_str)
result = self._parse_chat_prompt_config_to_chat_id(chat_prompt_item)
if result is None:
continue
config_chat_id, prompt_content = result
if config_chat_id == chat_id:
logger.debug(f"匹配到群聊prompt配置chat_id: {chat_id}, prompt: {prompt_content[:50]}...")
logger.debug(f"匹配到群聊 prompt 配置chat_id: {chat_id}, prompt: {prompt_content[:50]}...")
return prompt_content
return ""

File diff suppressed because it is too large.


@@ -21,7 +21,6 @@ from .official_configs import (
DatabaseConfig,
DebugConfig,
EmojiConfig,
ExperimentalConfig,
ExpressionConfig,
KeywordReactionConfig,
LPMMKnowledgeConfig,
@@ -56,7 +55,7 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config"
BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.2.1"
CONFIG_VERSION: str = "8.3.0"
MODEL_CONFIG_VERSION: str = "1.13.1"
logger = get_logger("config")
@@ -113,13 +112,10 @@ class Config(ConfigBase):
debug: DebugConfig = Field(default_factory=DebugConfig)
"""调试配置类"""
experimental: ExperimentalConfig = Field(default_factory=ExperimentalConfig)
"""实验性功能配置类"""
maim_message: MaimMessageConfig = Field(default_factory=MaimMessageConfig)
"""maim_message配置类"""
lpmm_knowledge: LPMMKnowledgeConfig = Field(default_factory=LPMMKnowledgeConfig)
lpmm_knowledge: LPMMKnowledgeConfig = Field(default_factory=LPMMKnowledgeConfig, repr=False)
"""LPMM知识库配置类"""
webui: WebUIConfig = Field(default_factory=WebUIConfig)


@@ -253,11 +253,23 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("expression.manual_reflect_operator_id")
chat = _as_dict(data.get("chat"))
if chat is None:
chat = {}
data["chat"] = chat
mem = _as_dict(data.get("memory"))
if mem is not None:
if _migrate_target_item_list(mem, "global_memory_blacklist"):
migrated_any = True
reasons.append("memory.global_memory_blacklist")
for removed_key in (
"agent_timeout_seconds",
"global_memory",
"global_memory_blacklist",
"max_agent_iterations",
):
if removed_key in mem:
mem.pop(removed_key, None)
migrated_any = True
reasons.append(f"memory.{removed_key}_removed")
exp = _as_dict(data.get("experimental"))
if exp is not None:
@@ -265,7 +277,16 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("experimental.chat_prompts")
chat = _as_dict(data.get("chat"))
for key in ("private_plan_style", "group_chat_prompt", "private_chat_prompts", "chat_prompts"):
if key in exp and key not in chat:
chat[key] = exp[key]
migrated_any = True
reasons.append(f"experimental.{key}_moved_to_chat")
data.pop("experimental", None)
migrated_any = True
reasons.append("experimental_removed")
if chat is not None and "think_mode" in chat:
chat.pop("think_mode", None)
migrated_any = True
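
A minimal sketch of the key moves this migration performs on a raw config dict: prompt-related keys hop from [experimental] to [chat], the [experimental] table is dropped, and the retired memory-agent keys are removed. Key names are taken from the diff above; the real function additionally tracks migrated_any and the reasons list.

# sketch only: works on a plain dict instead of the real MigrationResult flow
legacy_data = {
    "experimental": {"group_chat_prompt": "不要刷屏", "chat_prompts": []},
    "memory": {"max_agent_iterations": 5, "chat_history_topic_check_message_threshold": 80},
}

chat = legacy_data.setdefault("chat", {})
experimental = legacy_data.get("experimental") or {}
for key in ("private_plan_style", "group_chat_prompt", "private_chat_prompts", "chat_prompts"):
    if key in experimental and key not in chat:
        chat[key] = experimental[key]
legacy_data.pop("experimental", None)

memory = legacy_data.get("memory") or {}
for removed_key in ("agent_timeout_seconds", "global_memory", "global_memory_blacklist", "max_agent_iterations"):
    memory.pop(removed_key, None)

assert "experimental" not in legacy_data
assert legacy_data["chat"]["group_chat_prompt"] == "不要刷屏"
assert "max_agent_iterations" not in legacy_data["memory"]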


@@ -244,15 +244,45 @@ class ChatConfig(ConfigBase):
},
)
"""每个聊天流最大保存的Plan/Reply日志数量超过此数量时会自动删除最老的日志"""
llm_quote: bool = Field(
default=False,
private_plan_style: str = Field(
default=(
"1.思考**所有**的可用的action中的**每个动作**是否符合当下条件,如果动作使用条件符合聊天内容就使用\n"
"2.如果相同的内容已经被执行,请不要重复执行\n"
"3.某句话如果已经被回复过,不要重复回复"
),
json_schema_extra={
"x-widget": "switch",
"x-icon": "quote",
"x-widget": "textarea",
"x-icon": "user",
},
)
"""是否在 reply action 中启用 quote 参数,启用后 LLM 可以控制是否引用消息"""
"""_wrap_私聊说话规则行为风格"""
group_chat_prompt: str = Field(
default="不要回复的太频繁!控制回复的频率,不要每个人的消息都回复,只回复你感兴趣的或者主动提及你的。",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "users",
},
)
"""_wrap_群聊通用注意事项"""
private_chat_prompts: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊通用注意事项"""
chat_prompts: list["ExtraPromptItem"] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "list",
},
)
"""_wrap_为指定聊天添加额外的 prompt 配置列表"""
enable_talk_value_rules: bool = Field(
default=True,
@@ -356,43 +386,6 @@ class MemoryConfig(ConfigBase):
__ui_parent__ = "emoji"
max_agent_iterations: int = Field(
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "layers",
},
)
"""记忆思考深度最低为1"""
agent_timeout_seconds: float = Field(
default=120.0,
json_schema_extra={
"x-widget": "input",
"x-icon": "clock",
},
)
"""最长回忆时间(秒)"""
global_memory: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "globe",
},
)
"""是否允许记忆检索在聊天记录中进行全局查询忽略当前chat_id仅对 search_chat_history 等工具生效)"""
global_memory_blacklist: list[TargetItem] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "shield-off",
},
)
"""_wrap_全局记忆黑名单当启用全局记忆时不将特定聊天流纳入检索"""
chat_history_topic_check_message_threshold: int = Field(
default=80,
ge=1,
@@ -444,10 +437,6 @@ class MemoryConfig(ConfigBase):
def model_post_init(self, context: Optional[dict] = None) -> None:
"""验证配置值"""
if self.max_agent_iterations < 1:
raise ValueError(f"max_agent_iterations 必须至少为1当前值: {self.max_agent_iterations}")
if self.agent_timeout_seconds <= 0:
raise ValueError(f"agent_timeout_seconds 必须大于0当前值: {self.agent_timeout_seconds}")
if self.chat_history_topic_check_message_threshold < 1:
raise ValueError(
f"chat_history_topic_check_message_threshold 必须至少为1当前值: {self.chat_history_topic_check_message_threshold}"
@@ -1052,57 +1041,13 @@ class ExtraPromptItem(ConfigBase):
"""额外的prompt内容"""
def model_post_init(self, context: Optional[dict] = None) -> None:
if not self.platform and not self.item_id and not self.prompt:
return super().model_post_init(context)
if not self.platform or not self.item_id or not self.prompt:
raise ValueError("ExtraPromptItem 中 platform, id 和 prompt 不能为空")
return super().model_post_init(context)
class ExperimentalConfig(ConfigBase):
"""实验功能配置类"""
__ui_parent__ = "debug"
private_plan_style: str = Field(
default=(
"1.思考**所有**的可用的action中的**每个动作**是否符合当下条件,如果动作使用条件符合聊天内容就使用"
"2.如果相同的内容已经被执行,请不要重复执行"
"3.某句话如果已经被回复过,不要重复回复"
),
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊说话规则行为风格实验性功能"""
group_chat_prompt: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "users",
},
)
"""_wrap_群聊通用注意事项实验性功能"""
private_chat_prompts: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊通用注意事项实验性功能"""
chat_prompts: list[ExtraPromptItem] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "list",
},
)
"""_wrap_为指定聊天添加额外的prompt配置列表"""
class MaimMessageConfig(ConfigBase):
"""maim_message配置类"""
@@ -1482,16 +1427,6 @@ class MaiSakaConfig(ConfigBase):
},
)
"""启用知识库模块"""
show_analyze_cognition_prompt: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "terminal",
},
)
"""是否在 CLI 中显示 analyze_cognition 的 Prompt"""
show_thinking: bool = Field(
default=True,
json_schema_extra={


@@ -843,12 +843,6 @@ class LLMOrchestrator:
for _ in range(max_attempts):
model_info, api_provider, client = self._select_model(exclude_models=failed_models_this_request)
if self.request_type.startswith("maisaka_"):
logger.info(
f"LLMOrchestrator[{self.request_type}] 已选择模型 model={model_info.name} "
f"provider={api_provider.name} request_type={request_type.value}"
)
message_list = []
if message_factory:
message_list = message_factory(client)


@@ -0,0 +1,71 @@
"""Maisaka 内置工具聚合入口。"""
from collections.abc import Awaitable, Callable
from typing import Dict, List, Optional
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.llm_models.payload_content.tool_option import ToolDefinitionInput
from .context import BuiltinToolRuntimeContext
from .no_reply import get_tool_spec as get_no_reply_tool_spec
from .no_reply import handle_tool as handle_no_reply_tool
from .query_jargon import get_tool_spec as get_query_jargon_tool_spec
from .query_jargon import handle_tool as handle_query_jargon_tool
from .query_person_info import get_tool_spec as get_query_person_info_tool_spec
from .query_person_info import handle_tool as handle_query_person_info_tool
from .reply import get_tool_spec as get_reply_tool_spec
from .reply import handle_tool as handle_reply_tool
from .send_emoji import get_tool_spec as get_send_emoji_tool_spec
from .send_emoji import handle_tool as handle_send_emoji_tool
from .wait import get_tool_spec as get_wait_tool_spec
from .wait import handle_tool as handle_wait_tool
BuiltinToolHandler = Callable[[ToolInvocation, Optional[ToolExecutionContext]], Awaitable[ToolExecutionResult]]
def get_builtin_tool_specs() -> List[ToolSpec]:
"""获取默认启用的内置工具声明列表。"""
return [
get_wait_tool_spec(),
get_reply_tool_spec(),
get_query_jargon_tool_spec(),
get_no_reply_tool_spec(),
get_send_emoji_tool_spec(),
]
def get_all_builtin_tool_specs() -> List[ToolSpec]:
"""获取全部内置工具声明列表。"""
return [
get_wait_tool_spec(),
get_reply_tool_spec(),
get_query_jargon_tool_spec(),
get_query_person_info_tool_spec(),
get_no_reply_tool_spec(),
get_send_emoji_tool_spec(),
]
def get_builtin_tools() -> List[ToolDefinitionInput]:
"""获取兼容旧模型层的内置工具定义。"""
return [tool_spec.to_llm_definition() for tool_spec in get_builtin_tool_specs()]
def build_builtin_tool_handlers(tool_ctx: BuiltinToolRuntimeContext) -> Dict[str, BuiltinToolHandler]:
"""构建内置工具处理器映射。"""
return {
"reply": lambda invocation, context=None: handle_reply_tool(tool_ctx, invocation, context),
"no_reply": lambda invocation, context=None: handle_no_reply_tool(tool_ctx, invocation, context),
"query_jargon": lambda invocation, context=None: handle_query_jargon_tool(tool_ctx, invocation, context),
"query_person_info": lambda invocation, context=None: handle_query_person_info_tool(
tool_ctx,
invocation,
context,
),
"wait": lambda invocation, context=None: handle_wait_tool(tool_ctx, invocation, context),
"send_emoji": lambda invocation, context=None: handle_send_emoji_tool(tool_ctx, invocation, context),
}
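
For orientation, a self-contained sketch of the dispatch pattern that build_builtin_tool_handlers sets up: every tool name maps to an async handler that receives the invocation plus an optional execution context and returns a result object. InvocationSketch, ResultSketch and HandlerSketch are simplified stand-ins for ToolInvocation, ToolExecutionResult and BuiltinToolHandler, assumed here for illustration only.

import asyncio
from dataclasses import dataclass, field
from typing import Any, Awaitable, Callable, Dict, Optional

@dataclass
class InvocationSketch:
    tool_name: str
    arguments: Dict[str, Any] = field(default_factory=dict)

@dataclass
class ResultSketch:
    tool_name: str
    success: bool
    content: str = ""

HandlerSketch = Callable[[InvocationSketch, Optional[Any]], Awaitable[ResultSketch]]

async def handle_wait(invocation: InvocationSketch, context: Optional[Any] = None) -> ResultSketch:
    # mirrors the real wait handler's argument coercion: fall back to 30 and clamp at 0
    try:
        seconds = int(invocation.arguments.get("seconds", 30))
    except (TypeError, ValueError):
        seconds = 30
    return ResultSketch(invocation.tool_name, True, f"waiting up to {max(0, seconds)}s")

handlers: Dict[str, HandlerSketch] = {
    # one entry per tool name, each lambda closing over whatever shared context it needs
    "wait": lambda invocation, context=None: handle_wait(invocation, context),
}

async def dispatch(invocation: InvocationSketch) -> ResultSketch:
    return await handlers[invocation.tool_name](invocation, None)

print(asyncio.run(dispatch(InvocationSketch("wait", {"seconds": 10}))))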


@@ -0,0 +1,199 @@
"""Maisaka 内置工具执行上下文。"""
from __future__ import annotations
from base64 import b64decode
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from src.chat.message_receive.message import SessionMessage
from src.chat.utils.utils import process_llm_response
from src.common.data_models.message_component_data_model import EmojiComponent, MessageSequence, TextComponent
from src.config.config import global_config
from src.core.tooling import ToolExecutionResult
from ..context_messages import SessionBackedMessage
from ..message_adapter import format_speaker_content
if TYPE_CHECKING:
from ..reasoning_engine import MaisakaReasoningEngine
from ..runtime import MaisakaHeartFlowChatting
class BuiltinToolRuntimeContext:
"""为拆分后的内置工具提供统一运行时能力。"""
def __init__(
self,
engine: "MaisakaReasoningEngine",
runtime: "MaisakaHeartFlowChatting",
) -> None:
self.engine = engine
self.runtime = runtime
@staticmethod
def build_success_result(
tool_name: str,
content: str = "",
structured_content: Any = None,
metadata: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
"""构造统一工具成功结果。"""
return ToolExecutionResult(
tool_name=tool_name,
success=True,
content=content,
structured_content=structured_content,
metadata=dict(metadata or {}),
)
@staticmethod
def build_failure_result(
tool_name: str,
error_message: str,
structured_content: Any = None,
metadata: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
"""构造统一工具失败结果。"""
return ToolExecutionResult(
tool_name=tool_name,
success=False,
error_message=error_message,
structured_content=structured_content,
metadata=dict(metadata or {}),
)
@staticmethod
def normalize_words(raw_words: Any) -> List[str]:
"""清洗黑话查询词条列表。"""
if not isinstance(raw_words, list):
return []
normalized_words: List[str] = []
seen_words: set[str] = set()
for item in raw_words:
if not isinstance(item, str):
continue
word = item.strip()
if not word or word in seen_words:
continue
seen_words.add(word)
normalized_words.append(word)
return normalized_words
@staticmethod
def normalize_jargon_query_results(raw_results: Any) -> List[Dict[str, object]]:
"""规范化黑话查询结果列表。"""
if not isinstance(raw_results, list):
return []
normalized_results: List[Dict[str, object]] = []
for raw_item in raw_results:
if not isinstance(raw_item, dict):
continue
word = str(raw_item.get("word") or "").strip()
matches = raw_item.get("matches")
normalized_matches: List[Dict[str, str]] = []
if isinstance(matches, list):
for match in matches:
if not isinstance(match, dict):
continue
content = str(match.get("content") or "").strip()
meaning = str(match.get("meaning") or "").strip()
if not content or not meaning:
continue
normalized_matches.append({"content": content, "meaning": meaning})
normalized_results.append(
{
"word": word,
"found": bool(raw_item.get("found", bool(normalized_matches))),
"matches": normalized_matches,
}
)
return normalized_results
@staticmethod
def post_process_reply_text(reply_text: str) -> List[str]:
"""沿用旧回复链的文本后处理,执行分段与错别字注入。"""
processed_segments: List[str] = []
for segment in process_llm_response(reply_text):
normalized_segment = segment.strip()
if normalized_segment:
processed_segments.append(normalized_segment)
if processed_segments:
return processed_segments
return [reply_text.strip()]
def get_runtime_manager(self) -> Any:
"""获取插件运行时管理器。"""
return self.engine._get_runtime_manager()
def append_guided_reply_to_chat_history(self, reply_text: str) -> None:
"""将引导回复写回 Maisaka 历史。"""
bot_name = global_config.bot.nickname.strip() or "MaiSaka"
reply_timestamp = datetime.now()
planner_prefix = (
f"[时间]{reply_timestamp.strftime('%H:%M:%S')}\n"
f"[用户]{bot_name}\n"
"[用户群昵称]\n"
"[msg_id]\n"
"[发言内容]"
)
history_message = SessionBackedMessage(
raw_message=MessageSequence([TextComponent(f"{planner_prefix}{reply_text}")]),
visible_text=format_speaker_content(
bot_name,
reply_text,
reply_timestamp,
),
timestamp=reply_timestamp,
source_kind="guided_reply",
)
self.runtime._chat_history.append(history_message)
def append_sent_emoji_to_chat_history(
self,
*,
emoji_base64: str,
success_message: str,
) -> None:
"""将 bot 主动发送的表情包同步到 Maisaka 历史。"""
bot_name = global_config.bot.nickname.strip() or "MaiSaka"
reply_timestamp = datetime.now()
planner_prefix = (
f"[时间]{reply_timestamp.strftime('%H:%M:%S')}\n"
f"[用户]{bot_name}\n"
"[用户群昵称]\n"
"[msg_id]\n"
"[发言内容]"
)
history_message = SessionBackedMessage(
raw_message=MessageSequence(
[
TextComponent(planner_prefix),
EmojiComponent(
binary_hash="",
content=success_message,
binary_data=b64decode(emoji_base64),
),
]
),
visible_text=format_speaker_content(
bot_name,
"[表情包]",
reply_timestamp,
),
timestamp=reply_timestamp,
source_kind="guided_reply",
)
self.runtime._chat_history.append(history_message)
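
Since normalize_words is a pure @staticmethod, its contract is easy to pin down in isolation. The sketch below mirrors the behaviour read from the diff above (keep first occurrence, strip whitespace, drop non-strings and empties); it is a standalone copy for illustration, not the project module.

from typing import Any, List

def normalize_words_sketch(raw_words: Any) -> List[str]:
    # mirrors BuiltinToolRuntimeContext.normalize_words: preserve order, strip, dedupe
    if not isinstance(raw_words, list):
        return []
    seen: set[str] = set()
    out: List[str] = []
    for item in raw_words:
        if not isinstance(item, str):
            continue
        word = item.strip()
        if not word or word in seen:
            continue
        seen.add(word)
        out.append(word)
    return out

assert normalize_words_sketch(["yyds", " yyds ", 42, ""]) == ["yyds"]
assert normalize_words_sketch("not a list") == []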


@@ -0,0 +1,34 @@
"""no_reply 内置工具。"""
from typing import Optional
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from .context import BuiltinToolRuntimeContext
def get_tool_spec() -> ToolSpec:
"""获取 no_reply 工具声明。"""
return ToolSpec(
name="no_reply",
brief_description="本轮不进行回复,等待其他用户的新消息。",
provider_name="maisaka_builtin",
provider_type="builtin",
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 no_reply 内置工具。"""
del context
tool_ctx.runtime._enter_stop_state()
return tool_ctx.build_success_result(
invocation.tool_name,
"当前对话循环已暂停,等待新消息到来。",
metadata={"pause_execution": True},
)


@@ -0,0 +1,143 @@
"""query_jargon 内置工具。"""
from typing import Any, Dict, List, Optional
import json
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.learners.jargon_explainer import search_jargon
from .context import BuiltinToolRuntimeContext
def get_tool_spec() -> ToolSpec:
"""获取 query_jargon 工具声明。"""
return ToolSpec(
name="query_jargon",
brief_description="查询当前聊天上下文中的黑话或词条含义。",
detailed_description="参数说明:\n- wordsarray必填。要查询的词条列表。",
parameters_schema={
"type": "object",
"properties": {
"words": {
"type": "array",
"description": "要查询的词条列表。",
"items": {"type": "string"},
},
},
"required": ["words"],
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 query_jargon 内置工具。"""
del context
raw_words = invocation.arguments.get("words")
if not isinstance(raw_words, list):
return tool_ctx.build_failure_result(
invocation.tool_name,
"查询黑话工具需要提供 `words` 数组参数。",
)
words = tool_ctx.normalize_words(raw_words)
if not words:
return tool_ctx.build_failure_result(
invocation.tool_name,
"查询黑话工具至少需要一个非空词条。",
)
limit = 5
case_sensitive = False
enable_fuzzy_fallback = True
before_search_result = await tool_ctx.get_runtime_manager().invoke_hook(
"jargon.query.before_search",
words=list(words),
session_id=tool_ctx.runtime.session_id,
limit=limit,
case_sensitive=case_sensitive,
enable_fuzzy_fallback=enable_fuzzy_fallback,
abort_message="黑话查询已被 Hook 中止。",
)
if before_search_result.aborted:
abort_message = str(before_search_result.kwargs.get("abort_message") or "黑话查询已被 Hook 中止。").strip()
return tool_ctx.build_failure_result(invocation.tool_name, abort_message or "黑话查询已被 Hook 中止。")
before_search_kwargs = before_search_result.kwargs
if before_search_kwargs.get("words") is not None:
words = tool_ctx.normalize_words(before_search_kwargs.get("words"))
if not words:
return tool_ctx.build_failure_result(invocation.tool_name, "Hook 过滤后没有可查询的黑话词条。")
try:
limit = int(before_search_kwargs.get("limit", limit))
except (TypeError, ValueError):
limit = 5
limit = max(limit, 1)
case_sensitive = bool(before_search_kwargs.get("case_sensitive", case_sensitive))
enable_fuzzy_fallback = bool(before_search_kwargs.get("enable_fuzzy_fallback", enable_fuzzy_fallback))
results: List[Dict[str, object]] = []
for word in words:
exact_matches = search_jargon(
keyword=word,
chat_id=tool_ctx.runtime.session_id,
limit=limit,
case_sensitive=case_sensitive,
fuzzy=False,
)
matched_entries = exact_matches
if not matched_entries and enable_fuzzy_fallback:
matched_entries = search_jargon(
keyword=word,
chat_id=tool_ctx.runtime.session_id,
limit=limit,
case_sensitive=case_sensitive,
fuzzy=True,
)
results.append(
{
"word": word,
"found": bool(matched_entries),
"matches": matched_entries,
}
)
after_search_result = await tool_ctx.get_runtime_manager().invoke_hook(
"jargon.query.after_search",
words=list(words),
session_id=tool_ctx.runtime.session_id,
limit=limit,
case_sensitive=case_sensitive,
enable_fuzzy_fallback=enable_fuzzy_fallback,
results=list(results),
abort_message="黑话查询结果已被 Hook 中止。",
)
if after_search_result.aborted:
abort_message = str(after_search_result.kwargs.get("abort_message") or "黑话查询结果已被 Hook 中止。").strip()
return tool_ctx.build_failure_result(
invocation.tool_name,
abort_message or "黑话查询结果已被 Hook 中止。",
)
raw_results = after_search_result.kwargs.get("results")
if raw_results is not None:
results = tool_ctx.normalize_jargon_query_results(raw_results)
structured_content: Dict[str, Any] = {"results": results}
return tool_ctx.build_success_result(
invocation.tool_name,
json.dumps(structured_content, ensure_ascii=False),
structured_content=structured_content,
)
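
The handler returns the same payload in two forms: the dict as structured_content and its JSON serialization as the textual content, so callers can consume either one. Below is an illustrative (made-up) payload in the shape assembled above; only the key names are taken from the handler.

import json

# one entry per queried word; "matches" is filtered down to {"content", "meaning"}
# pairs, as normalize_jargon_query_results enforces
jargon_payload = {
    "results": [
        {"word": "yyds", "found": True, "matches": [{"content": "yyds", "meaning": "永远的神"}]},
        {"word": "没收录的词", "found": False, "matches": []},
    ]
}
print(json.dumps(jargon_payload, ensure_ascii=False))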


@@ -0,0 +1,183 @@
"""query_person_info 内置工具。"""
from typing import Any, Dict, List, Optional
import json
from sqlmodel import col, select
from src.common.database.database import get_db_session
from src.common.database.database_model import PersonInfo
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.know_u.knowledge_store import get_knowledge_store
from .context import BuiltinToolRuntimeContext
def get_tool_spec(*, enabled: bool = False) -> ToolSpec:
"""获取 query_person_info 工具声明。"""
return ToolSpec(
name="query_person_info",
brief_description="查询某个人的档案和相关记忆信息。",
detailed_description=(
"参数说明:\n"
"- person_namestring必填。人物名称、昵称或用户 ID。\n"
"- limitinteger可选。最多返回多少条匹配记录默认 3。"
),
parameters_schema={
"type": "object",
"properties": {
"person_name": {
"type": "string",
"description": "人物名称、昵称或用户 ID。",
},
"limit": {
"type": "integer",
"description": "最多返回多少条匹配记录。",
"default": 3,
},
},
"required": ["person_name"],
},
provider_name="maisaka_builtin",
provider_type="builtin",
enabled=enabled,
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 query_person_info 内置工具。"""
del context
raw_person_name = invocation.arguments.get("person_name")
raw_limit = invocation.arguments.get("limit", 3)
if not isinstance(raw_person_name, str):
return tool_ctx.build_failure_result(
invocation.tool_name,
"查询人物信息工具需要提供字符串类型的 `person_name` 参数。",
)
person_name = raw_person_name.strip()
if not person_name:
return tool_ctx.build_failure_result(
invocation.tool_name,
"查询人物信息工具需要提供非空的 `person_name` 参数。",
)
try:
limit = max(1, min(int(raw_limit), 10))
except (TypeError, ValueError):
limit = 3
persons = _query_person_records(person_name, limit)
result: Dict[str, Any] = {
"query": person_name,
"persons": persons,
"related_knowledge": _query_related_knowledge(person_name, persons, limit),
}
return tool_ctx.build_success_result(
invocation.tool_name,
json.dumps(result, ensure_ascii=False),
structured_content=result,
)
def _query_person_records(person_name: str, limit: int) -> List[Dict[str, Any]]:
"""按名称、昵称或用户 ID 查询人物档案。"""
with get_db_session() as session:
records = session.exec(
select(PersonInfo)
.where(
col(PersonInfo.person_name).contains(person_name)
| col(PersonInfo.user_nickname).contains(person_name)
| col(PersonInfo.user_id).contains(person_name)
)
.order_by(col(PersonInfo.last_known_time).desc(), col(PersonInfo.id).desc())
.limit(limit)
).all()
persons: List[Dict[str, Any]] = []
for record in records:
memory_points: List[str] = []
if record.memory_points:
try:
parsed_points = json.loads(record.memory_points)
if isinstance(parsed_points, list):
memory_points = [str(point).strip() for point in parsed_points if str(point).strip()]
except (json.JSONDecodeError, TypeError, ValueError):
memory_points = []
persons.append(
{
"person_id": record.person_id,
"person_name": record.person_name or "",
"user_nickname": record.user_nickname,
"user_id": record.user_id,
"platform": record.platform,
"name_reason": record.name_reason or "",
"is_known": record.is_known,
"know_counts": record.know_counts,
"memory_points": memory_points[:20],
"last_known_time": record.last_known_time.isoformat() if record.last_known_time is not None else None,
}
)
return persons
def _query_related_knowledge(
person_name: str,
persons: List[Dict[str, Any]],
limit: int,
) -> List[Dict[str, Any]]:
"""从 Maisaka knowledge 中补充检索与该人物相关的条目。"""
store = get_knowledge_store()
knowledge_items: List[Dict[str, Any]] = []
seen_ids: set[str] = set()
for person in persons:
matched_items = store.get_knowledge_by_user(
platform=str(person.get("platform", "")).strip(),
user_id=str(person.get("user_id", "")).strip(),
user_nickname=str(person.get("user_nickname", "")).strip(),
person_name=str(person.get("person_name", "")).strip(),
limit=max(limit, 5),
)
for item in matched_items:
item_id = str(item.get("id", "")).strip()
if item_id and item_id in seen_ids:
continue
if item_id:
seen_ids.add(item_id)
knowledge_items.append(item)
if not knowledge_items:
fallback_items = store.search_knowledge(person_name, limit=max(limit, 5))
for item in fallback_items:
item_id = str(item.get("id", "")).strip()
if item_id and item_id in seen_ids:
continue
if item_id:
seen_ids.add(item_id)
knowledge_items.append(item)
results: List[Dict[str, Any]] = []
for item in knowledge_items:
results.append(
{
"id": str(item.get("id", "")).strip(),
"category_id": str(item.get("category_id", "")).strip(),
"category_name": str(item.get("category_name", "")).strip(),
"content": str(item.get("content", "")).strip(),
"metadata": item.get("metadata", {}),
"created_at": item.get("created_at"),
}
)
return results
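
For reference, the combined payload returned by this tool pairs PersonInfo rows with knowledge items. The key names below are copied from _query_person_records and _query_related_knowledge; every value is invented purely for illustration.

# illustrative only: key names from the handler above, values made up
person_info_payload = {
    "query": "小明",
    "persons": [
        {
            "person_id": "p_0001",
            "person_name": "小明",
            "user_nickname": "明仔",
            "user_id": "10001",
            "platform": "qq",
            "name_reason": "",
            "is_known": True,
            "know_counts": 12,
            "memory_points": ["喜欢猫"],
            "last_known_time": "2026-04-01T12:00:00",
        }
    ],
    "related_knowledge": [
        {
            "id": "k_0001",
            "category_id": "c_daily",
            "category_name": "日常",
            "content": "小明养了一只橘猫",
            "metadata": {},
            "created_at": None,
        }
    ],
}
print(person_info_payload["query"], len(person_info_payload["persons"]))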


@@ -0,0 +1,182 @@
"""reply 内置工具。"""
from typing import Optional
from src.chat.replyer.replyer_manager import replyer_manager
from src.common.logger import get_logger
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.services import send_service
from .context import BuiltinToolRuntimeContext
logger = get_logger("maisaka_builtin_reply")
def get_tool_spec() -> ToolSpec:
"""获取 reply 工具声明。"""
return ToolSpec(
name="reply",
brief_description="根据当前思考生成并发送一条可见回复。",
detailed_description=(
"参数说明:\n"
"- msg_idstring必填。要回复的目标用户消息编号。\n"
"- quoteboolean可选。当有非常明确的回复目标时以引用回复的方式发送默认 true。\n"
"- unknown_wordsarray可选。回复前可能需要查询的黑话或词条列表。"
),
parameters_schema={
"type": "object",
"properties": {
"msg_id": {
"type": "string",
"description": "要回复的目标用户消息编号。",
},
"quote": {
"type": "boolean",
"description": "当有非常明确的回复目标时,以引用回复的方式发送。",
"default": True,
},
"unknown_words": {
"type": "array",
"description": "回复前可能需要查询的黑话或词条列表。",
"items": {"type": "string"},
},
},
"required": ["msg_id"],
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 reply 内置工具。"""
latest_thought = context.reasoning if context is not None else invocation.reasoning
target_message_id = str(invocation.arguments.get("msg_id") or "").strip()
quote_reply = bool(invocation.arguments.get("quote", True))
raw_unknown_words = invocation.arguments.get("unknown_words")
unknown_words = raw_unknown_words if isinstance(raw_unknown_words, list) else None
if not target_message_id:
return tool_ctx.build_failure_result(
invocation.tool_name,
"回复工具需要提供有效的 `msg_id` 参数。",
)
target_message = tool_ctx.runtime._source_messages_by_id.get(target_message_id)
if target_message is None:
return tool_ctx.build_failure_result(
invocation.tool_name,
f"未找到要回复的目标消息msg_id={target_message_id}",
)
logger.info(
f"{tool_ctx.runtime.log_prefix} 已触发回复工具 "
f"目标消息编号={target_message_id} 引用回复={quote_reply} 最新思考={latest_thought!r}"
)
try:
replyer = replyer_manager.get_replyer(
chat_stream=tool_ctx.runtime.chat_stream,
request_type="maisaka_replyer",
replyer_type="maisaka",
)
except Exception:
logger.exception(
f"{tool_ctx.runtime.log_prefix} 获取回复生成器时发生异常: 目标消息编号={target_message_id}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
"获取 Maisaka 回复生成器时发生异常。",
)
if replyer is None:
logger.error(f"{tool_ctx.runtime.log_prefix} 获取 Maisaka 回复生成器失败")
return tool_ctx.build_failure_result(
invocation.tool_name,
"Maisaka 回复生成器当前不可用。",
)
try:
success, reply_result = await replyer.generate_reply_with_context(
reply_reason=latest_thought,
stream_id=tool_ctx.runtime.session_id,
reply_message=target_message,
chat_history=tool_ctx.runtime._chat_history,
unknown_words=unknown_words,
log_reply=False,
)
except Exception as exc:
logger.exception(
f"{tool_ctx.runtime.log_prefix} 回复生成器执行异常: 目标消息编号={target_message_id} 异常={exc}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
"生成可见回复时发生异常。",
)
reply_text = reply_result.completion.response_text.strip() if success else ""
if not reply_text:
logger.warning(
f"{tool_ctx.runtime.log_prefix} 回复生成器返回空文本: "
f"目标消息编号={target_message_id} 错误信息={reply_result.error_message!r}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
"生成可见回复失败。",
)
reply_segments = tool_ctx.post_process_reply_text(reply_text)
combined_reply_text = "".join(reply_segments)
try:
sent = False
for index, segment in enumerate(reply_segments):
sent = await send_service.text_to_stream(
text=segment,
stream_id=tool_ctx.runtime.session_id,
set_reply=quote_reply if index == 0 else False,
reply_message=target_message if quote_reply and index == 0 else None,
selected_expressions=reply_result.selected_expression_ids or None,
typing=index > 0,
)
if not sent:
break
except Exception:
logger.exception(
f"{tool_ctx.runtime.log_prefix} 发送文字消息时发生异常,目标消息编号={target_message_id}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
"发送可见回复时发生异常。",
)
if not sent:
return tool_ctx.build_failure_result(
invocation.tool_name,
"可见回复生成成功,但发送失败。",
structured_content={
"msg_id": target_message_id,
"quote": quote_reply,
"reply_segments": reply_segments,
},
)
target_user_info = target_message.message_info.user_info
target_user_name = target_user_info.user_cardname or target_user_info.user_nickname or target_user_info.user_id
tool_ctx.append_guided_reply_to_chat_history(combined_reply_text)
return tool_ctx.build_success_result(
invocation.tool_name,
"回复已生成并发送。",
structured_content={
"msg_id": target_message_id,
"quote": quote_reply,
"reply_text": combined_reply_text,
"reply_segments": reply_segments,
"target_user_name": target_user_name,
},
)


@@ -0,0 +1,106 @@
"""send_emoji 内置工具。"""
from typing import Any, Dict, Optional
from src.chat.emoji_system.maisaka_tool import send_emoji_for_maisaka
from src.common.logger import get_logger
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from .context import BuiltinToolRuntimeContext
logger = get_logger("maisaka_builtin_send_emoji")
def get_tool_spec() -> ToolSpec:
"""获取 send_emoji 工具声明。"""
return ToolSpec(
name="send_emoji",
brief_description="发送一个合适的表情包来辅助表达情绪。",
detailed_description="参数说明:\n- emotionstring可选。希望表达的情绪例如 happy、sad、angry 等。",
parameters_schema={
"type": "object",
"properties": {
"emotion": {
"type": "string",
"description": "希望表达的情绪,例如 happy、sad、angry 等。",
},
},
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 send_emoji 内置工具。"""
del context
emotion = str(invocation.arguments.get("emotion") or "").strip()
context_texts = [
message.get_history_text()
for message in tool_ctx.runtime._chat_history[-5:]
if message.get_history_text().strip()
]
structured_result: Dict[str, Any] = {
"success": False,
"message": "",
"description": "",
"emotion": [],
"requested_emotion": emotion,
"matched_emotion": "",
}
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具,请求情绪={emotion!r}")
try:
send_result = await send_emoji_for_maisaka(
stream_id=tool_ctx.runtime.session_id,
requested_emotion=emotion,
reasoning=tool_ctx.engine.last_reasoning_content,
context_texts=context_texts,
)
except Exception as exc:
logger.exception(f"{tool_ctx.runtime.log_prefix} 发送表情包时发生异常: {exc}")
structured_result["message"] = f"发送表情包时发生异常:{exc}"
return tool_ctx.build_failure_result(
invocation.tool_name,
structured_result["message"],
structured_content=structured_result,
)
structured_result["description"] = send_result.description
structured_result["emotion"] = list(send_result.emotions)
structured_result["matched_emotion"] = send_result.matched_emotion
structured_result["message"] = send_result.message
if send_result.success:
logger.info(
f"{tool_ctx.runtime.log_prefix} 表情包发送成功 "
f"描述={send_result.description!r} 情绪标签={send_result.emotions} "
f"请求情绪={emotion!r} 命中情绪={send_result.matched_emotion!r}"
)
tool_ctx.append_sent_emoji_to_chat_history(
emoji_base64=send_result.emoji_base64,
success_message=send_result.message,
)
structured_result["success"] = True
return tool_ctx.build_success_result(
invocation.tool_name,
send_result.message,
structured_content=structured_result,
)
logger.warning(
f"{tool_ctx.runtime.log_prefix} 表情包发送失败 "
f"请求情绪={emotion!r} 错误信息={send_result.message}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
structured_result["message"],
structured_content=structured_result,
)


@@ -0,0 +1,51 @@
"""wait 内置工具。"""
from typing import Optional
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from .context import BuiltinToolRuntimeContext
def get_tool_spec() -> ToolSpec:
"""获取 wait 工具声明。"""
return ToolSpec(
name="wait",
brief_description="暂停当前对话并等待用户新的输入。",
detailed_description="参数说明:\n- secondsinteger必填。等待的秒数。",
parameters_schema={
"type": "object",
"properties": {
"seconds": {
"type": "integer",
"description": "等待的秒数。",
},
},
"required": ["seconds"],
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
async def handle_tool(
tool_ctx: BuiltinToolRuntimeContext,
invocation: ToolInvocation,
context: Optional[ToolExecutionContext] = None,
) -> ToolExecutionResult:
"""执行 wait 内置工具。"""
del context
seconds = invocation.arguments.get("seconds", 30)
try:
wait_seconds = int(seconds)
except (TypeError, ValueError):
wait_seconds = 30
wait_seconds = max(0, wait_seconds)
tool_ctx.runtime._enter_wait_state(seconds=wait_seconds, tool_call_id=invocation.call_id)
return tool_ctx.build_success_result(
invocation.tool_name,
f"当前对话循环进入等待状态,最长等待 {wait_seconds} 秒。",
metadata={"pause_execution": True},
)


@@ -1,159 +0,0 @@
"""Maisaka 内置工具声明。"""
from copy import deepcopy
from typing import Any, Dict, List
from src.core.tooling import ToolSpec, build_tool_detailed_description
from src.llm_models.payload_content.tool_option import ToolDefinitionInput
def _build_tool_spec(
name: str,
brief_description: str,
parameters_schema: Dict[str, Any] | None = None,
detailed_description: str = "",
) -> ToolSpec:
"""构建单个内置工具声明。
Args:
name: 工具名称。
brief_description: 简要描述。
parameters_schema: 参数 Schema。
detailed_description: 详细描述;为空时自动根据参数生成。
Returns:
ToolSpec: 构建完成的工具声明。
"""
normalized_schema = deepcopy(parameters_schema) if parameters_schema is not None else None
return ToolSpec(
name=name,
brief_description=brief_description,
detailed_description=(
detailed_description.strip()
or build_tool_detailed_description(normalized_schema)
),
parameters_schema=normalized_schema,
provider_name="maisaka_builtin",
provider_type="builtin",
)
def create_builtin_tool_specs() -> List[ToolSpec]:
"""创建 Maisaka 内置工具声明列表。
Returns:
List[ToolSpec]: 内置工具声明列表。
"""
return [
_build_tool_spec(
name="wait",
brief_description="暂停当前对话并等待用户新的输入。",
parameters_schema={
"type": "object",
"properties": {
"seconds": {
"type": "integer",
"description": "等待的秒数。",
},
},
"required": ["seconds"],
},
),
_build_tool_spec(
name="reply",
brief_description="根据当前思考生成并发送一条可见回复。",
parameters_schema={
"type": "object",
"properties": {
"msg_id": {
"type": "string",
"description": "要回复的目标用户消息编号。",
},
"quote": {
"type": "boolean",
"description": "当有非常明确的回复目标时,以引用回复的方式发送。",
"default": True,
},
"unknown_words": {
"type": "array",
"description": "回复前可能需要查询的黑话或词条列表。",
"items": {"type": "string"},
},
},
"required": ["msg_id"],
},
),
_build_tool_spec(
name="query_jargon",
brief_description="查询当前聊天上下文中的黑话或词条含义。",
parameters_schema={
"type": "object",
"properties": {
"words": {
"type": "array",
"description": "要查询的词条列表。",
"items": {"type": "string"},
},
},
"required": ["words"],
},
),
# _build_tool_spec(
# name="query_person_info",
# brief_description="查询某个人的档案和相关记忆信息。",
# parameters_schema={
# "type": "object",
# "properties": {
# "person_name": {
# "type": "string",
# "description": "人物名称、昵称或用户 ID。",
# },
# "limit": {
# "type": "integer",
# "description": "最多返回多少条匹配记录。",
# "default": 3,
# },
# },
# "required": ["person_name"],
# },
# ),
_build_tool_spec(
name="no_reply",
brief_description="本轮不进行回复,等待其他用户的新消息。",
),
_build_tool_spec(
name="send_emoji",
brief_description="发送一个合适的表情包来辅助表达情绪。",
parameters_schema={
"type": "object",
"properties": {
"emotion": {
"type": "string",
"description": "希望表达的情绪,例如 happy、sad、angry 等。",
},
},
},
),
]
def get_builtin_tool_specs() -> List[ToolSpec]:
"""获取 Maisaka 内置工具声明。
Returns:
List[ToolSpec]: 内置工具声明列表。
"""
return create_builtin_tool_specs()
def get_builtin_tools() -> List[ToolDefinitionInput]:
"""获取兼容旧模型层的内置工具定义。
Returns:
List[ToolDefinitionInput]: 可直接传给模型层的工具定义。
"""
return [tool_spec.to_llm_definition() for tool_spec in create_builtin_tool_specs()]


@@ -37,7 +37,7 @@ from src.plugin_runtime.hook_schema_utils import build_object_schema
from src.plugin_runtime.host.hook_spec_registry import HookSpec, HookSpecRegistry
from src.services.llm_service import LLMServiceClient
from .builtin_tools import get_builtin_tools
from .builtin_tool import get_builtin_tools
from .context_messages import AssistantMessage, LLMContextMessage, SessionBackedMessage, ToolResultMessage
from .message_adapter import format_speaker_content
from .prompt_cli_renderer import PromptCLIVisualizer
@@ -290,14 +290,7 @@ class MaisakaChatLoopService:
Args:
tools_section: 额外注入到提示词中的工具说明片段。
"""
if self._prompts_loaded:
return
async with self._prompt_load_lock:
if self._prompts_loaded:
return
try:
self._chat_system_prompt = load_prompt(
"maisaka_chat",
@@ -317,29 +310,29 @@ class MaisakaChatLoopService:
prompt_lines: List[str] = []
if self._is_group_chat is True:
if group_chat_prompt := str(global_config.experimental.group_chat_prompt or "").strip():
prompt_lines.append(group_chat_prompt)
if group_chat_prompt := str(global_config.chat.group_chat_prompt or "").strip():
prompt_lines.append(f"通用注意事项:\n{group_chat_prompt}")
elif self._is_group_chat is False:
if private_chat_prompt := str(global_config.experimental.private_chat_prompts or "").strip():
prompt_lines.append(private_chat_prompt)
if private_chat_prompt := str(global_config.chat.private_chat_prompts or "").strip():
prompt_lines.append(f"通用注意事项:\n{private_chat_prompt}")
if self._session_id:
if chat_prompt := self._get_chat_prompt_for_chat(self._session_id, self._is_group_chat).strip():
prompt_lines.append(chat_prompt)
prompt_lines.append(f"当前聊天额外注意事项:\n{chat_prompt}")
if not prompt_lines:
return ""
return f"在该聊天中的注意事项:\n" + "\n".join(prompt_lines) + "\n"
return f"在该聊天中的注意事项:\n" + "\n\n".join(prompt_lines) + "\n"
@staticmethod
def _get_chat_prompt_for_chat(chat_id: str, is_group_chat: Optional[bool]) -> str:
"""根据聊天流 ID 获取匹配的额外提示。"""
if not global_config.experimental.chat_prompts:
if not global_config.chat.chat_prompts:
return ""
for chat_prompt_item in global_config.experimental.chat_prompts:
for chat_prompt_item in global_config.chat.chat_prompts:
if hasattr(chat_prompt_item, "platform"):
platform = str(chat_prompt_item.platform or "").strip()
item_id = str(chat_prompt_item.item_id or "").strip()

File diff suppressed because it is too large.


@@ -7,7 +7,7 @@ from typing import Dict, Optional
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolProvider, ToolSpec
from .builtin_tools import get_builtin_tool_specs
from .builtin_tool import get_builtin_tool_specs
BuiltinToolHandler = Callable[[ToolInvocation, Optional[ToolExecutionContext]], Awaitable[ToolExecutionResult]]


@@ -24,10 +24,8 @@ from src.config.official_configs import (
ChineseTypoConfig,
DebugConfig,
EmojiConfig,
ExperimentalConfig,
ExpressionConfig,
KeywordReactionConfig,
LPMMKnowledgeConfig,
MaimMessageConfig,
MemoryConfig,
MessageReceiveConfig,
@@ -109,9 +107,7 @@ async def get_config_section_schema(section_name: str):
- response_post_process: ResponsePostProcessConfig
- response_splitter: ResponseSplitterConfig
- telemetry: TelemetryConfig
- experimental: ExperimentalConfig
- maim_message: MaimMessageConfig
- lpmm_knowledge: LPMMKnowledgeConfig
- memory: MemoryConfig
- debug: DebugConfig
- voice: VoiceConfig
@@ -133,9 +129,7 @@ async def get_config_section_schema(section_name: str):
"response_post_process": ResponsePostProcessConfig,
"response_splitter": ResponseSplitterConfig,
"telemetry": TelemetryConfig,
"experimental": ExperimentalConfig,
"maim_message": MaimMessageConfig,
"lpmm_knowledge": LPMMKnowledgeConfig,
"memory": MemoryConfig,
"debug": DebugConfig,
"voice": VoiceConfig,