refactor: 重构maisaka内置工具逻辑,拆分文件

This commit is contained in:
SengokuCola
2026-04-03 14:51:05 +08:00
parent 6e6aa0b13a
commit 6c720e0403
19 changed files with 1075 additions and 2384 deletions

View File

@@ -21,7 +21,6 @@ from .official_configs import (
DatabaseConfig,
DebugConfig,
EmojiConfig,
ExperimentalConfig,
ExpressionConfig,
KeywordReactionConfig,
LPMMKnowledgeConfig,
@@ -56,7 +55,7 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config"
BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.2.1"
CONFIG_VERSION: str = "8.3.0"
MODEL_CONFIG_VERSION: str = "1.13.1"
logger = get_logger("config")
@@ -113,13 +112,10 @@ class Config(ConfigBase):
debug: DebugConfig = Field(default_factory=DebugConfig)
"""调试配置类"""
experimental: ExperimentalConfig = Field(default_factory=ExperimentalConfig)
"""实验性功能配置类"""
maim_message: MaimMessageConfig = Field(default_factory=MaimMessageConfig)
"""maim_message配置类"""
lpmm_knowledge: LPMMKnowledgeConfig = Field(default_factory=LPMMKnowledgeConfig)
lpmm_knowledge: LPMMKnowledgeConfig = Field(default_factory=LPMMKnowledgeConfig, repr=False)
"""LPMM知识库配置类"""
webui: WebUIConfig = Field(default_factory=WebUIConfig)

View File

@@ -253,11 +253,23 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("expression.manual_reflect_operator_id")
chat = _as_dict(data.get("chat"))
if chat is None:
chat = {}
data["chat"] = chat
mem = _as_dict(data.get("memory"))
if mem is not None:
if _migrate_target_item_list(mem, "global_memory_blacklist"):
migrated_any = True
reasons.append("memory.global_memory_blacklist")
for removed_key in (
"agent_timeout_seconds",
"global_memory",
"global_memory_blacklist",
"max_agent_iterations",
):
if removed_key in mem:
mem.pop(removed_key, None)
migrated_any = True
reasons.append(f"memory.{removed_key}_removed")
exp = _as_dict(data.get("experimental"))
if exp is not None:
@@ -265,7 +277,16 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("experimental.chat_prompts")
chat = _as_dict(data.get("chat"))
for key in ("private_plan_style", "group_chat_prompt", "private_chat_prompts", "chat_prompts"):
if key in exp and key not in chat:
chat[key] = exp[key]
migrated_any = True
reasons.append(f"experimental.{key}_moved_to_chat")
data.pop("experimental", None)
migrated_any = True
reasons.append("experimental_removed")
if chat is not None and "think_mode" in chat:
chat.pop("think_mode", None)
migrated_any = True

View File

@@ -244,15 +244,45 @@ class ChatConfig(ConfigBase):
},
)
"""每个聊天流最大保存的Plan/Reply日志数量超过此数量时会自动删除最老的日志"""
llm_quote: bool = Field(
default=False,
private_plan_style: str = Field(
default=(
"1.思考**所有**的可用的action中的**每个动作**是否符合当下条件,如果动作使用条件符合聊天内容就使用\n"
"2.如果相同的内容已经被执行,请不要重复执行\n"
"3.某句话如果已经被回复过,不要重复回复"
),
json_schema_extra={
"x-widget": "switch",
"x-icon": "quote",
"x-widget": "textarea",
"x-icon": "user",
},
)
"""是否在 reply action 中启用 quote 参数,启用后 LLM 可以控制是否引用消息"""
"""_wrap_私聊说话规则行为风格"""
group_chat_prompt: str = Field(
default="不要回复的太频繁!控制回复的频率,不要每个人的消息都回复,只回复你感兴趣的或者主动提及你的。",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "users",
},
)
"""_wrap_群聊通用注意事项"""
private_chat_prompts: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊通用注意事项"""
chat_prompts: list["ExtraPromptItem"] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "list",
},
)
"""_wrap_为指定聊天添加额外的 prompt 配置列表"""
enable_talk_value_rules: bool = Field(
default=True,
@@ -356,43 +386,6 @@ class MemoryConfig(ConfigBase):
__ui_parent__ = "emoji"
max_agent_iterations: int = Field(
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "layers",
},
)
"""记忆思考深度最低为1"""
agent_timeout_seconds: float = Field(
default=120.0,
json_schema_extra={
"x-widget": "input",
"x-icon": "clock",
},
)
"""最长回忆时间(秒)"""
global_memory: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "globe",
},
)
"""是否允许记忆检索在聊天记录中进行全局查询忽略当前chat_id仅对 search_chat_history 等工具生效)"""
global_memory_blacklist: list[TargetItem] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "shield-off",
},
)
"""_wrap_全局记忆黑名单当启用全局记忆时不将特定聊天流纳入检索"""
chat_history_topic_check_message_threshold: int = Field(
default=80,
ge=1,
@@ -444,10 +437,6 @@ class MemoryConfig(ConfigBase):
def model_post_init(self, context: Optional[dict] = None) -> None:
"""验证配置值"""
if self.max_agent_iterations < 1:
raise ValueError(f"max_agent_iterations 必须至少为1当前值: {self.max_agent_iterations}")
if self.agent_timeout_seconds <= 0:
raise ValueError(f"agent_timeout_seconds 必须大于0当前值: {self.agent_timeout_seconds}")
if self.chat_history_topic_check_message_threshold < 1:
raise ValueError(
f"chat_history_topic_check_message_threshold 必须至少为1当前值: {self.chat_history_topic_check_message_threshold}"
@@ -1052,57 +1041,13 @@ class ExtraPromptItem(ConfigBase):
"""额外的prompt内容"""
def model_post_init(self, context: Optional[dict] = None) -> None:
if not self.platform and not self.item_id and not self.prompt:
return super().model_post_init(context)
if not self.platform or not self.item_id or not self.prompt:
raise ValueError("ExtraPromptItem 中 platform, id 和 prompt 不能为空")
return super().model_post_init(context)
class ExperimentalConfig(ConfigBase):
"""实验功能配置类"""
__ui_parent__ = "debug"
private_plan_style: str = Field(
default=(
"1.思考**所有**的可用的action中的**每个动作**是否符合当下条件,如果动作使用条件符合聊天内容就使用"
"2.如果相同的内容已经被执行,请不要重复执行"
"3.某句话如果已经被回复过,不要重复回复"
),
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊说话规则行为风格实验性功能"""
group_chat_prompt: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "users",
},
)
"""_wrap_群聊通用注意事项实验性功能"""
private_chat_prompts: str = Field(
default="",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
},
)
"""_wrap_私聊通用注意事项实验性功能"""
chat_prompts: list[ExtraPromptItem] = Field(
default_factory=lambda: [],
json_schema_extra={
"x-widget": "custom",
"x-icon": "list",
},
)
"""_wrap_为指定聊天添加额外的prompt配置列表"""
class MaimMessageConfig(ConfigBase):
"""maim_message配置类"""
@@ -1482,16 +1427,6 @@ class MaiSakaConfig(ConfigBase):
},
)
"""启用知识库模块"""
show_analyze_cognition_prompt: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "terminal",
},
)
"""是否在 CLI 中显示 analyze_cognition 的 Prompt"""
show_thinking: bool = Field(
default=True,
json_schema_extra={