perf:修改默认配置

This commit is contained in:
SengokuCola
2026-05-07 14:28:35 +08:00
parent d3cd5a4afc
commit fd51632da8
12 changed files with 218 additions and 14 deletions

View File

@@ -15,7 +15,7 @@
1. 如果 {bot_name} 已经回复,但用户暂时没有新的回复,且没有新信息需要搜集,使用 wait 或者 no_reply 进行等待。
2. 如果用户有新发言,但是你评估用户还有后续发言尚未发送,可以适当等待让用户说完。
3. 你需要先评估是用户之间在互动还是和{bot_name}在互动,不要盲目插话,弄错回复对象
4. 你需要评估哪些话是对{bot_name}的发言,哪些是用户之间的交流或者自言自语,不要频繁插入无关的话题
4. 你需要评估哪些话是对{bot_name}的发言,哪些是用户之间的交流或者自言自语,根据情况适当发言
5. 在特定情况下也可以连续回复,例如想要追问,或者补充自己先前的发言,这时应调用 continue,让主流程继续执行。
6. 如果你判断现在需要真正回复、查询信息、查看上下文或做进一步分析,不要在这里完成,直接调用 continue,把工作交给主流程。

View File

@@ -19,7 +19,7 @@ dependencies = [
"jieba>=0.42.1",
"json-repair>=0.47.6",
"maim-message>=0.6.2",
"maibot-dashboard>=1.0.6",
"maibot-dashboard>=1.0.7",
"maibot-plugin-sdk>=2.4.0",
"matplotlib>=3.10.5",
"mcp",

View File

@@ -33,4 +33,4 @@ tomlkit>=0.13.3
typing-extensions
uvicorn>=0.35.0
watchfiles>=1.1.1
maibot-dashboard>=1.0.6
maibot-dashboard>=1.0.7

View File

@@ -10,6 +10,7 @@ import tomlkit
from .config_base import AttributeData, ConfigBase, Field
from .config_utils import compare_versions, output_config_changes, recursive_parse_item_to_table
from .config_upgrade_hooks import apply_config_upgrade_hooks
from .default_model_config import create_default_model_config
from .file_watcher import FileChange, FileWatcher
from .legacy_migration import migrate_legacy_bind_env_to_bot_config_dict, try_migrate_legacy_bot_config_dict
@@ -55,9 +56,9 @@ BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0-pre.13"
CONFIG_VERSION: str = "8.10.10"
MODEL_CONFIG_VERSION: str = "1.15.3"
MMC_VERSION: str = "1.0.0-pre.14"
CONFIG_VERSION: str = "8.10.11"
MODEL_CONFIG_VERSION: str = "1.16.0"
logger = get_logger("config")
@@ -547,6 +548,7 @@ def load_config_from_file(
old_ver: str = inner_version
env_migration_applied: bool = False
a_memorix_migration_applied: bool = False
upgrade_hook_applied: bool = False
config_data.remove("inner") # 移除 inner 部分,避免干扰后续处理
config_data = config_data.unwrap() # 转换为普通字典,方便后续处理
if config_path.name == "bot_config.toml" and config_class.__name__ == "Config":
@@ -561,6 +563,11 @@ def load_config_from_file(
config_data = legacy_migration.data
config_data, a_memorix_migration_applied = _migrate_legacy_a_memorix_config(config_data)
config_data = _normalize_loaded_bot_config_dict(config_data)
hook_result = apply_config_upgrade_hooks(config_data, config_path.name, old_ver, new_ver)
upgrade_hook_applied = hook_result.migrated
if hook_result.migrated:
logger.warning(f"检测到配置升级钩子变更,已应用: {hook_result.reason}")
config_data = hook_result.data
# 保留一份“干净”的原始数据副本,避免第一次 from_dict 过程中对 dict 的就地修改
original_data: dict[str, Any] = copy.deepcopy(config_data)
try:
@@ -580,7 +587,7 @@ def load_config_from_file(
raise e
else:
raise e
if compare_versions(old_ver, new_ver) or env_migration_applied or a_memorix_migration_applied:
if compare_versions(old_ver, new_ver) or env_migration_applied or a_memorix_migration_applied or upgrade_hook_applied:
output_config_changes(attribute_data, logger, old_ver, new_ver, config_path.name)
write_config_to_file(target_config, config_path, new_ver, override_repr)
if env_migration_applied:

View File

@@ -0,0 +1,99 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable
from .official_configs import ChatConfig
# Signature shared by all upgrade hooks: receives the raw config dict
# (mutated in place) and returns the list of config key paths it changed.
ConfigUpgradeHookCallable = Callable[[dict[str, Any]], list[str]]
@dataclass(frozen=True)
class ConfigUpgradeHook:
    """A config upgrade hook, executed once when an upgrade crosses its target version."""

    target_version: str  # fires when old_ver < target_version <= new_ver
    config_names: tuple[str, ...]  # config file names this hook applies to
    migrate: ConfigUpgradeHookCallable  # mutates the dict in place; returns changed key paths
@dataclass
class ConfigUpgradeHookResult:
    """Outcome of running the upgrade hooks over one config dict."""

    data: dict[str, Any]  # the (possibly mutated) config dict
    migrated: bool  # True when at least one hook reported a change
    reason: str = ""  # comma-joined "<target_version>:<key path>" entries
def _parse_version(version: str) -> tuple[int, ...]:
return tuple(int(part) for part in version.split("."))
def _version_in_upgrade_range(old_ver: str, target_ver: str, new_ver: str) -> bool:
old_parts = _parse_version(old_ver)
target_parts = _parse_version(target_ver)
new_parts = _parse_version(new_ver)
return old_parts < target_parts <= new_parts
def set_nested_config_value(data: dict[str, Any], path: tuple[str, ...], value: Any, force: bool = True) -> bool:
    """Set a nested config value; return whether the dict actually changed.

    Walks ``path`` inside ``data``, replacing any missing or non-dict
    intermediate node with a fresh empty dict. With ``force=False`` an
    already-present leaf key is left untouched. An empty path, or a leaf
    that already equals ``value``, is reported as "no change".
    """
    if not path:
        return False
    *parent_keys, leaf = path
    node: dict[str, Any] = data
    for key in parent_keys:
        child = node.get(key)
        if not isinstance(child, dict):
            # Non-dict intermediates are clobbered so the walk can continue.
            child = {}
            node[key] = child
        node = child
    if not force and leaf in node:
        return False
    if node.get(leaf) == value:
        return False
    node[leaf] = value
    return True
def _reset_group_chat_prompt_to_default(data: dict[str, Any]) -> list[str]:
    """Overwrite ``chat.group_chat_prompt`` with the current default prompt.

    Returns the list of changed key paths expected by the hook protocol:
    ``["chat.group_chat_prompt"]`` when the value was replaced, ``[]`` otherwise.
    """
    fresh_default = ChatConfig().group_chat_prompt
    if set_nested_config_value(data, ("chat", "group_chat_prompt"), fresh_default):
        return ["chat.group_chat_prompt"]
    return []
# Hooks applied to bot_config.toml upgrades; each one fires exactly once,
# when the upgrade path crosses its target_version.
BOT_CONFIG_UPGRADE_HOOKS: tuple[ConfigUpgradeHook, ...] = (
    ConfigUpgradeHook(
        target_version="8.10.11",
        config_names=("bot_config.toml",),
        migrate=_reset_group_chat_prompt_to_default,
    ),
)
# No model_config.toml hooks are registered yet.
MODEL_CONFIG_UPGRADE_HOOKS: tuple[ConfigUpgradeHook, ...] = ()
def apply_config_upgrade_hooks(
    data: dict[str, Any],
    config_name: str,
    old_ver: str,
    new_ver: str,
) -> ConfigUpgradeHookResult:
    """Run every registered hook whose target version lies in (old_ver, new_ver].

    Hooks are filtered by config file name and by version range, then allowed
    to mutate ``data`` in place. Each changed key path is recorded as
    ``"<target_version>:<path>"`` and joined with commas into the reason.
    """
    reasons: list[str] = []
    for hook in (*BOT_CONFIG_UPGRADE_HOOKS, *MODEL_CONFIG_UPGRADE_HOOKS):
        if config_name not in hook.config_names:
            continue
        if not _version_in_upgrade_range(old_ver, hook.target_version, new_ver):
            continue
        reasons.extend(f"{hook.target_version}:{changed}" for changed in hook.migrate(data))
    return ConfigUpgradeHookResult(data=data, migrated=bool(reasons), reason=",".join(reasons))

View File

@@ -459,6 +459,16 @@ class ModelTaskConfig(ConfigBase):
)
"""学习模型配置,用于表达方式学习和黑话学习;留空时自动继用 utils 模型"""
emoji: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={
"x-widget": "custom",
"x-icon": "smile",
"advanced": True,
},
)
"""表情包发送模型配置;留空时保持原有 planner/vlm 选择逻辑"""
vlm: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={

View File

@@ -425,8 +425,8 @@ class ChatConfig(ConfigBase):
default=(
"你正在qq群里聊天下面是群里正在聊的内容其中包含聊天记录和聊天中的图片和表情包。\n"
"回复尽量简短一些。最好一次对一个话题进行回复,但必须考虑不同群友发言之间的交互,免得啰嗦或者回复内容太乱。请注意把握聊天内容。\n"
"不要总是提及自己的身份背景,根据聊天内容自由发挥,但是要日常不浮夸,不要太关注具体的聊天内容,不要刻意找话题,。\n"
"要回复的太频繁!不用刻意回复表情包,只要关注表情包表达的含义。控制回复的频率,不要每个人的消息都回复,回复你感兴趣的或者主动提及你的。\n"
"不要总是提及自己的身份背景,根据聊天内容自由发挥,但是要日常不浮夸,不要刻意找话题,。\n"
"不用刻意回复其他人发送的表情包,只要关注表情包表达的含义。你可以适当发送表情包表达情绪。控制回复的频率,不要每个人的消息都回复,优先回复你感兴趣的或者主动提及你的,适当回复其他话题\n"
),
json_schema_extra={
"label": {

View File

@@ -12,7 +12,7 @@ from src.services.llm_service import LLMServiceClient
logger = get_logger("expression_utils")
judge_llm = LLMServiceClient(task_name="replyer", request_type="expression_check")
judge_llm = LLMServiceClient(task_name="learner", request_type="expression_check")
def _normalize_repair_json_result(repaired_result: Any) -> str:

View File

@@ -284,6 +284,15 @@ def _resolve_emoji_selector_model_task_name() -> str:
"""根据 planner 模型视觉能力选择表情选择子代理的模型任务。"""
model_config = config_manager.get_model_config()
emoji_task_config = getattr(model_config.model_task_config, "emoji", None)
emoji_models = [
model_name
for model_name in getattr(emoji_task_config, "model_list", [])
if str(model_name).strip()
]
if emoji_models:
return "emoji"
planner_models = [
model_name
for model_name in model_config.model_task_config.planner.model_list

View File

@@ -204,6 +204,9 @@ def detect_package_runner() -> PackageRunner:
if " pip " in f" {parent_command} " or executable.endswith("pip.exe") or executable.endswith("/pip"):
return "pip"
if sys.prefix != sys.base_prefix or os.getenv("VIRTUAL_ENV"):
return "pip"
return "unknown"

View File

@@ -0,0 +1,76 @@
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
from src.config.config_upgrade_hooks import (
BOT_CONFIG_UPGRADE_HOOKS,
ConfigUpgradeHook,
apply_config_upgrade_hooks,
set_nested_config_value,
)
from src.config.official_configs import ChatConfig
import src.config.config_upgrade_hooks as hooks
def test_apply_config_upgrade_hooks_runs_when_target_version_is_crossed(monkeypatch):
    """A hook fires when old < target <= new, mutates the data, and reports its reasons."""

    def migrate(data):
        if set_nested_config_value(data, ("chat", "enable"), False):
            return ["chat.enable"]
        return []

    stub_hook = ConfigUpgradeHook(
        target_version="8.10.11",
        config_names=("bot_config.toml",),
        migrate=migrate,
    )
    monkeypatch.setattr(hooks, "BOT_CONFIG_UPGRADE_HOOKS", (stub_hook,))
    payload = {"chat": {"enable": True}}
    result = apply_config_upgrade_hooks(payload, "bot_config.toml", "8.10.10", "8.10.11")
    assert result.migrated is True
    assert result.reason == "8.10.11:chat.enable"
    assert result.data["chat"]["enable"] is False
def test_apply_config_upgrade_hooks_skips_versions_outside_upgrade_range(monkeypatch):
    """A hook must not fire when the old version has already reached its target."""

    def migrate(data):
        set_nested_config_value(data, ("chat", "enable"), False)
        return ["chat.enable"]

    stub_hook = ConfigUpgradeHook(
        target_version="8.10.11",
        config_names=("bot_config.toml",),
        migrate=migrate,
    )
    monkeypatch.setattr(hooks, "BOT_CONFIG_UPGRADE_HOOKS", (stub_hook,))
    payload = {"chat": {"enable": True}}
    result = apply_config_upgrade_hooks(payload, "bot_config.toml", "8.10.11", "8.10.12")
    assert result.migrated is False
    assert result.data["chat"]["enable"] is True
def test_set_nested_config_value_can_keep_existing_value():
    """With force=False an existing leaf key is preserved and no change is reported."""
    config = {"webui": {"port": 8001}}
    was_changed = set_nested_config_value(config, ("webui", "port"), 8080, force=False)
    assert was_changed is False
    assert config["webui"]["port"] == 8001
def test_builtin_hook_resets_group_chat_prompt_when_upgrading_from_8_10_10():
    """The registered 8.10.11 hook replaces a customised prompt with the current default."""
    payload = {"chat": {"group_chat_prompt": "自定义旧提示词"}}
    result = apply_config_upgrade_hooks(payload, "bot_config.toml", "8.10.10", "8.10.11")
    assert result.migrated is True
    assert result.reason == "8.10.11:chat.group_chat_prompt"
    assert result.data["chat"]["group_chat_prompt"] == ChatConfig().group_chat_prompt
def test_bot_config_upgrade_hooks_register_group_chat_prompt_reset():
    """Exactly one built-in bot_config hook is registered and it targets 8.10.11."""
    assert len(BOT_CONFIG_UPGRADE_HOOKS) == 1
    registered = BOT_CONFIG_UPGRADE_HOOKS[0]
    assert registered.target_version == "8.10.11"

8
uv.lock generated
View File

@@ -1511,7 +1511,7 @@ requires-dist = [
{ name = "httpx", extras = ["socks"] },
{ name = "jieba", specifier = ">=0.42.1" },
{ name = "json-repair", specifier = ">=0.47.6" },
{ name = "maibot-dashboard", specifier = ">=1.0.6" },
{ name = "maibot-dashboard", specifier = ">=1.0.7" },
{ name = "maibot-plugin-sdk", specifier = ">=2.4.0" },
{ name = "maim-message", specifier = ">=0.6.2" },
{ name = "matplotlib", specifier = ">=3.10.5" },
@@ -1549,11 +1549,11 @@ dev = [
[[package]]
name = "maibot-dashboard"
version = "1.0.6"
version = "1.0.7"
source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/73/76/a2a47f902f20bbaa699584d7fa9676f591503e0d6954de65aa0a90c07000/maibot_dashboard-1.0.6.tar.gz", hash = "sha256:f383d3505a102554a51bf49d1fc56a8ba8c5db60a3c41b7eab4513a6fd0a1f88", size = 2485522, upload-time = "2026-05-06T10:44:36.42Z" }
sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/20/48/3477af08da9d7fb422a7ecd7800ebbc2200b7dd09685c167a11aed07e773/maibot_dashboard-1.0.7.tar.gz", hash = "sha256:29a0e2f121d05f6b87cd79059c59e77a533ee9cbdaa56888043443d1d4382785", size = 2488200, upload-time = "2026-05-07T05:09:22.843Z" }
wheels = [
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/ae/14/a62631e60c9606a793d6740ef61fc0b8868cf8a79c9f192667026874799b/maibot_dashboard-1.0.6-py3-none-any.whl", hash = "sha256:36299d7039fbb98fd8aa1fb31d2bbc040d1018d9d87ebcf09194e4efb0cf9af7", size = 2552642, upload-time = "2026-05-06T10:44:34.216Z" },
{ url = "https://pypi.tuna.tsinghua.edu.cn/packages/02/91/629d6891a9e5fc12083efe2de712342a1d20fd0ce22cf1685a45d1d2fc91/maibot_dashboard-1.0.7-py3-none-any.whl", hash = "sha256:38e7e4baa921b5b5b75404aa1b9c0dfce264c6ee4783b0e54174a32f23eb4880", size = 2555199, upload-time = "2026-05-07T05:09:21.262Z" },
]
[[package]]