perf:修改默认配置
This commit is contained in:
@@ -10,6 +10,7 @@ import tomlkit
|
||||
|
||||
from .config_base import AttributeData, ConfigBase, Field
|
||||
from .config_utils import compare_versions, output_config_changes, recursive_parse_item_to_table
|
||||
from .config_upgrade_hooks import apply_config_upgrade_hooks
|
||||
from .default_model_config import create_default_model_config
|
||||
from .file_watcher import FileChange, FileWatcher
|
||||
from .legacy_migration import migrate_legacy_bind_env_to_bot_config_dict, try_migrate_legacy_bot_config_dict
|
||||
@@ -55,9 +56,9 @@ BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
|
||||
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
|
||||
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
|
||||
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
|
||||
MMC_VERSION: str = "1.0.0-pre.13"
|
||||
CONFIG_VERSION: str = "8.10.10"
|
||||
MODEL_CONFIG_VERSION: str = "1.15.3"
|
||||
MMC_VERSION: str = "1.0.0-pre.14"
|
||||
CONFIG_VERSION: str = "8.10.11"
|
||||
MODEL_CONFIG_VERSION: str = "1.16.0"
|
||||
|
||||
logger = get_logger("config")
|
||||
|
||||
@@ -547,6 +548,7 @@ def load_config_from_file(
|
||||
old_ver: str = inner_version
|
||||
env_migration_applied: bool = False
|
||||
a_memorix_migration_applied: bool = False
|
||||
upgrade_hook_applied: bool = False
|
||||
config_data.remove("inner") # 移除 inner 部分,避免干扰后续处理
|
||||
config_data = config_data.unwrap() # 转换为普通字典,方便后续处理
|
||||
if config_path.name == "bot_config.toml" and config_class.__name__ == "Config":
|
||||
@@ -561,6 +563,11 @@ def load_config_from_file(
|
||||
config_data = legacy_migration.data
|
||||
config_data, a_memorix_migration_applied = _migrate_legacy_a_memorix_config(config_data)
|
||||
config_data = _normalize_loaded_bot_config_dict(config_data)
|
||||
hook_result = apply_config_upgrade_hooks(config_data, config_path.name, old_ver, new_ver)
|
||||
upgrade_hook_applied = hook_result.migrated
|
||||
if hook_result.migrated:
|
||||
logger.warning(f"检测到配置升级钩子变更,已应用: {hook_result.reason}")
|
||||
config_data = hook_result.data
|
||||
# 保留一份“干净”的原始数据副本,避免第一次 from_dict 过程中对 dict 的就地修改
|
||||
original_data: dict[str, Any] = copy.deepcopy(config_data)
|
||||
try:
|
||||
@@ -580,7 +587,7 @@ def load_config_from_file(
|
||||
raise e
|
||||
else:
|
||||
raise e
|
||||
if compare_versions(old_ver, new_ver) or env_migration_applied or a_memorix_migration_applied:
|
||||
if compare_versions(old_ver, new_ver) or env_migration_applied or a_memorix_migration_applied or upgrade_hook_applied:
|
||||
output_config_changes(attribute_data, logger, old_ver, new_ver, config_path.name)
|
||||
write_config_to_file(target_config, config_path, new_ver, override_repr)
|
||||
if env_migration_applied:
|
||||
|
||||
99
src/config/config_upgrade_hooks.py
Normal file
99
src/config/config_upgrade_hooks.py
Normal file
@@ -0,0 +1,99 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Callable
|
||||
|
||||
from .official_configs import ChatConfig
|
||||
|
||||
|
||||
ConfigUpgradeHookCallable = Callable[[dict[str, Any]], list[str]]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ConfigUpgradeHook:
    """A config upgrade hook, executed once when an upgrade crosses its target version."""

    # Version at which this hook fires; applied when old_ver < target_version <= new_ver.
    target_version: str
    # Config file names (e.g. "bot_config.toml") this hook applies to.
    config_names: tuple[str, ...]
    # Callable that mutates the config dict in place and returns a list of
    # human-readable change descriptions (empty list means nothing changed).
    migrate: ConfigUpgradeHookCallable
|
||||
|
||||
|
||||
@dataclass
class ConfigUpgradeHookResult:
    """Outcome of running config upgrade hooks over a config dict."""

    # The config data after hooks ran (hooks mutate it in place).
    data: dict[str, Any]
    # True if at least one hook reported a change.
    migrated: bool
    # Comma-joined "<target_version>:<change>" descriptions; empty when unchanged.
    reason: str = ""
|
||||
|
||||
|
||||
def _parse_version(version: str) -> tuple[int, ...]:
|
||||
return tuple(int(part) for part in version.split("."))
|
||||
|
||||
|
||||
def _version_in_upgrade_range(old_ver: str, target_ver: str, new_ver: str) -> bool:
|
||||
old_parts = _parse_version(old_ver)
|
||||
target_parts = _parse_version(target_ver)
|
||||
new_parts = _parse_version(new_ver)
|
||||
return old_parts < target_parts <= new_parts
|
||||
|
||||
|
||||
def set_nested_config_value(data: dict[str, Any], path: tuple[str, ...], value: Any, force: bool = True) -> bool:
    """Set a nested config value, creating intermediate dicts as needed.

    Walks ``path`` inside ``data`` (mutating it in place), replacing any
    non-dict intermediate node with a fresh empty dict, then assigns ``value``
    at the leaf.

    Args:
        data: Config dict to mutate in place.
        path: Key sequence addressing the leaf; an empty path is a no-op.
        value: Value to store at the leaf key.
        force: When False, never overwrite an existing leaf key.

    Returns:
        True if ``data`` actually changed, False otherwise.
    """

    if not path:
        return False

    current: dict[str, Any] = data
    for key in path[:-1]:
        next_value = current.get(key)
        if not isinstance(next_value, dict):
            # Non-dict intermediates are clobbered so the rest of the path can exist.
            next_value = {}
            current[key] = next_value
        current = next_value

    leaf_key = path[-1]
    if not force and leaf_key in current:
        return False
    # BUGFIX: use an explicit membership test instead of
    # `current.get(leaf_key) == value`. The `.get()` form conflates "key absent"
    # with "key present holding None", so setting a missing key to None was
    # silently skipped and misreported as no change.
    if leaf_key in current and current[leaf_key] == value:
        return False

    current[leaf_key] = value
    return True
|
||||
|
||||
|
||||
def _reset_group_chat_prompt_to_default(data: dict[str, Any]) -> list[str]:
    """Overwrite chat.group_chat_prompt with the current shipped default.

    Returns the list of changed config paths (empty when the value was
    already the default).
    """
    default_prompt = ChatConfig().group_chat_prompt
    if set_nested_config_value(data, ("chat", "group_chat_prompt"), default_prompt):
        return ["chat.group_chat_prompt"]
    return []
|
||||
|
||||
|
||||
# Hooks applied while upgrading bot_config.toml. The 8.10.11 hook re-applies
# the default group chat prompt — NOTE(review): presumably because the shipped
# default prompt text changed in that release; confirm against ChatConfig.
BOT_CONFIG_UPGRADE_HOOKS: tuple[ConfigUpgradeHook, ...] = (
    ConfigUpgradeHook(
        target_version="8.10.11",
        config_names=("bot_config.toml",),
        migrate=_reset_group_chat_prompt_to_default,
    ),
)
# No model_config.toml upgrade hooks registered yet.
MODEL_CONFIG_UPGRADE_HOOKS: tuple[ConfigUpgradeHook, ...] = ()
|
||||
|
||||
|
||||
def apply_config_upgrade_hooks(
    data: dict[str, Any],
    config_name: str,
    old_ver: str,
    new_ver: str,
) -> ConfigUpgradeHookResult:
    """Run every registered hook whose target version the upgrade crosses.

    A hook fires when ``config_name`` is one of its configured file names and
    its target version lies in (old_ver, new_ver]. Hooks mutate ``data`` in
    place; each change is recorded as "<target_version>:<reason>".
    """
    collected: list[str] = []

    for hook in BOT_CONFIG_UPGRADE_HOOKS + MODEL_CONFIG_UPGRADE_HOOKS:
        applies = (
            config_name in hook.config_names
            and _version_in_upgrade_range(old_ver, hook.target_version, new_ver)
        )
        if not applies:
            continue
        collected.extend(f"{hook.target_version}:{why}" for why in hook.migrate(data))

    return ConfigUpgradeHookResult(
        data=data,
        migrated=bool(collected),
        reason=",".join(collected),
    )
|
||||
@@ -459,6 +459,16 @@ class ModelTaskConfig(ConfigBase):
|
||||
)
|
||||
"""学习模型配置,用于表达方式学习和黑话学习;留空时自动继用 utils 模型"""
|
||||
|
||||
emoji: TaskConfig = Field(
|
||||
default_factory=TaskConfig,
|
||||
json_schema_extra={
|
||||
"x-widget": "custom",
|
||||
"x-icon": "smile",
|
||||
"advanced": True,
|
||||
},
|
||||
)
|
||||
"""表情包发送模型配置;留空时保持原有 planner/vlm 选择逻辑"""
|
||||
|
||||
vlm: TaskConfig = Field(
|
||||
default_factory=TaskConfig,
|
||||
json_schema_extra={
|
||||
|
||||
@@ -425,8 +425,8 @@ class ChatConfig(ConfigBase):
|
||||
default=(
|
||||
"你正在qq群里聊天,下面是群里正在聊的内容,其中包含聊天记录和聊天中的图片和表情包。\n"
|
||||
"回复尽量简短一些。最好一次对一个话题进行回复,但必须考虑不同群友发言之间的交互,免得啰嗦或者回复内容太乱。请注意把握聊天内容。\n"
|
||||
"不要总是提及自己的身份背景,根据聊天内容自由发挥,但是要日常不浮夸,不要太关注具体的聊天内容,不要刻意找话题,。\n"
|
||||
"不要回复的太频繁!不用刻意回复表情包,只要关注表情包表达的含义。控制回复的频率,不要每个人的消息都回复,只回复你感兴趣的或者主动提及你的。\n"
|
||||
"不要总是提及自己的身份背景,根据聊天内容自由发挥,但是要日常不浮夸,不要刻意找话题,。\n"
|
||||
"不用刻意回复其他人发送的表情包,只要关注表情包表达的含义。你可以适当发送表情包表达情绪。控制回复的频率,不要每个人的消息都回复,优先回复你感兴趣的或者主动提及你的,适当回复其他话题。\n"
|
||||
),
|
||||
json_schema_extra={
|
||||
"label": {
|
||||
|
||||
@@ -12,7 +12,7 @@ from src.services.llm_service import LLMServiceClient
|
||||
|
||||
logger = get_logger("expression_utils")
|
||||
|
||||
judge_llm = LLMServiceClient(task_name="replyer", request_type="expression_check")
|
||||
judge_llm = LLMServiceClient(task_name="learner", request_type="expression_check")
|
||||
|
||||
|
||||
def _normalize_repair_json_result(repaired_result: Any) -> str:
|
||||
|
||||
@@ -284,6 +284,15 @@ def _resolve_emoji_selector_model_task_name() -> str:
|
||||
"""根据 planner 模型视觉能力选择表情选择子代理的模型任务。"""
|
||||
|
||||
model_config = config_manager.get_model_config()
|
||||
emoji_task_config = getattr(model_config.model_task_config, "emoji", None)
|
||||
emoji_models = [
|
||||
model_name
|
||||
for model_name in getattr(emoji_task_config, "model_list", [])
|
||||
if str(model_name).strip()
|
||||
]
|
||||
if emoji_models:
|
||||
return "emoji"
|
||||
|
||||
planner_models = [
|
||||
model_name
|
||||
for model_name in model_config.model_task_config.planner.model_list
|
||||
|
||||
@@ -204,6 +204,9 @@ def detect_package_runner() -> PackageRunner:
|
||||
if " pip " in f" {parent_command} " or executable.endswith("pip.exe") or executable.endswith("/pip"):
|
||||
return "pip"
|
||||
|
||||
if sys.prefix != sys.base_prefix or os.getenv("VIRTUAL_ENV"):
|
||||
return "pip"
|
||||
|
||||
return "unknown"
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user