remove:移除冗余的迁移代码

This commit is contained in:
SengokuCola
2026-04-12 16:34:43 +08:00
parent ff75930466
commit fc08e44293
4 changed files with 93 additions and 303 deletions

23
bot.py
View File

@@ -1,7 +1,6 @@
# raise RuntimeError("System Not Ready")
from pathlib import Path
from dotenv import load_dotenv
from rich.traceback import install
import asyncio
@@ -23,28 +22,6 @@ script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
set_locale(os.getenv("MAIBOT_LOCALE", "zh-CN"))
env_path = Path(__file__).parent / ".env"
template_env_path = Path(__file__).parent / "template" / "template.env"
if env_path.exists():
load_dotenv(str(env_path), override=True)
else:
print("[WIP] no .env file found, and templates is not ready yet.")
print("[WIP] continue startup, use environment and existing config values.")
# try:
# if template_env_path.exists():
# shutil.copyfile(template_env_path, env_path)
# print(t("startup.env_created"))
# load_dotenv(str(env_path), override=True)
# else:
# print(t("startup.env_template_missing"))
# raise FileNotFoundError(t("startup.env_file_missing"))
# except Exception as e:
# print(t("startup.env_auto_create_failed", error=e))
# raise
set_locale(os.getenv("MAIBOT_LOCALE", "zh-CN"))
# 检查是否是 Worker 进程,只在 Worker 进程中输出详细的初始化信息
# Runner 进程只需要基本的日志功能,不需要详细的初始化日志
is_worker = os.environ.get("MAIBOT_WORKER_PROCESS") == "1"

View File

@@ -53,8 +53,9 @@ PROJECT_ROOT: Path = Path(__file__).parent.parent.parent.absolute().resolve()
CONFIG_DIR: Path = PROJECT_ROOT / "config"
BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.6.0"
CONFIG_VERSION: str = "8.7.1"
MODEL_CONFIG_VERSION: str = "1.14.0"
logger = get_logger("config")
@@ -453,6 +454,20 @@ def generate_new_config_file(config_class: type[T], config_path: Path, inner_con
write_config_to_file(config, config_path, inner_config_version)
def remove_legacy_env_file(env_path: Path) -> None:
"""删除已完成迁移的旧版 `.env` 文件。"""
if not env_path.exists():
return
try:
env_path.unlink()
except OSError as exc:
logger.warning(f"旧版 .env 配置文件删除失败,请手动删除: {env_path},原因: {exc}")
else:
logger.warning(f"检测到旧版环境变量绑定配置迁移成功,已删除旧版 .env 文件: {env_path}")
def load_config_from_file(
config_class: type[T], config_path: Path, new_ver: str, override_repr: bool = False
) -> tuple[T, bool]:
@@ -466,10 +481,12 @@ def load_config_from_file(
if not isinstance(inner_version, str):
raise TypeError(t("config.invalid_inner_version"))
old_ver: str = inner_version
env_migration_applied: bool = False
config_data.remove("inner") # 移除 inner 部分,避免干扰后续处理
config_data = config_data.unwrap() # 转换为普通字典,方便后续处理
if config_path.name == "bot_config.toml" and config_class.__name__ == "Config":
env_migration = migrate_legacy_bind_env_to_bot_config_dict(config_data)
env_migration_applied = env_migration.migrated
if env_migration.migrated:
logger.warning(f"检测到旧版环境变量绑定配置,已迁移到主配置: {env_migration.reason}")
config_data = env_migration.data
@@ -496,9 +513,11 @@ def load_config_from_file(
raise e
else:
raise e
if compare_versions(old_ver, new_ver):
if compare_versions(old_ver, new_ver) or env_migration_applied:
output_config_changes(attribute_data, logger, old_ver, new_ver, config_path.name)
write_config_to_file(target_config, config_path, new_ver, override_repr)
if env_migration_applied:
remove_legacy_env_file(LEGACY_ENV_PATH)
updated = True
return target_config, updated
except Exception as e:

View File

@@ -1,12 +1,8 @@
"""
legacy_migration.py
一个“可随时拔掉”的旧配置兼容层
- 仅在配置解析失败时尝试修复旧格式数据(7.x -> 8.x 这一类结构性变更)
- 不依赖 Pydantic / ConfigBase,仅对 dict 做最小转换
- 成功则返回(修复后的 dict, True),失败则返回(原 dict, False)
设计目标:与现有 config 加载逻辑的接触点尽可能小,未来不需要时可一键移除。
旧配置兼容层
仅保留当前仍需要的“解析前结构修复”,避免老配置在 `from_dict` 前直接失败。
"""
from __future__ import annotations
@@ -16,12 +12,7 @@ from typing import Any, Optional
import os
from src.common.logger import get_logger
logger = get_logger("legacy_migration")
# 方便未来快速关闭/移除
ENABLE_LEGACY_MIGRATION: bool = True
@@ -43,6 +34,7 @@ def _as_list(x: Any) -> Optional[list[Any]]:
def _parse_host_env(value: Any) -> Optional[str]:
if not isinstance(value, str):
return None
normalized_value = value.strip()
return normalized_value or None
@@ -75,116 +67,73 @@ def _migrate_env_value(section: dict[str, Any], key: str, parsed_env_value: Any,
return True
def _move_section_key(source: dict[str, Any], target: dict[str, Any], key: str) -> bool:
"""将配置项从旧分组移动到新分组,若新分组已有值则保留新值。"""
if key not in source:
return False
if key not in target:
target[key] = source[key]
source.pop(key, None)
return True
def _parse_triplet_target(s: str) -> Optional[dict[str, str]]:
"""
解析 "platform:id:type" -> {platform,item_id,rule_type}
返回 None 表示无法解析。
解析 "platform:id:type" -> {platform, item_id, rule_type}
"""
if not isinstance(s, str):
return None
parts = s.split(":", 2)
if len(parts) != 3:
return None
platform, item_id, rule_type = parts
if rule_type not in ("group", "private"):
return None
return {"platform": platform, "item_id": item_id, "rule_type": rule_type}
def _parse_quad_prompt(s: str) -> Optional[dict[str, str]]:
"""
解析 "platform:id:type:prompt" -> {platform,item_id,rule_type,prompt}
prompt 允许包含冒号,因此只切前三个冒号。
"""
if not isinstance(s, str):
return None
parts = s.split(":", 3)
if len(parts) != 4:
return None
platform, item_id, rule_type, prompt = parts
if rule_type not in ("group", "private"):
return None
if not prompt:
return None
return {"platform": platform, "item_id": item_id, "rule_type": rule_type, "prompt": prompt}
def _parse_enable_disable(v: Any) -> Optional[bool]:
"""
兼容旧值 "enable"/"disable" 以及 bool。
"""
if isinstance(v, bool):
return v
if isinstance(v, str):
vv = v.strip().lower()
if vv == "enable":
normalized_value = v.strip().lower()
if normalized_value == "enable":
return True
if vv == "disable":
if normalized_value == "disable":
return False
return None
def _migrate_expression_learning_list(expr: dict[str, Any]) -> bool:
"""
旧:
learning_list = [
["", "enable", "enable", "enable"],
["qq:1919810:group", "enable", "enable", "enable"],
]
兼容旧旧格式:
learning_list = [
["qq:1919810:group", "enable", "enable", "0.5"],
["", "disable", "disable", "0.1"],
]
新:
[[expression.learning_list]]
platform="", item_id="", rule_type="group", use_expression=true, enable_learning=true, enable_jargon_learning=true
将旧版 expression.learning_list 转成当前结构。
"""
ll = _as_list(expr.get("learning_list"))
if ll is None:
learning_list = _as_list(expr.get("learning_list"))
if learning_list is None:
return False
# 如果已经是新格式(列表里是 dict)跳过
if ll and all(isinstance(i, dict) for i in ll):
if learning_list and all(isinstance(item, dict) for item in learning_list):
return False
migrated_items: list[dict[str, Any]] = []
for row in ll:
r = _as_list(row)
if r is None or len(r) < 4:
# 行结构不对,无法安全迁移
for row in learning_list:
row_items = _as_list(row)
if row_items is None or len(row_items) < 4:
return False
target_raw = r[0]
use_expression = _parse_enable_disable(r[1])
enable_learning = _parse_enable_disable(r[2])
enable_jargon_learning = _parse_enable_disable(r[3])
target_raw = row_items[0]
use_expression = _parse_enable_disable(row_items[1])
enable_learning = _parse_enable_disable(row_items[2])
enable_jargon_learning = _parse_enable_disable(row_items[3])
if enable_jargon_learning is None:
# 更早期的配置在第 4 列记录的是一个已废弃的数值权重/阈值,
# 当前 schema 已没有对应字段。这里按保守策略兼容迁移:
# 丢弃旧数值,并将 enable_jargon_learning 置为 False。
# 更早期版本第 4 列是已废弃的数值阈值,这里仅做保守兼容。
try:
float(str(r[3]))
float(str(row_items[3]))
except (TypeError, ValueError):
pass
else:
enable_jargon_learning = False
if use_expression is None or enable_learning is None or enable_jargon_learning is None:
return False
# 旧格式中 target 允许为空字符串:表示全局;新结构必须有三元组字段
if target_raw == "" or target_raw is None:
target = {"platform": "", "item_id": "", "rule_type": "group"}
else:
@@ -209,101 +158,56 @@ def _migrate_expression_learning_list(expr: dict[str, Any]) -> bool:
def _migrate_expression_groups(expr: dict[str, Any]) -> bool:
"""
旧:
expression_groups = [
["qq:1:group","qq:2:group"],
["qq:3:group"],
]
新:
expression_groups = [
{ expression_groups = [ {platform="qq", item_id="1", rule_type="group"}, ... ] },
{ expression_groups = [ ... ] },
]
将旧版 expression.expression_groups 转成当前结构。
"""
eg = _as_list(expr.get("expression_groups"))
if eg is None:
expression_groups = _as_list(expr.get("expression_groups"))
if expression_groups is None:
return False
if expression_groups and all(isinstance(item, dict) for item in expression_groups):
return False
# 已经是新格式(列表里是 dict 且包含 expression_groups)跳过
if eg and all(isinstance(i, dict) for i in eg):
return False
migrated: list[dict[str, Any]] = []
for group in eg:
g = _as_list(group)
if g is None:
migrated_groups: list[dict[str, Any]] = []
for group in expression_groups:
group_items = _as_list(group)
if group_items is None:
return False
targets: list[dict[str, str]] = []
for item in g:
for item in group_items:
parsed = _parse_triplet_target(str(item))
if parsed is None:
return False
targets.append(parsed)
migrated.append({"expression_groups": targets})
expr["expression_groups"] = migrated
migrated_groups.append({"expression_groups": targets})
expr["expression_groups"] = migrated_groups
return True
def _migrate_target_item_list(parent: dict[str, Any], key: str) -> bool:
"""
将 list[str] 的 "platform:id:type" 迁移为 list[{platform,item_id,rule_type}]
用于:memory.global_memory_blacklist 等。
将 list[str] 的 "platform:id:type" 迁移为 list[TargetItem]。
"""
raw = _as_list(parent.get(key))
if raw is None:
if raw is None or not raw:
return False
if not raw:
return False
if raw and all(isinstance(i, dict) for i in raw):
if all(isinstance(item, dict) for item in raw):
return False
targets: list[dict[str, str]] = []
for item in raw:
parsed = _parse_triplet_target(str(item))
if parsed is None:
return False
targets.append(parsed)
parent[key] = targets
return True
def _migrate_extra_prompt_list(exp: dict[str, Any], key: str) -> bool:
"""
将 list[str] 的 "platform:id:type:prompt" 迁移为 list[{platform,item_id,rule_type,prompt}]
用于:experimental.chat_prompts
"""
raw = _as_list(exp.get(key))
if raw is None:
return False
if raw and all(isinstance(i, dict) for i in raw):
return False
items: list[dict[str, str]] = []
for item in raw:
parsed = _parse_quad_prompt(str(item))
if parsed is None:
return False
items.append(parsed)
exp[key] = items
return True
def _parse_replyer_mode(v: Any) -> Optional[str]:
"""兼容旧 replyer_generator_type 到布尔开关的迁移。"""
if isinstance(v, bool):
return "multimodal" if v else "text"
if not isinstance(v, str):
return None
normalized_value = v.strip().lower()
if normalized_value in {"text", "multimodal", "auto"}:
return normalized_value
if normalized_value == "legacy":
return "text"
return None
def migrate_legacy_bind_env_to_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
"""将旧版环境变量中的绑定地址迁移到主配置结构。"""
"""将旧版 `.env` 中的绑定地址迁移到主配置结构。"""
migrated_any = False
reasons: list[str] = []
@@ -341,8 +245,7 @@ def migrate_legacy_bind_env_to_bot_config_dict(data: dict[str, Any]) -> Migratio
def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
"""
尝试对“总配置 bot_config.toml”的 dict(已 unwrap)进行旧格式修复
仅做我们明确知道的结构性变更;其它字段不动。
尝试修复 `bot_config.toml` 的少量旧结构,仅保留当前仍需要的兼容逻辑
"""
if not ENABLE_LEGACY_MIGRATION:
return MigrationResult(data=data, migrated=False, reason="disabled")
@@ -355,41 +258,30 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
if _migrate_expression_learning_list(expr):
migrated_any = True
reasons.append("expression.learning_list")
if _migrate_expression_groups(expr):
migrated_any = True
reasons.append("expression.expression_groups")
# allow_reflect: 旧 list[str] -> 新 list[TargetItem]
if _migrate_target_item_list(expr, "allow_reflect"):
migrated_any = True
reasons.append("expression.allow_reflect")
# manual_reflect_operator_id: 旧 str -> 新 Optional[TargetItem]
mroi = expr.get("manual_reflect_operator_id")
if isinstance(mroi, str) and mroi.strip():
parsed = _parse_triplet_target(mroi.strip())
manual_reflect_operator_id = expr.get("manual_reflect_operator_id")
if isinstance(manual_reflect_operator_id, str) and manual_reflect_operator_id.strip():
parsed = _parse_triplet_target(manual_reflect_operator_id.strip())
if parsed is not None:
expr["manual_reflect_operator_id"] = parsed
migrated_any = True
reasons.append("expression.manual_reflect_operator_id")
chat = _as_dict(data.get("chat"))
if chat is None:
chat = {}
data["chat"] = chat
elif "private_plan_style" in chat:
chat.pop("private_plan_style", None)
migrated_any = True
reasons.append("chat.private_plan_style_removed")
if isinstance(manual_reflect_operator_id, str) and not manual_reflect_operator_id.strip():
expr.pop("manual_reflect_operator_id", None)
migrated_any = True
reasons.append("expression.manual_reflect_operator_id_empty")
personality = _as_dict(data.get("personality"))
visual = _as_dict(data.get("visual"))
if visual is None and (
(personality is not None and "visual_style" in personality)
or "multimodal_planner" in chat
or "replyer_generator_type" in chat
):
visual = {}
data["visual"] = visual
if visual is not None and personality is not None and "visual_style" in personality:
if "visual_style" not in visual:
visual["visual_style"] = personality["visual_style"]
@@ -397,117 +289,19 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("personality.visual_style_moved_to_visual.visual_style")
if visual is not None and "multimodal_planner" in chat:
if "multimodal_planner" not in visual and isinstance(chat["multimodal_planner"], bool):
visual["multimodal_planner"] = chat["multimodal_planner"]
if "multimodal_planner" in visual:
chat.pop("multimodal_planner", None)
if visual is not None and "multimodal_planner" in visual and "planner_mode" not in visual:
multimodal_planner = visual.pop("multimodal_planner")
if isinstance(multimodal_planner, bool):
visual["planner_mode"] = "multimodal" if multimodal_planner else "text"
migrated_any = True
reasons.append("chat.multimodal_planner_moved_to_visual.multimodal_planner")
reasons.append("visual.multimodal_planner_moved_to_visual.planner_mode")
else:
visual["multimodal_planner"] = multimodal_planner
if visual is not None and "multimodal_replyer" in visual:
replyer_mode = _parse_replyer_mode(visual.get("multimodal_replyer"))
if "replyer_mode" not in visual and replyer_mode is not None:
visual["replyer_mode"] = replyer_mode
if "replyer_mode" in visual:
visual.pop("multimodal_replyer", None)
migrated_any = True
reasons.append("visual.multimodal_replyer_moved_to_visual.replyer_mode")
if visual is not None and "replyer_generator_type" in chat:
replyer_mode = _parse_replyer_mode(chat["replyer_generator_type"])
if "replyer_mode" not in visual and replyer_mode is not None:
visual["replyer_mode"] = replyer_mode
if "replyer_mode" in visual:
chat.pop("replyer_generator_type", None)
migrated_any = True
reasons.append("chat.replyer_generator_type_moved_to_visual.replyer_mode")
maisaka = _as_dict(data.get("maisaka"))
mem = _as_dict(data.get("memory"))
debug = _as_dict(data.get("debug"))
if maisaka is not None:
moved_memory_keys = ("enable_memory_query_tool", "memory_query_default_limit")
if any(key in maisaka for key in moved_memory_keys) and mem is None:
mem = {}
data["memory"] = mem
if mem is not None:
for moved_key in moved_memory_keys:
if _move_section_key(maisaka, mem, moved_key):
migrated_any = True
reasons.append(f"maisaka.{moved_key}_moved_to_memory")
if mem is not None and "show_memory_prompt" in mem and debug is None:
debug = {}
data["debug"] = debug
if mem is not None:
if _migrate_target_item_list(mem, "global_memory_blacklist"):
migrated_any = True
reasons.append("memory.global_memory_blacklist")
if debug is not None and _move_section_key(mem, debug, "show_memory_prompt"):
migrated_any = True
reasons.append("memory.show_memory_prompt_moved_to_debug")
for removed_key in (
"agent_timeout_seconds",
"max_agent_iterations",
):
if removed_key in mem:
mem.pop(removed_key, None)
migrated_any = True
reasons.append(f"memory.{removed_key}_removed")
relationship = _as_dict(data.get("relationship"))
if relationship is not None:
data.pop("relationship", None)
memory = _as_dict(data.get("memory"))
if memory is not None and _migrate_target_item_list(memory, "global_memory_blacklist"):
migrated_any = True
reasons.append("relationship_removed")
exp = _as_dict(data.get("experimental"))
if exp is not None:
if _migrate_extra_prompt_list(exp, "chat_prompts"):
migrated_any = True
reasons.append("experimental.chat_prompts")
if "private_plan_style" in exp:
exp.pop("private_plan_style", None)
migrated_any = True
reasons.append("experimental.private_plan_style_removed")
for key in ("group_chat_prompt", "private_chat_prompts", "chat_prompts"):
if key in exp and key not in chat:
chat[key] = exp[key]
migrated_any = True
reasons.append(f"experimental.{key}_moved_to_chat")
data.pop("experimental", None)
migrated_any = True
reasons.append("experimental_removed")
if chat is not None and "think_mode" in chat:
chat.pop("think_mode", None)
migrated_any = True
reasons.append("chat.think_mode_removed")
tool = _as_dict(data.get("tool"))
if tool is not None:
data.pop("tool", None)
migrated_any = True
reasons.append("tool_section_removed")
# ExpressionConfig 中的 manual_reflect_operator_id:
# 旧版本可能是 ""(字符串),新版本期望 Optional[TargetItem]。
# 空字符串视为未配置,转换为 None/删除键以避免校验错误。
expr = _as_dict(data.get("expression"))
if expr is not None:
mroi = expr.get("manual_reflect_operator_id")
if isinstance(mroi, str) and not mroi.strip():
expr.pop("manual_reflect_operator_id", None)
migrated_any = True
reasons.append("expression.manual_reflect_operator_id_empty")
reasons.append("memory.global_memory_blacklist")
reason = ",".join(reasons)
return MigrationResult(data=data, migrated=migrated_any, reason=reason)

View File

@@ -145,14 +145,14 @@ class VisualConfig(ConfigBase):
__ui_label__ = "视觉"
__ui_icon__ = "image"
multimodal_planner: bool = Field(
default=True,
planner_mode: Literal["text", "multimodal", "auto"] = Field(
default="auto",
json_schema_extra={
"x-widget": "switch",
"x-icon": "image",
"x-widget": "select",
"x-icon": "git-branch",
},
)
"""是否启用多模态planner"""
"""规划器模式:auto 为根据模型信息自动选择,text 为纯文本模式,multimodal 为多模态模式"""
replyer_mode: Literal["text", "multimodal", "auto"] = Field(
default="auto",