feat: 合并memory配置,优化webui交互和展示

This commit is contained in:
SengokuCola
2026-05-06 18:13:14 +08:00
parent 3bdc2a9f70
commit ad5b5889e2
28 changed files with 921 additions and 726 deletions

View File

@@ -3020,80 +3020,80 @@ class SDKMemoryKernel:
@staticmethod
def _feedback_cfg_enabled() -> bool:
    """Return True if feedback correction is enabled in the integration config."""
    # Config moved to a_memorix.integration (merged from the legacy top-level
    # `memory` section); getattr guards against a field missing from the schema.
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_enabled", False))
@staticmethod
def _feedback_cfg_window_hours() -> float:
    """Return the feedback-correction window in hours, clamped to >= 0.1."""
    memory_cfg = global_config.a_memorix.integration
    # `or 12.0` also maps an explicit 0 / None config value back to the default.
    return max(0.1, float(getattr(memory_cfg, "feedback_correction_window_hours", 12.0) or 12.0))
@staticmethod
def _feedback_cfg_check_interval_seconds() -> float:
    """Return the feedback-correction check interval in seconds."""
    memory_cfg = global_config.a_memorix.integration
    # Config value is in minutes; clamp to >= 1 minute, then convert to seconds.
    minutes = max(1, int(getattr(memory_cfg, "feedback_correction_check_interval_minutes", 30) or 30))
    return float(minutes) * 60.0
@staticmethod
def _feedback_cfg_batch_size() -> int:
    """Return the feedback-correction batch size, clamped to >= 1."""
    memory_cfg = global_config.a_memorix.integration
    # `or 20` restores the default when the config holds an explicit 0 / None.
    return max(1, int(getattr(memory_cfg, "feedback_correction_batch_size", 20) or 20))
@staticmethod
def _feedback_cfg_auto_apply_threshold() -> float:
    """Return the auto-apply confidence threshold, clamped into [0.0, 1.0]."""
    memory_cfg = global_config.a_memorix.integration
    value = float(getattr(memory_cfg, "feedback_correction_auto_apply_threshold", 0.85) or 0.85)
    # Clamp out-of-range config values rather than propagating them.
    return min(1.0, max(0.0, value))
@staticmethod
def _feedback_cfg_max_messages() -> int:
    """Return the max number of feedback messages to process, clamped to >= 1."""
    memory_cfg = global_config.a_memorix.integration
    return max(1, int(getattr(memory_cfg, "feedback_correction_max_feedback_messages", 30) or 30))
@staticmethod
def _feedback_cfg_prefilter_enabled() -> bool:
    """Return True if the feedback-correction prefilter is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_prefilter_enabled", True))
@staticmethod
def _feedback_cfg_paragraph_mark_enabled() -> bool:
    """Return True if paragraph marking is enabled for feedback correction (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_paragraph_mark_enabled", True))
@staticmethod
def _feedback_cfg_paragraph_hard_filter_enabled() -> bool:
    """Return True if hard filtering of stale paragraphs is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_paragraph_hard_filter_enabled", True))
@staticmethod
def _feedback_cfg_profile_refresh_enabled() -> bool:
    """Return True if profile refresh on feedback correction is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_profile_refresh_enabled", True))
@staticmethod
def _feedback_cfg_profile_force_refresh_on_read() -> bool:
    """Return True if profile force-refresh on read is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_profile_force_refresh_on_read", True))
@staticmethod
def _feedback_cfg_episode_rebuild_enabled() -> bool:
    """Return True if episode rebuild on feedback correction is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_episode_rebuild_enabled", True))
@staticmethod
def _feedback_cfg_episode_query_block_enabled() -> bool:
    """Return True if blocking corrected episodes from queries is enabled (default True)."""
    memory_cfg = global_config.a_memorix.integration
    return bool(getattr(memory_cfg, "feedback_correction_episode_query_block_enabled", True))
@staticmethod
def _feedback_cfg_reconcile_interval_seconds() -> float:
    """Return the feedback-correction reconcile interval in seconds."""
    memory_cfg = global_config.a_memorix.integration
    # Config value is in minutes; clamp to >= 1 minute, then convert to seconds.
    minutes = max(1, int(getattr(memory_cfg, "feedback_correction_reconcile_interval_minutes", 5) or 5))
    return float(minutes) * 60.0
@staticmethod
def _feedback_cfg_reconcile_batch_size() -> int:
    """Return the reconcile batch size, clamped to >= 1."""
    memory_cfg = global_config.a_memorix.integration
    return max(1, int(getattr(memory_cfg, "feedback_correction_reconcile_batch_size", 20) or 20))
@classmethod

View File

@@ -529,7 +529,7 @@ class EpisodeService:
"paragraph_count": 0,
}
memory_cfg = getattr(global_config, "memory", None)
memory_cfg = global_config.a_memorix.integration
paragraphs = self.metadata_store.get_live_paragraphs_by_source(
token,
exclude_stale=bool(getattr(memory_cfg, "feedback_correction_paragraph_hard_filter_enabled", True)),

View File

@@ -349,7 +349,7 @@ class PersonProfileService:
self,
evidence: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
memory_cfg = getattr(global_config, "memory", None)
memory_cfg = global_config.a_memorix.integration
if not bool(getattr(memory_cfg, "feedback_correction_paragraph_hard_filter_enabled", True)):
return evidence
paragraph_hashes = [

View File

@@ -27,7 +27,6 @@ from .official_configs import (
LogConfig,
MaimMessageConfig,
MCPConfig,
MemoryConfig,
MessageReceiveConfig,
PersonalityConfig,
PluginRuntimeConfig,
@@ -57,7 +56,7 @@ MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute(
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0-pre.11"
CONFIG_VERSION: str = "8.10.8"
CONFIG_VERSION: str = "8.10.9"
MODEL_CONFIG_VERSION: str = "1.15.3"
logger = get_logger("config")
@@ -84,9 +83,6 @@ class Config(ConfigBase):
expression: ExpressionConfig = Field(default_factory=ExpressionConfig)
"""表达配置类"""
memory: MemoryConfig = Field(default_factory=MemoryConfig)
"""记忆配置类"""
a_memorix: AMemorixConfig = Field(default_factory=AMemorixConfig)
"""A_Memorix 长期记忆子系统配置"""

View File

@@ -392,6 +392,23 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("visual.visual_style_removed")
memory = _as_dict(data.pop("memory", None))
if memory is not None:
a_memorix = _as_dict(data.get("a_memorix"))
if a_memorix is None:
a_memorix = {}
data["a_memorix"] = a_memorix
integration = _as_dict(a_memorix.get("integration"))
if integration is None:
integration = {}
a_memorix["integration"] = integration
for key, value in memory.items():
integration.setdefault(key, value)
migrated_any = True
reasons.append("memory->a_memorix.integration")
keyword_reaction = _as_dict(data.get("keyword_reaction"))
if keyword_reaction is not None:
if _drop_empty_keyword_rules(keyword_reaction, "keyword_rules"):

View File

@@ -467,8 +467,8 @@ class TargetItem(ConfigBase):
"""聊天流类型group群聊或private私聊"""
class MemoryConfig(ConfigBase):
"""记忆配置"""
class AMemorixIntegrationConfig(ConfigBase):
"""A_Memorix 与 Maisaka 集成配置"""
__ui_parent__ = "a_memorix"
@@ -1038,6 +1038,9 @@ class AMemorixConfig(ConfigBase):
__ui_label__ = "长期记忆"
__ui_icon__ = "brain"
integration: AMemorixIntegrationConfig = Field(default_factory=AMemorixIntegrationConfig)
"""Maisaka 集成配置"""
plugin: AMemorixPluginConfig = Field(default_factory=AMemorixPluginConfig)
"""子系统状态"""

View File

@@ -157,15 +157,6 @@ class MainSystem:
logger.info(t("startup.schedule_cancelled"))
raise
# async def forget_memory_task(self):
# """记忆遗忘任务"""
# while True:
# await asyncio.sleep(global_config.memory.forget_memory_interval)
# logger.info("[记忆遗忘] 开始遗忘记忆...")
# await self.hippocampus_manager.forget_memory(percentage=global_config.memory.memory_forget_percentage) # type: ignore
# logger.info("[记忆遗忘] 记忆遗忘完成")
async def main() -> None:
"""主函数"""
system = MainSystem()

View File

@@ -66,7 +66,7 @@ class BuiltinToolEntry:
def _get_query_memory_tool_spec() -> ToolSpec:
"""根据配置生成 query_memory 工具声明。"""
return get_query_memory_tool_spec(enabled=bool(global_config.memory.enable_memory_query_tool))
return get_query_memory_tool_spec(enabled=bool(global_config.a_memorix.integration.enable_memory_query_tool))
BUILTIN_TOOL_ENTRIES: List[BuiltinToolEntry] = [

View File

@@ -161,7 +161,7 @@ async def handle_tool(
f"不支持的检索模式:{mode}。可选值search/time/hybrid/episode/aggregate。",
)
default_limit = max(1, global_config.memory.memory_query_default_limit)
default_limit = max(1, global_config.a_memorix.integration.memory_query_default_limit)
try:
limit = int(invocation.arguments.get("limit", default_limit) or default_limit)
except (TypeError, ValueError):

View File

@@ -51,7 +51,7 @@ class PersonFactWritebackService:
logger.warning("关闭人物事实写回 worker 失败: %s", exc)
async def enqueue(self, message: Any) -> None:
if not bool(getattr(global_config.memory, "person_fact_writeback_enabled", True)):
if not bool(global_config.a_memorix.integration.person_fact_writeback_enabled):
return
if self._stopping:
return
@@ -251,7 +251,7 @@ class ChatSummaryWritebackService:
logger.warning("关闭聊天摘要写回 worker 失败: %s", exc)
async def enqueue(self, message: Any) -> None:
if not bool(getattr(global_config.memory, "chat_summary_writeback_enabled", True)):
if not bool(global_config.a_memorix.integration.chat_summary_writeback_enabled):
return
if self._stopping:
return
@@ -434,11 +434,11 @@ class ChatSummaryWritebackService:
@staticmethod
def _message_threshold() -> int:
return max(1, int(getattr(global_config.memory, "chat_summary_writeback_message_threshold", 12) or 12))
return max(1, int(global_config.a_memorix.integration.chat_summary_writeback_message_threshold))
@staticmethod
def _context_length() -> int:
return max(1, int(getattr(global_config.memory, "chat_summary_writeback_context_length", 50) or 50))
return max(1, int(global_config.a_memorix.integration.chat_summary_writeback_context_length))
class MemoryAutomationService:

View File

@@ -32,7 +32,6 @@ from src.config.official_configs import (
ExpressionConfig,
KeywordReactionConfig,
MaimMessageConfig,
MemoryConfig,
MessageReceiveConfig,
PersonalityConfig,
ResponsePostProcessConfig,
@@ -333,7 +332,6 @@ async def get_config_section_schema(section_name: str):
- response_splitter: ResponseSplitterConfig
- telemetry: TelemetryConfig
- maim_message: MaimMessageConfig
- memory: MemoryConfig
- debug: DebugConfig
- voice: VoiceConfig
- jargon: JargonConfig
@@ -354,7 +352,6 @@ async def get_config_section_schema(section_name: str):
"response_splitter": ResponseSplitterConfig,
"telemetry": TelemetryConfig,
"maim_message": MaimMessageConfig,
"memory": MemoryConfig,
"a_memorix": AMemorixConfig,
"debug": DebugConfig,
"voice": VoiceConfig,