移除不再使用的聊天总结,移除路径显示配置项,修复回复器错判的一个问题

This commit is contained in:
SengokuCola
2026-04-13 18:57:50 +08:00
parent 412166ed7e
commit 01ba4f55c2
17 changed files with 22 additions and 4395 deletions

View File

@@ -612,13 +612,6 @@ class ChatBot:
scope=scope,
) # 确保会话存在
try:
from src.services.memory_flow_service import memory_automation_service
await memory_automation_service.on_incoming_message(message)
except Exception as exc:
logger.warning(f"[{session_id}] 长期记忆自动摘要注册失败: {exc}")
# message.update_chat_stream(chat)
# 命令处理 - 使用新插件系统检查并处理命令。

View File

@@ -159,7 +159,6 @@ MODULE_ALIASES = {
"planner": "规划器",
"config": "配置",
"main": "主程序",
"chat_history_summarizer": "聊天概括器",
"plugin_runtime.integration": "IPC插件系统",
"plugin_runtime.host.supervisor": "插件监督器",
"plugin_runtime.host.runner_manager": "插件监督器",

View File

@@ -55,7 +55,7 @@ BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.7.1"
CONFIG_VERSION: str = "8.8.0"
MODEL_CONFIG_VERSION: str = "1.14.0"
logger = get_logger("config")

View File

@@ -414,95 +414,6 @@ class MemoryConfig(ConfigBase):
)
"""Maisaka 内置长期记忆检索工具 query_memory 的默认返回条数"""
long_term_auto_summary_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "book-open",
},
)
"""是否自动启动聊天总结并导入长期记忆"""
person_fact_writeback_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "user-round-pen",
},
)
"""是否在发送回复后自动提取并写回人物事实到长期记忆"""
chat_history_topic_check_message_threshold: int = Field(
default=80,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "hash",
},
)
"""聊天历史话题检查的消息数量阈值,当累积消息数达到此值时触发话题检查"""
chat_history_topic_check_time_hours: float = Field(
default=8.0,
json_schema_extra={
"x-widget": "input",
"x-icon": "clock",
},
)
"""聊天历史话题检查的时间阈值(小时),当距离上次检查超过此时间且消息数达到最小阈值时触发话题检查"""
chat_history_topic_check_min_messages: int = Field(
default=20,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "hash",
},
)
"""聊天历史话题检查的时间触发模式下的最小消息数阈值"""
chat_history_finalize_no_update_checks: int = Field(
default=3,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "check-circle",
},
)
"""聊天历史话题打包存储的连续无更新检查次数阈值当话题连续N次检查无新增内容时触发打包存储"""
chat_history_finalize_message_count: int = Field(
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "package",
},
)
"""聊天历史话题打包存储的消息条数阈值,当话题的消息条数超过此值时触发打包存储"""
def model_post_init(self, context: Optional[dict] = None) -> None:
"""验证配置值"""
if self.chat_history_topic_check_message_threshold < 1:
raise ValueError(
f"chat_history_topic_check_message_threshold 必须至少为1，当前值: {self.chat_history_topic_check_message_threshold}"
)
if self.chat_history_topic_check_time_hours <= 0:
raise ValueError(
f"chat_history_topic_check_time_hours 必须大于0，当前值: {self.chat_history_topic_check_time_hours}"
)
if self.chat_history_topic_check_min_messages < 1:
raise ValueError(
f"chat_history_topic_check_min_messages 必须至少为1，当前值: {self.chat_history_topic_check_min_messages}"
)
if self.chat_history_finalize_no_update_checks < 1:
raise ValueError(
f"chat_history_finalize_no_update_checks 必须至少为1，当前值: {self.chat_history_finalize_no_update_checks}"
)
if self.chat_history_finalize_message_count < 1:
raise ValueError(
f"chat_history_finalize_message_count 必须至少为1，当前值: {self.chat_history_finalize_message_count}"
)
return super().model_post_init(context)
class LearningItem(ConfigBase):

View File

@@ -53,40 +53,16 @@ def get_tool_spec() -> ToolSpec:
return ToolSpec(
name="send_emoji",
brief_description="发送一个合适的表情包来辅助表达情绪。",
detailed_description="参数说明:\n- emotion（string，可选）。希望表达的情绪，例如 happy、sad、angry 等",
detailed_description="无需参数,直接发送一个合适的表情包",
parameters_schema={
"type": "object",
"properties": {
"emotion": {
"type": "string",
"description": "希望表达的情绪,例如 happy、sad、angry 等。",
},
},
"properties": {},
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
def _normalize_candidate_emotions(emoji: MaiEmoji) -> list[str]:
"""清洗候选表情上的情绪标签。"""
raw_emotions = getattr(emoji, "emotion", None)
if isinstance(raw_emotions, list) and raw_emotions:
return [str(item).strip() for item in raw_emotions if str(item).strip()]
description = str(getattr(emoji, "description", "") or "").strip()
if not description:
return []
normalized_description = (
description.replace("", ",")
.replace("", ",")
.replace("", ",")
)
return [item.strip() for item in normalized_description.split(",") if item.strip()]
async def _load_emoji_bytes(emoji: MaiEmoji) -> bytes:
"""读取单个表情包图片字节。"""
@@ -232,18 +208,6 @@ async def _build_emoji_candidate_message(emojis: list[MaiEmoji]) -> SessionBacke
)
def _build_emoji_candidate_summary(emojis: list[MaiEmoji]) -> str:
"""构建供监控展示使用的候选表情摘要。"""
summary_lines: list[str] = []
for index, emoji in enumerate(emojis, start=1):
description = emoji.description.strip() or "(无描述)"
emotions = "".join(_normalize_candidate_emotions(emoji)) or ""
summary_lines.append(f"{index}. 描述:{description}")
summary_lines.append(f" 情绪:{emotions}")
return "\n".join(summary_lines).strip()
def _build_send_emoji_monitor_detail(
*,
request_messages: Optional[list[dict[str, Any]]] = None,
@@ -252,7 +216,7 @@ def _build_send_emoji_monitor_detail(
metrics: Optional[Dict[str, Any]] = None,
extra_sections: Optional[list[dict[str, str]]] = None,
) -> Dict[str, Any]:
"""构建 emotion tool 统一监控详情。"""
"""构建 send_emoji 工具统一监控详情。"""
detail: Dict[str, Any] = {}
if isinstance(request_messages, list) and request_messages:
@@ -281,7 +245,6 @@ def _build_send_emoji_monitor_detail(
def _build_send_emoji_monitor_metadata(
selection_metadata: Dict[str, Any],
*,
requested_emotion: str,
send_result: Optional[Any] = None,
error_message: str = "",
) -> Dict[str, Any]:
@@ -293,7 +256,6 @@ def _build_send_emoji_monitor_metadata(
if send_result is not None:
result_lines = [
f"请求情绪:{requested_emotion or '未指定'}",
f"命中情绪:{send_result.matched_emotion or '未命中'}",
f"表情描述:{send_result.description or '无描述'}",
f"情绪标签:{''.join(send_result.emotions) if send_result.emotions else ''}",
@@ -306,10 +268,7 @@ def _build_send_emoji_monitor_metadata(
elif error_message.strip():
extra_sections.append({
"title": "表情发送结果",
"content": (
f"请求情绪:{requested_emotion or '未指定'}\n"
f"发送结果:{error_message.strip()}"
),
"content": f"发送结果:{error_message.strip()}",
})
if extra_sections:
@@ -322,7 +281,6 @@ def _build_send_emoji_monitor_metadata(
async def _select_emoji_with_sub_agent(
tool_ctx: BuiltinToolRuntimeContext,
requested_emotion: str,
reasoning: str,
context_texts: list[str],
sample_size: int,
@@ -347,14 +305,12 @@ async def _select_emoji_with_sub_agent(
f"一共 {len(sampled_emojis)} 个位置。\n"
f"每张小图左上角都有一个较大的序号,范围是 1 到 {len(sampled_emojis)}\n"
f"你的任务是根据上下文和当前语气,从这 {len(sampled_emojis)} 张图里选出最合适的一张表情包。\n"
"如果提供了 requested_emotion请优先考虑与其接近的候选如果没有完全匹配则选择最符合上下文语气的候选。\n"
"你必须返回一个 JSON 对象json object不要输出任何 JSON 之外的内容。\n"
'返回格式固定为:{"emoji_index":1,"reason":"简短理由"}'
)
prompt_message = ReferenceMessage(
content=(
f"[选择任务]\n"
f"requested_emotion: {requested_emotion or '未指定'}\n"
f"候选总数: {len(sampled_emojis)}\n"
f"拼图布局: {grid_rows}x{grid_columns}\n"
"请只输出 JSON。"
@@ -439,7 +395,6 @@ async def handle_tool(
"""执行 send_emoji 内置工具。"""
del context
emotion = str(invocation.arguments.get("emotion") or "").strip()
context_texts = [
message.processed_plain_text.strip()
for message in tool_ctx.runtime._chat_history[-5:]
@@ -450,23 +405,20 @@ async def handle_tool(
"message": "",
"description": "",
"emotion": [],
"requested_emotion": emotion,
"matched_emotion": "",
"reason": "",
}
selection_metadata: Dict[str, Any] = {"reason": "", "monitor_detail": {}}
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具,请求情绪={emotion!r}")
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具")
try:
send_result = await send_emoji_for_maisaka(
stream_id=tool_ctx.runtime.session_id,
requested_emotion=emotion,
reasoning=tool_ctx.engine.last_reasoning_content,
context_texts=context_texts,
emoji_selector=lambda requested_emotion, reasoning, context_texts, sample_size: _select_emoji_with_sub_agent(
emoji_selector=lambda _requested_emotion, reasoning, context_texts, sample_size: _select_emoji_with_sub_agent(
tool_ctx,
requested_emotion,
reasoning,
list(context_texts or []),
sample_size,
@@ -482,7 +434,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
error_message=structured_result["message"],
),
)
@@ -493,7 +444,7 @@ async def handle_tool(
logger.info(
f"{tool_ctx.runtime.log_prefix} 表情包发送成功 "
f"描述={send_result.description!r} 情绪标签={send_result.emotions} "
f"请求情绪={emotion!r} 命中情绪={send_result.matched_emotion!r}"
f"命中情绪={send_result.matched_emotion!r}"
)
if send_result.sent_message is not None:
tool_ctx.append_sent_message_to_chat_history(send_result.sent_message)
@@ -509,7 +460,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
send_result=send_result,
),
)
@@ -521,7 +471,7 @@ async def handle_tool(
logger.warning(
f"{tool_ctx.runtime.log_prefix} 表情包发送失败 "
f"请求情绪={emotion!r} 错误信息={send_result.message}"
f"错误信息={send_result.message}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
@@ -529,7 +479,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
send_result=send_result,
),
)

View File

@@ -528,14 +528,12 @@ class MaisakaChatLoopService:
prompt_section: RenderableType | None = None
if global_config.debug.show_maisaka_thinking:
image_display_mode: str = "path_link" if global_config.maisaka.show_image_path else "legacy"
prompt_section = PromptCLIVisualizer.build_prompt_section(
built_messages,
category="planner" if request_kind != "timing_gate" else "timing_gate",
chat_id=self._session_id,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
folded=global_config.debug.fold_maisaka_thinking,
tool_definitions=list(all_tools),
)

View File

@@ -799,7 +799,7 @@ class PromptCLIVisualizer:
chat_id: str,
request_kind: str,
selection_reason: str,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
tool_definitions: list[dict[str, Any]] | None = None,
) -> RenderableType:
"""构建用于查看完整 prompt 的折叠入口内容。"""
@@ -864,7 +864,7 @@ class PromptCLIVisualizer:
chat_id: str,
request_kind: str,
selection_reason: str,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
folded: bool,
tool_definitions: list[dict[str, Any]] | None = None,
) -> Panel:
@@ -878,14 +878,10 @@ class PromptCLIVisualizer:
chat_id=chat_id,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
tool_definitions=tool_definitions,
)
else:
ordered_panels = cls.build_prompt_panels(
messages,
image_display_mode=image_display_mode,
)
ordered_panels = cls.build_prompt_panels(messages)
prompt_renderable = Group(*ordered_panels)
return Panel(
@@ -1102,11 +1098,9 @@ class PromptCLIVisualizer:
cls,
messages: list[Any],
*,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
) -> List[Panel]:
"""构建完整 prompt 可视化面板。"""
if image_display_mode not in {mode.value for mode in PromptImageDisplayMode}:
image_display_mode = PromptImageDisplayMode.LEGACY
settings = PromptImageDisplaySettings(
display_mode=PromptImageDisplayMode(image_display_mode),
)

View File

@@ -1151,7 +1151,6 @@ class MaisakaHeartFlowChatting:
chat_id=self.session_id,
request_kind=labels["request_kind"],
selection_reason=subtitle,
image_display_mode="path_link" if global_config.maisaka.show_image_path else "legacy",
),
title=labels["prompt_title"],
border_style=border_style,

File diff suppressed because it is too large Load Diff

View File

@@ -2,54 +2,20 @@ from __future__ import annotations
import asyncio
import json
from typing import Any, Dict, List, Optional
from typing import Any, List, Optional
from json_repair import repair_json
from src.chat.utils.utils import is_bot_self
from src.common.message_repository import find_messages
from src.common.logger import get_logger
from src.common.message_repository import find_messages
from src.config.config import global_config
from src.memory_system.chat_history_summarizer import ChatHistorySummarizer
from src.person_info.person_info import Person, get_person_id, store_person_memory_from_answer
from src.services.llm_service import LLMServiceClient
logger = get_logger("memory_flow_service")
class LongTermMemorySessionManager:
def __init__(self) -> None:
self._lock = asyncio.Lock()
self._summarizers: Dict[str, ChatHistorySummarizer] = {}
async def on_message(self, message: Any) -> None:
if not bool(getattr(global_config.memory, "long_term_auto_summary_enabled", True)):
return
session_id = str(getattr(message, "session_id", "") or "").strip()
if not session_id:
return
created = False
async with self._lock:
summarizer = self._summarizers.get(session_id)
if summarizer is None:
summarizer = ChatHistorySummarizer(session_id=session_id)
self._summarizers[session_id] = summarizer
created = True
if created:
await summarizer.start()
async def shutdown(self) -> None:
async with self._lock:
items = list(self._summarizers.items())
self._summarizers.clear()
for session_id, summarizer in items:
try:
await summarizer.stop()
except Exception as exc:
logger.warning("停止聊天总结器失败: session=%s err=%s", session_id, exc)
class PersonFactWritebackService:
def __init__(self) -> None:
self._queue: asyncio.Queue[Any] = asyncio.Queue(maxsize=256)
@@ -123,7 +89,11 @@ class PersonFactWritebackService:
if not session_id:
return
person_name = str(getattr(target_person, "person_name", "") or getattr(target_person, "nickname", "") or "").strip()
person_name = str(
getattr(target_person, "person_name", "")
or getattr(target_person, "nickname", "")
or ""
).strip()
if not person_name:
return
@@ -242,7 +212,6 @@ class PersonFactWritebackService:
class MemoryAutomationService:
def __init__(self) -> None:
self.session_manager = LongTermMemorySessionManager()
self.fact_writeback = PersonFactWritebackService()
self._started = False
@@ -255,14 +224,13 @@ class MemoryAutomationService:
async def shutdown(self) -> None:
if not self._started:
return
await self.session_manager.shutdown()
await self.fact_writeback.shutdown()
self._started = False
async def on_incoming_message(self, message: Any) -> None:
del message
if not self._started:
await self.start()
await self.session_manager.on_message(message)
async def on_message_sent(self, message: Any) -> None:
if not self._started: