From adda11738e33fc76e03b6eca58bb48671a535de5 Mon Sep 17 00:00:00 2001 From: DawnARC Date: Thu, 7 May 2026 14:41:57 +0800 Subject: [PATCH 1/4] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E4=BA=BA=E7=89=A9?= =?UTF-8?q?=E7=94=BB=E5=83=8F=E6=B7=B7=E5=85=A5=E8=81=8A=E5=A4=A9=E6=91=98?= =?UTF-8?q?=E8=A6=81=E4=B8=8E=E6=9C=BA=E5=99=A8=E4=BA=BA=E8=BE=93=E5=87=BA?= =?UTF-8?q?=E4=BA=8B=E5=AE=9E=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...test_chat_summary_writeback_integration.py | 29 +---- .../test_memory_flow_service.py | 85 +++++++++---- .../test_person_profile_service.py | 115 +++++++++++++++++ .../core/utils/person_profile_service.py | 70 +++++++++- src/A_memorix/core/utils/summary_importer.py | 2 + src/person_info/person_info.py | 13 +- src/services/memory_flow_service.py | 120 ++++++++++++++++-- 7 files changed, 369 insertions(+), 65 deletions(-) create mode 100644 pytests/A_memorix_test/test_person_profile_service.py diff --git a/pytests/A_memorix_test/test_chat_summary_writeback_integration.py b/pytests/A_memorix_test/test_chat_summary_writeback_integration.py index 24c7ff4f..7618bca7 100644 --- a/pytests/A_memorix_test/test_chat_summary_writeback_integration.py +++ b/pytests/A_memorix_test/test_chat_summary_writeback_integration.py @@ -337,30 +337,11 @@ async def test_text_to_stream_triggers_real_chat_summary_writeback( else None ), ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_enabled", - True, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_message_threshold", - 2, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_context_length", - 10, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "person_fact_writeback_enabled", - False, - raising=False, - ) + integration_config = memory_flow_service_module.global_config.a_memorix.integration + monkeypatch.setattr(integration_config, "chat_summary_writeback_enabled", True, raising=False) + monkeypatch.setattr(integration_config, "chat_summary_writeback_message_threshold", 2, raising=False) + monkeypatch.setattr(integration_config, "chat_summary_writeback_context_length", 10, raising=False) + monkeypatch.setattr(integration_config, "person_fact_writeback_enabled", False, raising=False) await kernel.initialize() diff --git a/pytests/A_memorix_test/test_memory_flow_service.py b/pytests/A_memorix_test/test_memory_flow_service.py index 4699d0e5..98c9639d 100644 --- a/pytests/A_memorix_test/test_memory_flow_service.py +++ b/pytests/A_memorix_test/test_memory_flow_service.py @@ -5,6 +5,14 @@ import pytest from src.services import memory_flow_service as memory_flow_module +def _fake_global_config(**integration_values): + return SimpleNamespace( + a_memorix=SimpleNamespace( + integration=SimpleNamespace(**integration_values), + ) + ) + + def test_person_fact_parse_fact_list_deduplicates_and_filters_short_items(): raw = '["他喜欢猫", "他喜欢猫", "好", "", "他会弹吉他"]' @@ -38,6 +46,43 @@ def test_person_fact_resolve_target_person_for_private_chat(monkeypatch): assert person.person_id == "qq:123" +@pytest.mark.asyncio +async def test_person_fact_writeback_skips_bot_only_fact_without_user_evidence(monkeypatch): + stored_facts: list[tuple[str, str, str]] = [] + + class FakePerson: + person_id = "person-1" + person_name = "测试用户" + nickname = "测试用户" + is_known 
= True + + service = memory_flow_module.PersonFactWritebackService.__new__(memory_flow_module.PersonFactWritebackService) + service._resolve_target_person = lambda message: FakePerson() + + async def fake_extract_facts(person, reply_text, user_evidence_text): + del person, reply_text, user_evidence_text + return ["测试用户喜欢辣椒"] + + async def fake_store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str, **kwargs): + del kwargs + stored_facts.append((person_name, memory_content, chat_id)) + + service._extract_facts = fake_extract_facts + monkeypatch.setattr(memory_flow_module, "store_person_memory_from_answer", fake_store_person_memory_from_answer) + monkeypatch.setattr(memory_flow_module, "find_messages", lambda **kwargs: []) + + message = SimpleNamespace( + processed_plain_text="我记得你喜欢辣椒。", + session_id="session-1", + reply_to="", + session=SimpleNamespace(platform="qq", user_id="bot-1", group_id=""), + ) + + await service._handle_message(message) + + assert stored_facts == [] + + @pytest.mark.asyncio async def test_chat_summary_writeback_service_triggers_when_threshold_reached(monkeypatch): events: list[tuple[str, object]] = [] @@ -45,12 +90,10 @@ async def test_chat_summary_writeback_service_triggers_when_threshold_reached(mo monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) @@ -94,12 +137,10 @@ async def test_chat_summary_writeback_service_skips_when_threshold_not_reached(m monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=6, - chat_summary_writeback_context_length=9, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=6, + chat_summary_writeback_context_length=9, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) @@ -135,12 +176,10 @@ async def test_chat_summary_writeback_service_restores_previous_trigger_count(mo monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 8) @@ -178,12 +217,10 @@ async def test_chat_summary_writeback_service_falls_back_to_current_count_for_le monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) diff --git a/pytests/A_memorix_test/test_person_profile_service.py 
b/pytests/A_memorix_test/test_person_profile_service.py new file mode 100644 index 00000000..b75beb16 --- /dev/null +++ b/pytests/A_memorix_test/test_person_profile_service.py @@ -0,0 +1,115 @@ +from types import SimpleNamespace + +import pytest + +from src.A_memorix.core.utils.person_profile_service import PersonProfileService + + +class FakeMetadataStore: + def __init__(self) -> None: + self.snapshots: list[dict] = [] + + @staticmethod + def get_latest_person_profile_snapshot(person_id: str): + del person_id + return None + + @staticmethod + def get_relations(**kwargs): + del kwargs + return [] + + @staticmethod + def get_paragraphs_by_source(source: str): + if source == "person_fact:person-1": + return [ + { + "hash": "person-fact-1", + "content": "测试用户喜欢猫。", + "source": source, + "metadata": {"source_type": "person_fact"}, + "created_at": 2.0, + "updated_at": 2.0, + } + ] + return [] + + @staticmethod + def get_paragraph(hash_value: str): + if hash_value == "chat-summary-1": + return { + "hash": hash_value, + "content": "机器人建议测试用户以后叫星灯。", + "source": "chat_summary:session-1", + "metadata": {"source_type": "chat_summary"}, + "word_count": 1, + } + if hash_value == "person-fact-1": + return { + "hash": hash_value, + "content": "测试用户喜欢猫。", + "source": "person_fact:person-1", + "metadata": {"source_type": "person_fact"}, + "word_count": 1, + } + return None + + @staticmethod + def get_paragraph_stale_relation_marks_batch(paragraph_hashes): + del paragraph_hashes + return {} + + @staticmethod + def get_relation_status_batch(relation_hashes): + del relation_hashes + return {} + + @staticmethod + def get_person_profile_override(person_id: str): + del person_id + return None + + def upsert_person_profile_snapshot(self, **kwargs): + self.snapshots.append(kwargs) + return { + "person_id": kwargs["person_id"], + "profile_text": kwargs["profile_text"], + "aliases": kwargs["aliases"], + "relation_edges": kwargs["relation_edges"], + "vector_evidence": kwargs["vector_evidence"], + "evidence_ids": kwargs["evidence_ids"], + "updated_at": 1.0, + "expires_at": kwargs["expires_at"], + "source_note": kwargs["source_note"], + } + + +class FakeRetriever: + async def retrieve(self, query: str, top_k: int): + del query, top_k + return [ + SimpleNamespace( + hash_value="chat-summary-1", + result_type="paragraph", + score=0.95, + content="机器人建议测试用户以后叫星灯。", + metadata={"source_type": "chat_summary"}, + ) + ] + + +@pytest.mark.asyncio +async def test_person_profile_keeps_chat_summary_as_recent_interaction_not_stable_profile(): + metadata_store = FakeMetadataStore() + service = PersonProfileService(metadata_store=metadata_store, retriever=FakeRetriever()) + service.get_person_aliases = lambda person_id: (["测试用户"], "测试用户", []) + + payload = await service.query_person_profile(person_id="person-1", top_k=6, force_refresh=True) + + assert payload["success"] is True + profile_text = payload["profile_text"] + stable_section = profile_text.split("近期相关互动:", 1)[0] + assert "测试用户喜欢猫" in stable_section + assert "星灯" not in stable_section + assert "近期相关互动:" in profile_text + assert "星灯" in profile_text diff --git a/src/A_memorix/core/utils/person_profile_service.py b/src/A_memorix/core/utils/person_profile_service.py index 6215778b..14f3a943 100644 --- a/src/A_memorix/core/utils/person_profile_service.py +++ b/src/A_memorix/core/utils/person_profile_service.py @@ -340,11 +340,51 @@ class PersonProfileService: "type": "paragraph", "score": 1.1, "content": content[:220], - "metadata": {}, + "source": str(row.get("source", "") or 
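+                    # Hedged reviewer note: populating "source"/"metadata" here
+                    # (the old code appended an empty metadata dict) is what lets
+                    # _is_chat_summary_evidence() later tell chat_summary
+                    # paragraphs apart from person_fact ones; the `or source`
+                    # fallback keeps the enclosing function's source string when
+                    # the row-level field is empty.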
source), + "metadata": dict(row.get("metadata", {}) or {}), } ) return self._filter_stale_paragraph_evidence(evidence) + @staticmethod + def _source_type_from_source(source: str) -> str: + token = str(source or "").strip() + if token.startswith("chat_summary:"): + return "chat_summary" + if token.startswith("person_fact:"): + return "person_fact" + return "" + + def _enrich_paragraph_evidence_metadata( + self, + paragraph_hash: str, + metadata: Dict[str, Any], + ) -> Tuple[Dict[str, Any], str]: + merged = dict(metadata or {}) + source = str(merged.get("source", "") or "").strip() + try: + paragraph = self.metadata_store.get_paragraph(paragraph_hash) + except Exception: + paragraph = None + if isinstance(paragraph, dict): + paragraph_metadata = paragraph.get("metadata", {}) or {} + if isinstance(paragraph_metadata, dict): + merged = {**paragraph_metadata, **merged} + source = source or str(paragraph.get("source", "") or "").strip() + source_type = str(merged.get("source_type", "") or "").strip() or self._source_type_from_source(source) + if source_type: + merged["source_type"] = source_type + if source: + merged["source"] = source + return merged, source + + @staticmethod + def _is_chat_summary_evidence(item: Dict[str, Any]) -> bool: + metadata = item.get("metadata", {}) if isinstance(item.get("metadata"), dict) else {} + source_type = str(metadata.get("source_type", "") or "").strip() + source = str(item.get("source", "") or metadata.get("source", "") or "").strip() + return source_type == "chat_summary" or source.startswith("chat_summary:") + def _filter_stale_paragraph_evidence( self, evidence: List[Dict[str, Any]], @@ -417,7 +457,8 @@ class PersonProfileService: "type": "paragraph", "score": 0.0, "content": str(para.get("content", ""))[:180], - "metadata": {}, + "source": str(para.get("source", "") or ""), + "metadata": dict(para.get("metadata", {}) or {}), } ) return self._filter_stale_paragraph_evidence(fallback[:top_k]) @@ -443,13 +484,18 @@ class PersonProfileService: if not h or h in seen_hash: continue seen_hash.add(h) + metadata, source = self._enrich_paragraph_evidence_metadata( + h, + dict(getattr(item, "metadata", {}) or {}), + ) evidence.append( { "hash": h, "type": str(getattr(item, "result_type", "")), "score": float(getattr(item, "score", 0.0) or 0.0), "content": str(getattr(item, "content", "") or "")[:220], - "metadata": dict(getattr(item, "metadata", {}) or {}), + "source": source, + "metadata": metadata, } ) evidence.sort(key=lambda x: x.get("score", 0.0), reverse=True) @@ -475,7 +521,7 @@ class PersonProfileService: lines.append(f"记忆特征: {'; '.join(memory_traits[:6])}") if relation_edges: - lines.append("关系证据:") + lines.append("稳定关系证据:") for rel in relation_edges[:6]: s = rel.get("subject", "") p = rel.get("predicate", "") @@ -483,9 +529,19 @@ class PersonProfileService: conf = float(rel.get("confidence", 0.0)) lines.append(f"- {s} {p} {o} (conf={conf:.2f})") - if vector_evidence: - lines.append("向量证据摘要:") - for item in vector_evidence[:4]: + stable_evidence = [item for item in vector_evidence if not self._is_chat_summary_evidence(item)] + recent_interactions = [item for item in vector_evidence if self._is_chat_summary_evidence(item)] + + if stable_evidence: + lines.append("稳定人物事实:") + for item in stable_evidence[:4]: + content = str(item.get("content", "")).strip() + if content: + lines.append(f"- {content}") + + if recent_interactions: + lines.append("近期相关互动:") + for item in recent_interactions[:2]: content = str(item.get("content", "")).strip() if content: 
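+                    # Hedged note: items reaching this loop satisfied
+                    # _is_chat_summary_evidence(), i.e. metadata source_type ==
+                    # "chat_summary" or source startswith "chat_summary:", so
+                    # bot-flavored summary text is only ever rendered under
+                    # "近期相关互动:" and never merged into the stable profile
+                    # sections composed above.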
lines.append(f"- {content}") diff --git a/src/A_memorix/core/utils/summary_importer.py b/src/A_memorix/core/utils/summary_importer.py index d2c18ed5..1c30b8df 100644 --- a/src/A_memorix/core/utils/summary_importer.py +++ b/src/A_memorix/core/utils/summary_importer.py @@ -43,6 +43,7 @@ SUMMARY_PROMPT_TEMPLATE = """ 请完成以下任务: 1. **生成总结**:以第三人称或机器人的视角,简洁明了地总结这段对话的主要内容、发生的事件或讨论的主题。 2. **提取实体与关系**:识别并提取对话中提到的重要实体以及它们之间的关系。 +3. **区分事实来源**:用户自己明确表达的稳定人物事实可以记录;机器人发言只能作为上下文,不能单独作为用户画像事实来源。 请严格以 JSON 格式输出,格式如下: {{ @@ -54,6 +55,7 @@ SUMMARY_PROMPT_TEMPLATE = """ }} 注意:总结应具有叙事性,能够作为长程记忆的一部分。直接使用实体的实际名称,不要使用 e1/e2 等代号。 +不要把机器人提出的建议、猜测、玩笑、承诺或复述,写成用户的稳定偏好、身份或长期事实。 """ diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py index 90ac16af..467967e8 100644 --- a/src/person_info/person_info.py +++ b/src/person_info/person_info.py @@ -1,5 +1,5 @@ from datetime import datetime -from typing import Optional, Union +from typing import List, Optional, Union import hashlib import json @@ -506,7 +506,14 @@ class Person: logger.error(f"同步用户 {self.person_id} 信息到数据库时出错: {e}") -async def store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str) -> None: +async def store_person_memory_from_answer( + person_name: str, + memory_content: str, + chat_id: str, + *, + evidence_source: str = "user_supported", + evidence_message_ids: Optional[List[str]] = None, +) -> None: """将人物事实写入长期记忆系统。 Args: @@ -569,6 +576,8 @@ async def store_person_memory_from_answer(person_name: str, memory_content: str, "person_id": person_id, "person_name": participant_name, "writeback_source": "memory_flow_service", + "evidence_source": str(evidence_source or "user_supported"), + "evidence_message_ids": evidence_message_ids or [], }, respect_filter=True, user_id=session_user_id, diff --git a/src/services/memory_flow_service.py b/src/services/memory_flow_service.py index 24098de4..5d851feb 100644 --- a/src/services/memory_flow_service.py +++ b/src/services/memory_flow_service.py @@ -84,7 +84,12 @@ class PersonFactWritebackService: if target_person is None or not target_person.is_known: return - facts = await self._extract_facts(target_person, reply_text) + user_evidence_messages = self._collect_user_evidence_messages(message, target_person) + if not user_evidence_messages: + return + user_evidence_text = self._format_user_evidence(user_evidence_messages) + + facts = await self._extract_facts(target_person, reply_text, user_evidence_text) if not facts: return @@ -104,8 +109,19 @@ class PersonFactWritebackService: if not person_name: return + evidence_message_ids = [ + str(getattr(item, "message_id", "") or "").strip() + for item in user_evidence_messages + if str(getattr(item, "message_id", "") or "").strip() + ] for fact in facts: - await store_person_memory_from_answer(person_name, fact, session_id) + await store_person_memory_from_answer( + person_name, + fact, + session_id, + evidence_source="user_supported", + evidence_message_ids=evidence_message_ids, + ) def _resolve_target_person(self, message: Any) -> Optional[Person]: session = getattr(message, "session", None) @@ -140,22 +156,110 @@ class PersonFactWritebackService: person = Person(person_id=person_id) return person if person.is_known else None - async def _extract_facts(self, person: Person, reply_text: str) -> List[str]: + def _collect_user_evidence_messages(self, message: Any, person: Person) -> List[Any]: + session = getattr(message, "session", None) + session_id = str( + getattr(message, "session_id", "") + or getattr(session, "session_id", 
"") + or "" + ).strip() + if not session_id: + return [] + + evidence: List[Any] = [] + seen_ids = set() + + reply_to = str(getattr(message, "reply_to", "") or "").strip() + if reply_to: + try: + replies = find_messages(message_id=reply_to, limit=1) + except Exception as exc: + logger.debug("查询人物事实 reply_to 证据失败: %s", exc) + replies = [] + evidence.extend(self._filter_target_user_messages(replies, person, seen_ids)) + + if evidence: + return evidence[:3] + + timestamp = self._extract_message_timestamp(message) + try: + candidates = find_messages( + session_id=session_id, + before_time=timestamp, + limit=6, + limit_mode="latest", + filter_bot=True, + ) + except Exception as exc: + logger.debug("查询人物事实近期用户证据失败: %s", exc) + return [] + return self._filter_target_user_messages(candidates, person, seen_ids)[:3] + + @staticmethod + def _extract_message_timestamp(message: Any) -> float | None: + raw_timestamp = getattr(message, "timestamp", None) + if hasattr(raw_timestamp, "timestamp") and callable(raw_timestamp.timestamp): + try: + return float(raw_timestamp.timestamp()) + except Exception: + return None + if isinstance(raw_timestamp, (int, float)): + return float(raw_timestamp) + return None + + @staticmethod + def _filter_target_user_messages(messages: List[Any], person: Person, seen_ids: set) -> List[Any]: + filtered: List[Any] = [] + target_person_id = str(getattr(person, "person_id", "") or "").strip() + for item in messages: + platform = str(getattr(item, "platform", "") or "").strip() + user_info = getattr(getattr(item, "message_info", None), "user_info", None) + user_id = str(getattr(user_info, "user_id", "") or getattr(item, "user_id", "") or "").strip() + if not platform or not user_id or is_bot_self(platform, user_id): + continue + if target_person_id and get_person_id(platform, user_id) != target_person_id: + continue + text = str(getattr(item, "processed_plain_text", "") or "").strip() + if not text: + continue + message_id = str(getattr(item, "message_id", "") or "").strip() + dedup_key = message_id or f"{platform}:{user_id}:{text}" + if dedup_key in seen_ids: + continue + seen_ids.add(dedup_key) + filtered.append(item) + return filtered + + @staticmethod + def _format_user_evidence(messages: List[Any]) -> str: + lines: List[str] = [] + for item in messages[:3]: + text = str(getattr(item, "processed_plain_text", "") or "").strip() + if text: + lines.append(f"- {text}") + return "\n".join(lines) + + async def _extract_facts(self, person: Person, reply_text: str, user_evidence_text: str) -> List[str]: person_name = str(getattr(person, "person_name", "") or getattr(person, "nickname", "") or person.person_id) - prompt = f"""你要从一条机器人刚刚发送的回复中,提取“关于{person_name}的稳定事实”。 + prompt = f"""你要从用户原始发言中提取“关于{person_name}的稳定事实”。 目标人物:{person_name} +用户原始发言证据: +{user_evidence_text} + 机器人回复: {reply_text} 请只提取满足以下条件的事实: -1. 明确是关于目标人物本人的信息。 -2. 具有相对稳定性,可以作为长期记忆保存。 -3. 用简洁中文陈述句表达。 -4. 如果回复是在直接对目标人物说话,出现“你/你的/你自己”时,默认都指目标人物,请先改写成关于目标人物的第三人称事实再输出。 +1. 必须能被“用户原始发言证据”直接支持,不能只来自机器人回复。 +2. 明确是关于目标人物本人的信息。 +3. 具有相对稳定性,可以作为长期记忆保存。 +4. 用简洁中文陈述句表达。 +5. 
如果用户原始发言中出现“我/我的/自己”,默认指目标人物,请先改写成关于目标人物的第三人称事实再输出。 不要提取: - 机器人的情绪、计划、临时动作、客套话 +- 仅由机器人提出的建议、猜测、玩笑、回忆或承诺 - 只适用于当前时刻的短期安排 - 不确定、猜测、反问 - 与目标人物无关的信息 From 2a7722f84e6851f3f68009e4a498fb7e26d3c4a2 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 7 May 2026 18:20:46 +0800 Subject: [PATCH 2/4] =?UTF-8?q?fix=EF=BC=9Alog=E9=94=99=E8=AF=AF=E5=88=86?= =?UTF-8?q?=E7=B1=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/maisaka/builtin_tool/send_emoji.py | 1 + src/maisaka/chat_loop_service.py | 19 ++++++++++++++++++- src/maisaka/display/display_utils.py | 2 ++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/maisaka/builtin_tool/send_emoji.py b/src/maisaka/builtin_tool/send_emoji.py index e55b3d05..cf5027c6 100644 --- a/src/maisaka/builtin_tool/send_emoji.py +++ b/src/maisaka/builtin_tool/send_emoji.py @@ -376,6 +376,7 @@ async def _select_emoji_with_sub_agent( context_message_limit=_EMOJI_SUB_AGENT_CONTEXT_LIMIT, system_prompt=system_prompt, extra_messages=[prompt_message, candidate_message], + request_kind="emotion", model_task_name=model_task_name, ) selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2) diff --git a/src/maisaka/chat_loop_service.py b/src/maisaka/chat_loop_service.py index b9377478..76621584 100644 --- a/src/maisaka/chat_loop_service.py +++ b/src/maisaka/chat_loop_service.py @@ -45,6 +45,14 @@ REQUEST_TYPE_BY_REQUEST_KIND = { "planner": "maisaka_planner", "timing_gate": "maisaka_timing_gate", } +PROMPT_PREVIEW_CATEGORY_BY_REQUEST_KIND = { + "planner": "planner", + "timing_gate": "timing_gate", + "reply_effect_judge": "reply_effect_judge", + "expression_selector": "expression_selector", + "emotion": "emotion", + "sub_agent": "sub_agent", +} CONTEXT_SELECTION_CACHE_STABILITY_RATIO = 2.0 @@ -234,6 +242,15 @@ class MaisakaChatLoopService: f"maisaka_{normalized_request_kind}" if normalized_request_kind else "maisaka_planner", ) + @staticmethod + def _resolve_prompt_preview_category(request_kind: str) -> str: + """根据请求类型决定 Prompt 预览落盘目录,避免子代理混入 planner。""" + + normalized_request_kind = str(request_kind or "").strip().lower() + if not normalized_request_kind: + return "planner" + return PROMPT_PREVIEW_CATEGORY_BY_REQUEST_KIND.get(normalized_request_kind, normalized_request_kind) + def _get_llm_chat_client(self, request_kind: str) -> LLMServiceClient: """获取当前请求类型对应的 LLM 客户端。""" @@ -544,7 +561,7 @@ class MaisakaChatLoopService: if global_config.debug.show_maisaka_thinking: prompt_section_result = PromptCLIVisualizer.build_prompt_section_result( built_messages, - category="planner" if request_kind != "timing_gate" else "timing_gate", + category=self._resolve_prompt_preview_category(request_kind), chat_id=self._session_id, request_kind=request_kind, selection_reason=selection_reason, diff --git a/src/maisaka/display/display_utils.py b/src/maisaka/display/display_utils.py index 5f15ed7f..326eb2a3 100644 --- a/src/maisaka/display/display_utils.py +++ b/src/maisaka/display/display_utils.py @@ -8,6 +8,8 @@ _REQUEST_PANEL_STYLE_MAP: dict[str, tuple[str, str]] = { "timing_gate": ("MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"), "replyer": ("MaiSaka 回复器 Prompt", "bright_yellow"), "emotion": ("MaiSaka Emotion Tool Prompt", "bright_cyan"), + "expression_selector": ("MaiSaka 表达选择子代理 Prompt", "bright_yellow"), + "reply_effect_judge": ("MaiSaka 回复效果评分器 Prompt", "bright_red"), "sub_agent": ("MaiSaka 大模型请求 - 子代理", "bright_blue"), } From 
827cdbd4415cf08754cc93bac891c6cb8b3bdc7a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 7 May 2026 20:15:14 +0800 Subject: [PATCH 3/4] =?UTF-8?q?perf=EF=BC=9A=E4=BC=98=E5=8C=96=E9=BA=A6?= =?UTF-8?q?=E9=BA=A6=E8=A7=82=E5=AF=9F=E4=BD=93=E9=AA=8C=EF=BC=8C=E4=BC=98?= =?UTF-8?q?=E5=8C=96=E6=8E=A8=E7=90=86=E6=A3=80=E7=B4=A2=E4=BD=93=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboard/src/lib/maisaka-monitor-client.ts | 36 ++ dashboard/src/lib/reasoning-process-api.ts | 5 +- dashboard/src/lib/system-api.ts | 79 +++++ .../src/routes/monitor/maisaka-monitor.tsx | 162 +++++++-- .../src/routes/monitor/use-maisaka-monitor.ts | 91 ++++++ dashboard/src/routes/reasoning-process.tsx | 22 +- .../src/routes/settings/LocalCacheTab.tsx | 309 ++++++++++++++++++ dashboard/src/routes/settings/index.tsx | 13 +- prompts/zh-CN/maisaka_chat.prompt | 11 +- pytests/test_maisaka_timing_gate.py | 1 - src/config/config.py | 2 +- src/config/official_configs.py | 9 - src/main.py | 4 - src/maisaka/builtin_tool/continue_tool.py | 1 - src/maisaka/display/__init__.py | 4 - src/maisaka/display/stage_status_board.py | 132 +++----- src/maisaka/display/stage_status_viewer.py | 93 ------ src/maisaka/monitor_events.py | 78 +++++ src/maisaka/reasoning_engine.py | 77 +---- src/maisaka/runtime.py | 83 ++--- src/webui/routers/reasoning_process.py | 75 ++++- src/webui/routers/system.py | 286 +++++++++++++++- src/webui/routers/websocket/unified.py | 9 + 23 files changed, 1206 insertions(+), 376 deletions(-) create mode 100644 dashboard/src/routes/settings/LocalCacheTab.tsx delete mode 100644 src/maisaka/display/stage_status_viewer.py diff --git a/dashboard/src/lib/maisaka-monitor-client.ts b/dashboard/src/lib/maisaka-monitor-client.ts index 40935278..dedbe568 100644 --- a/dashboard/src/lib/maisaka-monitor-client.ts +++ b/dashboard/src/lib/maisaka-monitor-client.ts @@ -33,6 +33,29 @@ export interface SessionStartEvent { timestamp: number } +export interface StageStatusEvent { + session_id: string + session_name?: string + stage: string + detail: string + round_text: string + agent_state: string + stage_started_at: number + updated_at: number + timestamp: number +} + +export interface StageRemovedEvent { + session_id: string + session_name?: string + timestamp: number +} + +export interface StageSnapshotEvent { + entries: StageStatusEvent[] + timestamp: number +} + export interface MessageIngestedEvent { session_id: string speaker_name: string @@ -41,6 +64,15 @@ export interface MessageIngestedEvent { timestamp: number } +export interface MessageSentEvent { + session_id: string + speaker_name: string + content: string + message_id: string + source_kind?: string + timestamp: number +} + export interface CycleStartEvent { session_id: string cycle_id: number @@ -181,7 +213,11 @@ export interface ReplierResponseEvent { export type MaisakaMonitorEvent = | { type: 'session.start'; data: SessionStartEvent } + | { type: 'stage.status'; data: StageStatusEvent } + | { type: 'stage.removed'; data: StageRemovedEvent } + | { type: 'stage.snapshot'; data: StageSnapshotEvent } | { type: 'message.ingested'; data: MessageIngestedEvent } + | { type: 'message.sent'; data: MessageSentEvent } | { type: 'cycle.start'; data: CycleStartEvent } | { type: 'timing_gate.result'; data: TimingGateResultEvent } | { type: 'planner.request'; data: PlannerRequestEvent } diff --git a/dashboard/src/lib/reasoning-process-api.ts b/dashboard/src/lib/reasoning-process-api.ts index 
ff8cc8fd..c565d398 100644 --- a/dashboard/src/lib/reasoning-process-api.ts +++ b/dashboard/src/lib/reasoning-process-api.ts @@ -22,6 +22,7 @@ export type ReasoningPromptListResponse = { page_size: number stages: string[] sessions: string[] + selected_session: string } export type ReasoningPromptContentResponse = { @@ -43,8 +44,8 @@ export async function listReasoningPromptFiles( params: ReasoningPromptListParams ): Promise { const queryParams = new URLSearchParams() - queryParams.set('stage', params.stage ?? 'all') - queryParams.set('session', params.session ?? 'all') + queryParams.set('stage', params.stage ?? 'planner') + queryParams.set('session', params.session ?? 'auto') queryParams.set('search', params.search ?? '') queryParams.set('page', String(params.page ?? 1)) queryParams.set('page_size', String(params.pageSize ?? 50)) diff --git a/dashboard/src/lib/system-api.ts b/dashboard/src/lib/system-api.ts index ee7dd758..7a3c333e 100644 --- a/dashboard/src/lib/system-api.ts +++ b/dashboard/src/lib/system-api.ts @@ -51,6 +51,50 @@ export interface DashboardVersionStatus { pypi_url: string } +export interface CacheDirectoryStats { + key: string + label: string + path: string + exists: boolean + file_count: number + total_size: number + db_records: number +} + +export interface DatabaseFileStats { + path: string + exists: boolean + size: number +} + +export interface DatabaseTableStats { + name: string + rows: number +} + +export interface DatabaseStorageStats { + files: DatabaseFileStats[] + tables: DatabaseTableStats[] + total_size: number +} + +export interface LocalCacheStats { + directories: CacheDirectoryStats[] + database: DatabaseStorageStats +} + +export interface LocalCacheCleanupResult { + success: boolean + message: string + target: 'images' | 'emoji' | 'logs' + removed_files: number + removed_bytes: number + removed_records: number +} + +export type LocalCacheCleanupTarget = LocalCacheCleanupResult['target'] +export type LogCleanupTable = 'llm_usage' | 'tool_records' | 'mai_messages' + /** * 检查 WebUI 是否有 PyPI 新版本 */ @@ -70,3 +114,38 @@ export async function getDashboardVersionStatus( return await response.json() } + +export async function getLocalCacheStats(): Promise { + const response = await fetchWithAuth('/api/webui/system/local-cache', { + method: 'GET', + headers: getAuthHeaders(), + }) + + if (!response.ok) { + const error = await response.json() + throw new Error(error.detail || '获取本地缓存统计失败') + } + + return await response.json() +} + +export async function cleanupLocalCache( + target: LocalCacheCleanupTarget, + tables: LogCleanupTable[] = [] +): Promise { + const response = await fetchWithAuth('/api/webui/system/local-cache/cleanup', { + method: 'POST', + headers: { + ...getAuthHeaders(), + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ target, tables }), + }) + + if (!response.ok) { + const error = await response.json() + throw new Error(error.detail || '清理本地缓存失败') + } + + return await response.json() +} diff --git a/dashboard/src/routes/monitor/maisaka-monitor.tsx b/dashboard/src/routes/monitor/maisaka-monitor.tsx index 53992389..95e286b6 100644 --- a/dashboard/src/routes/monitor/maisaka-monitor.tsx +++ b/dashboard/src/routes/monitor/maisaka-monitor.tsx @@ -38,13 +38,14 @@ import type { CycleStartEvent, MaisakaToolCall, MessageIngestedEvent, + MessageSentEvent, PlannerFinalizedEvent, PlannerResponseEvent, ReplierResponseEvent, TimingGateResultEvent, ToolExecutionEvent, } from '@/lib/maisaka-monitor-client' -import type { SessionInfo, TimelineEntry } 
from './use-maisaka-monitor' +import type { SessionInfo, StageStatusInfo, TimelineEntry } from './use-maisaka-monitor' import { useMaisakaMonitor } from './use-maisaka-monitor' // ─── 工具函数 ────────────────────────────────────────────────── @@ -78,11 +79,13 @@ function formatRelativeTime(ts: number): string { function SessionSidebar({ sessions, + stageStatuses, selectedSession, onSelect, collapsed, }: { sessions: Map + stageStatuses: Map selectedSession: string | null onSelect: (id: string) => void collapsed: boolean @@ -110,31 +113,36 @@ function SessionSidebar({ return (
-          {sortedSessions.map((session) => (
+          {sortedSessions.map((session) => {
+            const status = stageStatuses.get(session.sessionId)
+            return (
-          ))}
+            )
+          })}
) } // ─── 单条时间线事件渲染 ────────────────────────────────────── +function StageStatusPanel({ status }: { status?: StageStatusInfo }) { + if (!status) { + return ( +
+ 当前聊天流暂无阶段状态 +
+ ) + } + + return ( +
+
+ + + {status.stage || '未知阶段'} + + {status.roundText && ( + + {status.roundText} + + )} + {status.agentState && ( + + {status.agentState} + + )} + + 更新于 {formatRelativeTime(status.updatedAt)} + +
+ {status.detail && ( +

{status.detail}

+ )} +
+ ) +} + function MessageIngestedCard({ data }: { data: MessageIngestedEvent }) { return (
@@ -172,6 +221,26 @@ function MessageIngestedCard({ data }: { data: MessageIngestedEvent }) { ) } +function MessageSentCard({ data }: { data: MessageSentEvent }) { + return ( +
+
+ +
+
+
+ {data.speaker_name || '麦麦'} + 已发送 + {formatTimestamp(data.timestamp)} +
+

+ {data.content || '[非文本消息]'} +

+
+
+ ) +} + function CycleStartCard({ data }: { data: CycleStartEvent }) { return (
@@ -201,7 +270,7 @@ function TimingGateCard({ data }: { data: TimingGateResultEvent }) { const Icon = config.icon return ( -
+
@@ -330,11 +399,26 @@ function PlannerToolCallsBlock({ data }: { data: PlannerFinalizedEvent }) { duration_ms: 0, summary: '', })) + const isFinishTool = (toolName?: string) => toolName?.trim().toLowerCase() === 'finish' + const finishTools = displayTools.filter((tool) => isFinishTool(tool.tool_name)) + const regularTools = displayTools.filter((tool) => !isFinishTool(tool.tool_name)) if (displayTools.length <= 0) { return null } + if (regularTools.length <= 0 && finishTools.length > 0) { + return ( +
+
+ + 本轮思考暂时结束 + 等待新的消息。 +
+
+ ) + } + return ( @@ -342,11 +426,18 @@ function PlannerToolCallsBlock({ data }: { data: PlannerFinalizedEvent }) { Planner 工具调用 - {displayTools.length} 个 + {regularTools.length} 个
+ {finishTools.length > 0 && ( +
+ + 本轮思考暂时结束 + 等待新的消息。 +
+ )}
- {displayTools.map((tool, idx) => ( + {regularTools.map((tool, idx) => (
a + b, 0) return ( -
-
- -
-
- 循环结束 +
+ +
+ + 循环结束 - 总耗时 {formatMs(totalTime * 1000)} + #{data.cycle_id} - {Object.entries(data.time_records).map(([name, duration]) => ( - - {name}: {formatMs(duration * 1000)} - - ))} + {formatMs(totalTime * 1000)}
+
) } @@ -551,6 +638,8 @@ function TimelineEventRenderer({ switch (entry.type) { case 'message.ingested': return + case 'message.sent': + return case 'cycle.start': if (!showCycleMarkers) return null return @@ -559,6 +648,9 @@ function TimelineEventRenderer({ case 'planner.response': return case 'planner.finalized': + if ((entry.data as PlannerFinalizedEvent).timing_gate?.result?.action !== 'continue') { + return null + } return (
@@ -583,6 +675,7 @@ export function MaisakaMonitor() { const { timeline, sessions, + stageStatuses, selectedSession, setSelectedSession, connected, @@ -629,7 +722,7 @@ export function MaisakaMonitor() { // 统计当前会话的各事件类型计数 const stats = { - messages: timeline.filter((e) => e.type === 'message.ingested').length, + messages: timeline.filter((e) => e.type === 'message.ingested' || e.type === 'message.sent').length, cycles: timeline.filter((e) => e.type === 'cycle.start').length, toolCalls: timeline.reduce((count, entry) => { if (entry.type === 'tool.execution') { @@ -641,6 +734,7 @@ export function MaisakaMonitor() { return count }, 0), } + const selectedStageStatus = selectedSession ? stageStatuses.get(selectedSession) : undefined return (
@@ -674,6 +768,7 @@ export function MaisakaMonitor() { {/* 时间线 */} + + { const continuedTimingGateCycles = new Set() + const stoppedTimingGateCycles = new Set() return timeline.map((entry) => { if (entry.type === 'timing_gate.result') { const data = entry.data as TimingGateResultEvent if (data.action === 'continue') { continuedTimingGateCycles.add(buildCycleKey(data.session_id, data.cycle_id)) + } else { + stoppedTimingGateCycles.add(buildCycleKey(data.session_id, data.cycle_id)) } } if (entry.type === 'planner.response' || entry.type === 'planner.finalized') { const data = entry.data as PlannerResponseEvent | PlannerFinalizedEvent - if (!continuedTimingGateCycles.has(buildCycleKey(data.session_id, data.cycle_id))) { + const cycleKey = buildCycleKey(data.session_id, data.cycle_id) + if (stoppedTimingGateCycles.has(cycleKey) || !continuedTimingGateCycles.has(cycleKey)) { return null } } @@ -784,9 +885,6 @@ export function MaisakaMonitor() { className="animate-in fade-in-0 slide-in-from-bottom-2 duration-300" > {rendered} - {entry.type === 'cycle.end' && ( - - )}
) }) diff --git a/dashboard/src/routes/monitor/use-maisaka-monitor.ts b/dashboard/src/routes/monitor/use-maisaka-monitor.ts index 64b07b88..6ca75350 100644 --- a/dashboard/src/routes/monitor/use-maisaka-monitor.ts +++ b/dashboard/src/routes/monitor/use-maisaka-monitor.ts @@ -35,6 +35,17 @@ export interface SessionInfo { eventCount: number } +export interface StageStatusInfo { + sessionId: string + sessionName?: string + stage: string + detail: string + roundText: string + agentState: string + stageStartedAt: number + updatedAt: number +} + /** 前端内存中最多恢复/展示的时间线条目数,避免一次渲染过多节点。 */ const MAX_TIMELINE_ENTRIES = 3000 /** IndexedDB 中最多持久化的时间线条目数。 */ @@ -78,6 +89,7 @@ function resolveSessionDisplayName({ let entryCounter = 0 let cachedTimeline: TimelineEntry[] = [] let cachedSessions: Map = new Map() +let cachedStageStatuses: Map = new Map() let cachedSelectedSession: string | null = null let cachedConnected = false let backgroundCollectionEnabled = false @@ -121,6 +133,23 @@ interface MaisakaMonitorDb extends DBSchema { } } +function toStageStatusInfo(raw: Record): StageStatusInfo | null { + const sessionId = typeof raw.session_id === 'string' ? raw.session_id : '' + if (!sessionId) { + return null + } + return { + sessionId, + sessionName: typeof raw.session_name === 'string' ? raw.session_name : undefined, + stage: typeof raw.stage === 'string' ? raw.stage : '', + detail: typeof raw.detail === 'string' ? raw.detail : '', + roundText: typeof raw.round_text === 'string' ? raw.round_text : '', + agentState: typeof raw.agent_state === 'string' ? raw.agent_state : '', + stageStartedAt: typeof raw.stage_started_at === 'number' ? raw.stage_started_at : Date.now() / 1000, + updatedAt: typeof raw.updated_at === 'number' ? raw.updated_at : Date.now() / 1000, + } +} + function notifyStoreListeners() { storeListeners.forEach((listener) => listener()) } @@ -359,15 +388,72 @@ function updateSessionInfo(event: MaisakaMonitorEvent, sessionId: string, timest cachedSessions = next } +function updateStageStatus(event: MaisakaMonitorEvent) { + if (event.type === 'stage.snapshot') { + const rawEntries = (event.data as unknown as Record).entries + if (!Array.isArray(rawEntries)) { + return + } + const next = new Map(cachedStageStatuses) + for (const rawEntry of rawEntries) { + if (!rawEntry || typeof rawEntry !== 'object') { + continue + } + const status = toStageStatusInfo(rawEntry as Record) + if (status) { + next.set(status.sessionId, status) + } + } + cachedStageStatuses = next + return + } + + if (event.type === 'stage.status') { + const status = toStageStatusInfo(event.data as unknown as Record) + if (!status) { + return + } + const next = new Map(cachedStageStatuses) + next.set(status.sessionId, status) + cachedStageStatuses = next + return + } + + if (event.type === 'stage.removed') { + const dataRecord = event.data as unknown as Record + const sessionId = typeof dataRecord.session_id === 'string' ? 
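+    // Hedged note: monitor events arrive as untyped WebSocket payloads (cast
+    // through `unknown` above), so session_id is re-validated here before the
+    // stage-status map is touched.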
dataRecord.session_id : '' + if (!sessionId) { + return + } + const next = new Map(cachedStageStatuses) + next.delete(sessionId) + cachedStageStatuses = next + } +} + function handleMonitorEvent(event: MaisakaMonitorEvent) { const dataRecord = event.data as unknown as Record const sessionId = dataRecord.session_id as string const timestamp = dataRecord.timestamp as number + if (event.type === 'stage.snapshot') { + updateStageStatus(event) + notifyStoreListeners() + return + } + if (!sessionId || typeof timestamp !== 'number') { return } + if (event.type === 'stage.status' || event.type === 'stage.removed') { + updateStageStatus(event) + updateSessionInfo(event, sessionId, timestamp) + schedulePersistMonitorSnapshot(undefined, sessionId) + notifyStoreListeners() + return + } + const entry: TimelineEntry = { id: `evt_${++entryCounter}_${Date.now()}`, type: event.type, @@ -435,6 +521,7 @@ function stopMonitorSubscriptionIfIdle() { export function useMaisakaMonitor() { const [timeline, setTimeline] = useState(cachedTimeline) const [sessions, setSessions] = useState>(new Map(cachedSessions)) + const [stageStatuses, setStageStatuses] = useState>(new Map(cachedStageStatuses)) const [selectedSession, setSelectedSessionState] = useState(cachedSelectedSession) const [connected, setConnected] = useState(cachedConnected) const [backgroundCollection, setBackgroundCollection] = useState(loadBackgroundCollectionPreference) @@ -445,6 +532,7 @@ export function useMaisakaMonitor() { const syncFromStore = () => { setTimeline(cachedTimeline) setSessions(new Map(cachedSessions)) + setStageStatuses(new Map(cachedStageStatuses)) setSelectedSessionState(cachedSelectedSession) setConnected(cachedConnected) setBackgroundCollection(backgroundCollectionEnabled) @@ -462,9 +550,11 @@ export function useMaisakaMonitor() { const clearTimeline = useCallback(() => { cachedTimeline = [] cachedSessions = new Map() + cachedStageStatuses = new Map() cachedSelectedSession = null setTimeline([]) setSessions(new Map()) + setStageStatuses(new Map()) setSelectedSessionState(null) pendingPersistEntries = [] pendingPersistSessionIds = new Set() @@ -504,6 +594,7 @@ export function useMaisakaMonitor() { timeline: filteredTimeline, allTimeline: timeline, sessions, + stageStatuses, selectedSession, setSelectedSession, connected, diff --git a/dashboard/src/routes/reasoning-process.tsx b/dashboard/src/routes/reasoning-process.tsx index 73ad1b50..b40c75e3 100644 --- a/dashboard/src/routes/reasoning-process.tsx +++ b/dashboard/src/routes/reasoning-process.tsx @@ -29,6 +29,7 @@ import { import { cn } from '@/lib/utils' const PAGE_SIZE = 50 +const AUTO_SESSION = 'auto' function formatTime(timestamp: number | null, modifiedAt: number): string { const value = timestamp ? 
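  // Hedged reading of the units: `timestamp` is assumed to already be epoch
  // milliseconds, while `modifiedAt` looks like a seconds-based file mtime,
  // hence the * 1000 on the fallback branch.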
timestamp : modifiedAt * 1000 @@ -51,8 +52,8 @@ export function ReasoningProcessPage() { const [items, setItems] = useState([]) const [stages, setStages] = useState([]) const [sessions, setSessions] = useState([]) - const [stage, setStage] = useState('all') - const [session, setSession] = useState('all') + const [stage, setStage] = useState('planner') + const [session, setSession] = useState(AUTO_SESSION) const [search, setSearch] = useState('') const [page, setPage] = useState(1) const [refreshKey, setRefreshKey] = useState(0) @@ -85,6 +86,9 @@ export function ReasoningProcessPage() { setItems(data.items) setStages(data.stages) setSessions(data.sessions) + if (data.selected_session && data.selected_session !== session) { + setSession(data.selected_session) + } setTotal(data.total) setSelected((current) => { if ( @@ -185,7 +189,8 @@ export function ReasoningProcessPage() { onValueChange={(value) => resetToFirstPage(() => { setStage(value) - setSession('all') + setSession(AUTO_SESSION) + setSelected(null) }) } > @@ -193,7 +198,11 @@ export function ReasoningProcessPage() { - 全部阶段 + {!stages.includes(stage) && ( + + {stage} + + )} {stages.map((item) => ( {item} @@ -205,12 +214,15 @@ export function ReasoningProcessPage() {
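Reviewer usage sketch (hedged): the snippet below shows how the new local-cache helpers added in PATCH 3/4 (`dashboard/src/lib/system-api.ts`) are meant to compose on the dashboard side. The function names, cleanup targets, and table names come from this patch; the `'logs'` directory key and the `console.info` reporting are illustrative assumptions, not part of the diff.

```ts
import { cleanupLocalCache, getLocalCacheStats } from '@/lib/system-api'

// Inspect local cache usage, then clear only LLM-usage and tool records,
// leaving mai_messages (chat history) untouched.
async function purgeLogTables(): Promise<void> {
  const stats = await getLocalCacheStats()
  // 'logs' as a directory key is an assumption for illustration.
  const logsDir = stats.directories.find((d) => d.key === 'logs')
  console.info(
    `logs: ${logsDir?.file_count ?? 0} files, db total ${stats.database.total_size} bytes`
  )

  const result = await cleanupLocalCache('logs', ['llm_usage', 'tool_records'])
  console.info(result.message, `${result.removed_records} records removed`)
}
```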