From c6e9959474a7c26cf8f4ac634e42628c7ed22572 Mon Sep 17 00:00:00 2001 From: DawnARC Date: Thu, 7 May 2026 14:41:57 +0800 Subject: [PATCH 1/9] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E4=BA=BA=E7=89=A9?= =?UTF-8?q?=E7=94=BB=E5=83=8F=E6=B7=B7=E5=85=A5=E8=81=8A=E5=A4=A9=E6=91=98?= =?UTF-8?q?=E8=A6=81=E4=B8=8E=E6=9C=BA=E5=99=A8=E4=BA=BA=E8=BE=93=E5=87=BA?= =?UTF-8?q?=E4=BA=8B=E5=AE=9E=E7=9A=84=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...test_chat_summary_writeback_integration.py | 29 +---- .../test_memory_flow_service.py | 85 +++++++++---- .../test_person_profile_service.py | 115 +++++++++++++++++ .../core/utils/person_profile_service.py | 70 +++++++++- src/A_memorix/core/utils/summary_importer.py | 2 + src/person_info/person_info.py | 13 +- src/services/memory_flow_service.py | 120 ++++++++++++++++-- 7 files changed, 369 insertions(+), 65 deletions(-) create mode 100644 pytests/A_memorix_test/test_person_profile_service.py diff --git a/pytests/A_memorix_test/test_chat_summary_writeback_integration.py b/pytests/A_memorix_test/test_chat_summary_writeback_integration.py index 24c7ff4f..7618bca7 100644 --- a/pytests/A_memorix_test/test_chat_summary_writeback_integration.py +++ b/pytests/A_memorix_test/test_chat_summary_writeback_integration.py @@ -337,30 +337,11 @@ async def test_text_to_stream_triggers_real_chat_summary_writeback( else None ), ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_enabled", - True, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_message_threshold", - 2, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "chat_summary_writeback_context_length", - 10, - raising=False, - ) - monkeypatch.setattr( - memory_flow_service_module.global_config.memory, - "person_fact_writeback_enabled", - False, - raising=False, - ) + integration_config = memory_flow_service_module.global_config.a_memorix.integration + monkeypatch.setattr(integration_config, "chat_summary_writeback_enabled", True, raising=False) + monkeypatch.setattr(integration_config, "chat_summary_writeback_message_threshold", 2, raising=False) + monkeypatch.setattr(integration_config, "chat_summary_writeback_context_length", 10, raising=False) + monkeypatch.setattr(integration_config, "person_fact_writeback_enabled", False, raising=False) await kernel.initialize() diff --git a/pytests/A_memorix_test/test_memory_flow_service.py b/pytests/A_memorix_test/test_memory_flow_service.py index 4699d0e5..98c9639d 100644 --- a/pytests/A_memorix_test/test_memory_flow_service.py +++ b/pytests/A_memorix_test/test_memory_flow_service.py @@ -5,6 +5,14 @@ import pytest from src.services import memory_flow_service as memory_flow_module +def _fake_global_config(**integration_values): + return SimpleNamespace( + a_memorix=SimpleNamespace( + integration=SimpleNamespace(**integration_values), + ) + ) + + def test_person_fact_parse_fact_list_deduplicates_and_filters_short_items(): raw = '["他喜欢猫", "他喜欢猫", "好", "", "他会弹吉他"]' @@ -38,6 +46,43 @@ def test_person_fact_resolve_target_person_for_private_chat(monkeypatch): assert person.person_id == "qq:123" +@pytest.mark.asyncio +async def test_person_fact_writeback_skips_bot_only_fact_without_user_evidence(monkeypatch): + stored_facts: list[tuple[str, str, str]] = [] + + class FakePerson: + person_id = "person-1" + person_name = "测试用户" + nickname = "测试用户" + is_known 
= True + + service = memory_flow_module.PersonFactWritebackService.__new__(memory_flow_module.PersonFactWritebackService) + service._resolve_target_person = lambda message: FakePerson() + + async def fake_extract_facts(person, reply_text, user_evidence_text): + del person, reply_text, user_evidence_text + return ["测试用户喜欢辣椒"] + + async def fake_store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str, **kwargs): + del kwargs + stored_facts.append((person_name, memory_content, chat_id)) + + service._extract_facts = fake_extract_facts + monkeypatch.setattr(memory_flow_module, "store_person_memory_from_answer", fake_store_person_memory_from_answer) + monkeypatch.setattr(memory_flow_module, "find_messages", lambda **kwargs: []) + + message = SimpleNamespace( + processed_plain_text="我记得你喜欢辣椒。", + session_id="session-1", + reply_to="", + session=SimpleNamespace(platform="qq", user_id="bot-1", group_id=""), + ) + + await service._handle_message(message) + + assert stored_facts == [] + + @pytest.mark.asyncio async def test_chat_summary_writeback_service_triggers_when_threshold_reached(monkeypatch): events: list[tuple[str, object]] = [] @@ -45,12 +90,10 @@ async def test_chat_summary_writeback_service_triggers_when_threshold_reached(mo monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) @@ -94,12 +137,10 @@ async def test_chat_summary_writeback_service_skips_when_threshold_not_reached(m monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=6, - chat_summary_writeback_context_length=9, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=6, + chat_summary_writeback_context_length=9, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) @@ -135,12 +176,10 @@ async def test_chat_summary_writeback_service_restores_previous_trigger_count(mo monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 8) @@ -178,12 +217,10 @@ async def test_chat_summary_writeback_service_falls_back_to_current_count_for_le monkeypatch.setattr( memory_flow_module, "global_config", - SimpleNamespace( - memory=SimpleNamespace( - chat_summary_writeback_enabled=True, - chat_summary_writeback_message_threshold=3, - chat_summary_writeback_context_length=7, - ) + _fake_global_config( + chat_summary_writeback_enabled=True, + chat_summary_writeback_message_threshold=3, + chat_summary_writeback_context_length=7, ), ) monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5) diff --git a/pytests/A_memorix_test/test_person_profile_service.py 
b/pytests/A_memorix_test/test_person_profile_service.py new file mode 100644 index 00000000..b75beb16 --- /dev/null +++ b/pytests/A_memorix_test/test_person_profile_service.py @@ -0,0 +1,115 @@ +from types import SimpleNamespace + +import pytest + +from src.A_memorix.core.utils.person_profile_service import PersonProfileService + + +class FakeMetadataStore: + def __init__(self) -> None: + self.snapshots: list[dict] = [] + + @staticmethod + def get_latest_person_profile_snapshot(person_id: str): + del person_id + return None + + @staticmethod + def get_relations(**kwargs): + del kwargs + return [] + + @staticmethod + def get_paragraphs_by_source(source: str): + if source == "person_fact:person-1": + return [ + { + "hash": "person-fact-1", + "content": "测试用户喜欢猫。", + "source": source, + "metadata": {"source_type": "person_fact"}, + "created_at": 2.0, + "updated_at": 2.0, + } + ] + return [] + + @staticmethod + def get_paragraph(hash_value: str): + if hash_value == "chat-summary-1": + return { + "hash": hash_value, + "content": "机器人建议测试用户以后叫星灯。", + "source": "chat_summary:session-1", + "metadata": {"source_type": "chat_summary"}, + "word_count": 1, + } + if hash_value == "person-fact-1": + return { + "hash": hash_value, + "content": "测试用户喜欢猫。", + "source": "person_fact:person-1", + "metadata": {"source_type": "person_fact"}, + "word_count": 1, + } + return None + + @staticmethod + def get_paragraph_stale_relation_marks_batch(paragraph_hashes): + del paragraph_hashes + return {} + + @staticmethod + def get_relation_status_batch(relation_hashes): + del relation_hashes + return {} + + @staticmethod + def get_person_profile_override(person_id: str): + del person_id + return None + + def upsert_person_profile_snapshot(self, **kwargs): + self.snapshots.append(kwargs) + return { + "person_id": kwargs["person_id"], + "profile_text": kwargs["profile_text"], + "aliases": kwargs["aliases"], + "relation_edges": kwargs["relation_edges"], + "vector_evidence": kwargs["vector_evidence"], + "evidence_ids": kwargs["evidence_ids"], + "updated_at": 1.0, + "expires_at": kwargs["expires_at"], + "source_note": kwargs["source_note"], + } + + +class FakeRetriever: + async def retrieve(self, query: str, top_k: int): + del query, top_k + return [ + SimpleNamespace( + hash_value="chat-summary-1", + result_type="paragraph", + score=0.95, + content="机器人建议测试用户以后叫星灯。", + metadata={"source_type": "chat_summary"}, + ) + ] + + +@pytest.mark.asyncio +async def test_person_profile_keeps_chat_summary_as_recent_interaction_not_stable_profile(): + metadata_store = FakeMetadataStore() + service = PersonProfileService(metadata_store=metadata_store, retriever=FakeRetriever()) + service.get_person_aliases = lambda person_id: (["测试用户"], "测试用户", []) + + payload = await service.query_person_profile(person_id="person-1", top_k=6, force_refresh=True) + + assert payload["success"] is True + profile_text = payload["profile_text"] + stable_section = profile_text.split("近期相关互动:", 1)[0] + assert "测试用户喜欢猫" in stable_section + assert "星灯" not in stable_section + assert "近期相关互动:" in profile_text + assert "星灯" in profile_text diff --git a/src/A_memorix/core/utils/person_profile_service.py b/src/A_memorix/core/utils/person_profile_service.py index 6215778b..14f3a943 100644 --- a/src/A_memorix/core/utils/person_profile_service.py +++ b/src/A_memorix/core/utils/person_profile_service.py @@ -340,11 +340,51 @@ class PersonProfileService: "type": "paragraph", "score": 1.1, "content": content[:220], - "metadata": {}, + "source": str(row.get("source", "") or 
source), + "metadata": dict(row.get("metadata", {}) or {}), } ) return self._filter_stale_paragraph_evidence(evidence) + @staticmethod + def _source_type_from_source(source: str) -> str: + token = str(source or "").strip() + if token.startswith("chat_summary:"): + return "chat_summary" + if token.startswith("person_fact:"): + return "person_fact" + return "" + + def _enrich_paragraph_evidence_metadata( + self, + paragraph_hash: str, + metadata: Dict[str, Any], + ) -> Tuple[Dict[str, Any], str]: + merged = dict(metadata or {}) + source = str(merged.get("source", "") or "").strip() + try: + paragraph = self.metadata_store.get_paragraph(paragraph_hash) + except Exception: + paragraph = None + if isinstance(paragraph, dict): + paragraph_metadata = paragraph.get("metadata", {}) or {} + if isinstance(paragraph_metadata, dict): + merged = {**paragraph_metadata, **merged} + source = source or str(paragraph.get("source", "") or "").strip() + source_type = str(merged.get("source_type", "") or "").strip() or self._source_type_from_source(source) + if source_type: + merged["source_type"] = source_type + if source: + merged["source"] = source + return merged, source + + @staticmethod + def _is_chat_summary_evidence(item: Dict[str, Any]) -> bool: + metadata = item.get("metadata", {}) if isinstance(item.get("metadata"), dict) else {} + source_type = str(metadata.get("source_type", "") or "").strip() + source = str(item.get("source", "") or metadata.get("source", "") or "").strip() + return source_type == "chat_summary" or source.startswith("chat_summary:") + def _filter_stale_paragraph_evidence( self, evidence: List[Dict[str, Any]], @@ -417,7 +457,8 @@ class PersonProfileService: "type": "paragraph", "score": 0.0, "content": str(para.get("content", ""))[:180], - "metadata": {}, + "source": str(para.get("source", "") or ""), + "metadata": dict(para.get("metadata", {}) or {}), } ) return self._filter_stale_paragraph_evidence(fallback[:top_k]) @@ -443,13 +484,18 @@ class PersonProfileService: if not h or h in seen_hash: continue seen_hash.add(h) + metadata, source = self._enrich_paragraph_evidence_metadata( + h, + dict(getattr(item, "metadata", {}) or {}), + ) evidence.append( { "hash": h, "type": str(getattr(item, "result_type", "")), "score": float(getattr(item, "score", 0.0) or 0.0), "content": str(getattr(item, "content", "") or "")[:220], - "metadata": dict(getattr(item, "metadata", {}) or {}), + "source": source, + "metadata": metadata, } ) evidence.sort(key=lambda x: x.get("score", 0.0), reverse=True) @@ -475,7 +521,7 @@ class PersonProfileService: lines.append(f"记忆特征: {'; '.join(memory_traits[:6])}") if relation_edges: - lines.append("关系证据:") + lines.append("稳定关系证据:") for rel in relation_edges[:6]: s = rel.get("subject", "") p = rel.get("predicate", "") @@ -483,9 +529,19 @@ class PersonProfileService: conf = float(rel.get("confidence", 0.0)) lines.append(f"- {s} {p} {o} (conf={conf:.2f})") - if vector_evidence: - lines.append("向量证据摘要:") - for item in vector_evidence[:4]: + stable_evidence = [item for item in vector_evidence if not self._is_chat_summary_evidence(item)] + recent_interactions = [item for item in vector_evidence if self._is_chat_summary_evidence(item)] + + if stable_evidence: + lines.append("稳定人物事实:") + for item in stable_evidence[:4]: + content = str(item.get("content", "")).strip() + if content: + lines.append(f"- {content}") + + if recent_interactions: + lines.append("近期相关互动:") + for item in recent_interactions[:2]: content = str(item.get("content", "")).strip() if content: 
lines.append(f"- {content}") diff --git a/src/A_memorix/core/utils/summary_importer.py b/src/A_memorix/core/utils/summary_importer.py index d2c18ed5..1c30b8df 100644 --- a/src/A_memorix/core/utils/summary_importer.py +++ b/src/A_memorix/core/utils/summary_importer.py @@ -43,6 +43,7 @@ SUMMARY_PROMPT_TEMPLATE = """ 请完成以下任务: 1. **生成总结**:以第三人称或机器人的视角,简洁明了地总结这段对话的主要内容、发生的事件或讨论的主题。 2. **提取实体与关系**:识别并提取对话中提到的重要实体以及它们之间的关系。 +3. **区分事实来源**:用户自己明确表达的稳定人物事实可以记录;机器人发言只能作为上下文,不能单独作为用户画像事实来源。 请严格以 JSON 格式输出,格式如下: {{ @@ -54,6 +55,7 @@ SUMMARY_PROMPT_TEMPLATE = """ }} 注意:总结应具有叙事性,能够作为长程记忆的一部分。直接使用实体的实际名称,不要使用 e1/e2 等代号。 +不要把机器人提出的建议、猜测、玩笑、承诺或复述,写成用户的稳定偏好、身份或长期事实。 """ diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py index 90ac16af..467967e8 100644 --- a/src/person_info/person_info.py +++ b/src/person_info/person_info.py @@ -1,5 +1,5 @@ from datetime import datetime -from typing import Optional, Union +from typing import List, Optional, Union import hashlib import json @@ -506,7 +506,14 @@ class Person: logger.error(f"同步用户 {self.person_id} 信息到数据库时出错: {e}") -async def store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str) -> None: +async def store_person_memory_from_answer( + person_name: str, + memory_content: str, + chat_id: str, + *, + evidence_source: str = "user_supported", + evidence_message_ids: Optional[List[str]] = None, +) -> None: """将人物事实写入长期记忆系统。 Args: @@ -569,6 +576,8 @@ async def store_person_memory_from_answer(person_name: str, memory_content: str, "person_id": person_id, "person_name": participant_name, "writeback_source": "memory_flow_service", + "evidence_source": str(evidence_source or "user_supported"), + "evidence_message_ids": evidence_message_ids or [], }, respect_filter=True, user_id=session_user_id, diff --git a/src/services/memory_flow_service.py b/src/services/memory_flow_service.py index 5ef7feb2..8d42a8d9 100644 --- a/src/services/memory_flow_service.py +++ b/src/services/memory_flow_service.py @@ -84,7 +84,12 @@ class PersonFactWritebackService: if target_person is None or not target_person.is_known: return - facts = await self._extract_facts(target_person, reply_text) + user_evidence_messages = self._collect_user_evidence_messages(message, target_person) + if not user_evidence_messages: + return + user_evidence_text = self._format_user_evidence(user_evidence_messages) + + facts = await self._extract_facts(target_person, reply_text, user_evidence_text) if not facts: return @@ -104,8 +109,19 @@ class PersonFactWritebackService: if not person_name: return + evidence_message_ids = [ + str(getattr(item, "message_id", "") or "").strip() + for item in user_evidence_messages + if str(getattr(item, "message_id", "") or "").strip() + ] for fact in facts: - await store_person_memory_from_answer(person_name, fact, session_id) + await store_person_memory_from_answer( + person_name, + fact, + session_id, + evidence_source="user_supported", + evidence_message_ids=evidence_message_ids, + ) def _resolve_target_person(self, message: Any) -> Optional[Person]: session = getattr(message, "session", None) @@ -140,22 +156,110 @@ class PersonFactWritebackService: person = Person(person_id=person_id) return person if person.is_known else None - async def _extract_facts(self, person: Person, reply_text: str) -> List[str]: + def _collect_user_evidence_messages(self, message: Any, person: Person) -> List[Any]: + session = getattr(message, "session", None) + session_id = str( + getattr(message, "session_id", "") + or getattr(session, "session_id", 
"") + or "" + ).strip() + if not session_id: + return [] + + evidence: List[Any] = [] + seen_ids = set() + + reply_to = str(getattr(message, "reply_to", "") or "").strip() + if reply_to: + try: + replies = find_messages(message_id=reply_to, limit=1) + except Exception as exc: + logger.debug("查询人物事实 reply_to 证据失败: %s", exc) + replies = [] + evidence.extend(self._filter_target_user_messages(replies, person, seen_ids)) + + if evidence: + return evidence[:3] + + timestamp = self._extract_message_timestamp(message) + try: + candidates = find_messages( + session_id=session_id, + before_time=timestamp, + limit=6, + limit_mode="latest", + filter_bot=True, + ) + except Exception as exc: + logger.debug("查询人物事实近期用户证据失败: %s", exc) + return [] + return self._filter_target_user_messages(candidates, person, seen_ids)[:3] + + @staticmethod + def _extract_message_timestamp(message: Any) -> float | None: + raw_timestamp = getattr(message, "timestamp", None) + if hasattr(raw_timestamp, "timestamp") and callable(raw_timestamp.timestamp): + try: + return float(raw_timestamp.timestamp()) + except Exception: + return None + if isinstance(raw_timestamp, (int, float)): + return float(raw_timestamp) + return None + + @staticmethod + def _filter_target_user_messages(messages: List[Any], person: Person, seen_ids: set) -> List[Any]: + filtered: List[Any] = [] + target_person_id = str(getattr(person, "person_id", "") or "").strip() + for item in messages: + platform = str(getattr(item, "platform", "") or "").strip() + user_info = getattr(getattr(item, "message_info", None), "user_info", None) + user_id = str(getattr(user_info, "user_id", "") or getattr(item, "user_id", "") or "").strip() + if not platform or not user_id or is_bot_self(platform, user_id): + continue + if target_person_id and get_person_id(platform, user_id) != target_person_id: + continue + text = str(getattr(item, "processed_plain_text", "") or "").strip() + if not text: + continue + message_id = str(getattr(item, "message_id", "") or "").strip() + dedup_key = message_id or f"{platform}:{user_id}:{text}" + if dedup_key in seen_ids: + continue + seen_ids.add(dedup_key) + filtered.append(item) + return filtered + + @staticmethod + def _format_user_evidence(messages: List[Any]) -> str: + lines: List[str] = [] + for item in messages[:3]: + text = str(getattr(item, "processed_plain_text", "") or "").strip() + if text: + lines.append(f"- {text}") + return "\n".join(lines) + + async def _extract_facts(self, person: Person, reply_text: str, user_evidence_text: str) -> List[str]: person_name = str(getattr(person, "person_name", "") or getattr(person, "nickname", "") or person.person_id) - prompt = f"""你要从一条机器人刚刚发送的回复中,提取“关于{person_name}的稳定事实”。 + prompt = f"""你要从用户原始发言中提取“关于{person_name}的稳定事实”。 目标人物:{person_name} +用户原始发言证据: +{user_evidence_text} + 机器人回复: {reply_text} 请只提取满足以下条件的事实: -1. 明确是关于目标人物本人的信息。 -2. 具有相对稳定性,可以作为长期记忆保存。 -3. 用简洁中文陈述句表达。 -4. 如果回复是在直接对目标人物说话,出现“你/你的/你自己”时,默认都指目标人物,请先改写成关于目标人物的第三人称事实再输出。 +1. 必须能被“用户原始发言证据”直接支持,不能只来自机器人回复。 +2. 明确是关于目标人物本人的信息。 +3. 具有相对稳定性,可以作为长期记忆保存。 +4. 用简洁中文陈述句表达。 +5. 
如果用户原始发言中出现“我/我的/自己”,默认指目标人物,请先改写成关于目标人物的第三人称事实再输出。 不要提取: - 机器人的情绪、计划、临时动作、客套话 +- 仅由机器人提出的建议、猜测、玩笑、回忆或承诺 - 只适用于当前时刻的短期安排 - 不确定、猜测、反问 - 与目标人物无关的信息 From fb3f4c28efa8ebbef383faa3d4fb341e3f25044b Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 13:05:39 +0800 Subject: [PATCH 2/9] =?UTF-8?q?feat=EF=BC=9A=E5=AE=9E=E9=99=85=E5=BA=94?= =?UTF-8?q?=E7=94=A8=E8=87=AA=E5=AE=9A=E4=B9=89prompt=EF=BC=8C=E4=BF=AE?= =?UTF-8?q?=E5=A4=8Ddocker=E5=90=8C=E7=BA=A7=E7=9B=AE=E5=BD=95=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboard/src/routes/reasoning-process.tsx | 39 ++++++++++ docker-compose.yml | 2 +- pytests/prompt_test/test_prompt_i18n.py | 54 ++++++++++++++ src/chat/utils/statistic.py | 33 ++++++--- src/common/prompt_i18n.py | 83 ++++++++++++++++++++-- src/maisaka/chat_loop_service.py | 6 +- src/webui/app.py | 13 +++- src/webui/routers/config.py | 4 +- 8 files changed, 216 insertions(+), 18 deletions(-) diff --git a/dashboard/src/routes/reasoning-process.tsx b/dashboard/src/routes/reasoning-process.tsx index b40c75e3..af2b8543 100644 --- a/dashboard/src/routes/reasoning-process.tsx +++ b/dashboard/src/routes/reasoning-process.tsx @@ -2,6 +2,7 @@ import { useEffect, useState } from 'react' import { Clock, Code2, + Copy, FileCode2, FileText, RefreshCw, @@ -10,6 +11,7 @@ import { import { Badge } from '@/components/ui/badge' import { Button } from '@/components/ui/button' +import { useToast } from '@/hooks/use-toast' import { Input } from '@/components/ui/input' import { ScrollArea } from '@/components/ui/scroll-area' import { @@ -49,6 +51,7 @@ function formatSize(size: number): string { } export function ReasoningProcessPage() { + const { toast } = useToast() const [items, setItems] = useState([]) const [stages, setStages] = useState([]) const [sessions, setSessions] = useState([]) @@ -165,6 +168,31 @@ export function ReasoningProcessPage() { setPage(1) } + async function handleCopyPrompt() { + if (!textContent || contentLoading) { + toast({ + title: '暂无可复制内容', + description: '请先选择一条包含 txt 的 prompt 记录', + variant: 'destructive', + }) + return + } + + try { + await navigator.clipboard.writeText(textContent) + toast({ + title: '已复制完整 Prompt', + description: selected ? `${selected.stage}/${selected.session_id}/${selected.stem}` : undefined, + }) + } catch (err) { + toast({ + title: '复制失败', + description: err instanceof Error ? err.message : '请手动选择文本复制', + variant: 'destructive', + }) + } + } + return (
@@ -328,6 +356,17 @@ export function ReasoningProcessPage() {
{selected && (
+ {selected.text_path && ( diff --git a/docker-compose.yml b/docker-compose.yml index f6dcded1..922b524c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,6 +12,7 @@ services: - EULA_AGREE=1b662741904d7155d1ce1c00b3530d0d - PRIVACY_AGREE=9943b855e72199d0f5016ea39052f1b6 - MAIBOT_LEGACY_0X_UPGRADE_CONFIRMED=1 # Docker 无法交互确认旧版升级迁移,默认跳过确认提示 + - MAIBOT_STATISTICS_REPORT_PATH=/MaiMBot/data/maibot_statistics.html # 统计数据输出到共享目录,首次运行可自动创建文件 # - EULA_AGREE=1b662741904d7155d1ce1c00b3530d0d # 同意EULA # - PRIVACY_AGREE=9943b855e72199d0f5016ea39052f1b6 # 同意EULA ports: @@ -20,7 +21,6 @@ services: volumes: # 监听地址和端口已迁移到 ./docker-config/mmc/bot_config.toml 的 maim_message 与 webui 配置段 - ./docker-config/mmc:/MaiMBot/config # 持久化bot配置文件 - - ./data/MaiMBot/maibot_statistics.html:/MaiMBot/maibot_statistics.html #统计数据输出 - ./data/MaiMBot:/MaiMBot/data # 共享目录 - ./data/MaiMBot/emoji:/data/emoji # 持久化表情包 - ./data/MaiMBot/plugins:/MaiMBot/plugins # 插件目录 diff --git a/pytests/prompt_test/test_prompt_i18n.py b/pytests/prompt_test/test_prompt_i18n.py index 66b844dc..0b586f0e 100644 --- a/pytests/prompt_test/test_prompt_i18n.py +++ b/pytests/prompt_test/test_prompt_i18n.py @@ -60,6 +60,60 @@ def test_load_prompt_with_category_falls_back_to_default_locale_root(tmp_path: P assert rendered == "你好,Mai" +def test_load_prompt_prefers_custom_prompt_override(tmp_path: Path) -> None: + prompts_root = tmp_path / "prompts" + custom_prompts_root = tmp_path / "data" / "custom_prompts" + write_prompt(prompts_root, "zh-CN", "replyer", "Base {user_name}") + write_prompt(custom_prompts_root, "zh-CN", "replyer", "Custom {user_name}") + + rendered = load_prompt( + "replyer", + locale="zh-CN", + prompts_root=prompts_root, + custom_prompts_root=custom_prompts_root, + user_name="Mai", + ) + + assert rendered == "Custom Mai" + + +def test_load_prompt_prefers_custom_prompt_requested_locale(tmp_path: Path) -> None: + prompts_root = tmp_path / "prompts" + custom_prompts_root = tmp_path / "data" / "custom_prompts" + write_prompt(prompts_root, "zh-CN", "replyer", "Base zh {user_name}") + write_prompt(prompts_root, "en-US", "replyer", "Base en {user_name}") + write_prompt(custom_prompts_root, "zh-CN", "replyer", "Custom zh {user_name}") + write_prompt(custom_prompts_root, "en-US", "replyer", "Custom en {user_name}") + + rendered = load_prompt( + "replyer", + locale="en-US", + prompts_root=prompts_root, + custom_prompts_root=custom_prompts_root, + user_name="Mai", + ) + + assert rendered == "Custom en Mai" + + +def test_load_prompt_uses_requested_locale_source_before_default_custom(tmp_path: Path) -> None: + prompts_root = tmp_path / "prompts" + custom_prompts_root = tmp_path / "data" / "custom_prompts" + write_prompt(prompts_root, "zh-CN", "replyer", "Base zh {user_name}") + write_prompt(prompts_root, "en-US", "replyer", "Base en {user_name}") + write_prompt(custom_prompts_root, "zh-CN", "replyer", "Custom zh {user_name}") + + rendered = load_prompt( + "replyer", + locale="en-US", + prompts_root=prompts_root, + custom_prompts_root=custom_prompts_root, + user_name="Mai", + ) + + assert rendered == "Base en Mai" + + def test_load_prompt_strict_mode_raises_on_missing_placeholder(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: prompts_root = tmp_path / "prompts" write_prompt(prompts_root, "zh-CN", "replyer", "你好,{user_name},现在是 {current_time}") diff --git a/src/chat/utils/statistic.py b/src/chat/utils/statistic.py index da70db7b..d3df804b 100644 --- a/src/chat/utils/statistic.py +++ b/src/chat/utils/statistic.py @@ -1,11 +1,13 @@ 
+from collections import defaultdict +from datetime import datetime, timedelta +from os import getenv +from pathlib import Path +from typing import cast + import asyncio import concurrent.futures import json -from collections import defaultdict -from datetime import datetime, timedelta -from typing import cast - from typing_extensions import TypedDict from sqlmodel import col, select @@ -26,6 +28,17 @@ from src.services.statistics_service import ( logger = get_logger("maibot_statistic") +STATISTICS_REPORT_PATH_ENV = "MAIBOT_STATISTICS_REPORT_PATH" +DEFAULT_STATISTICS_REPORT_PATH = "maibot_statistics.html" + + +def _resolve_statistics_report_path(record_file_path: str | None = None) -> str: + if record_file_path: + return record_file_path + + configured_path = getenv(STATISTICS_REPORT_PATH_ENV, "").strip() + return configured_path or DEFAULT_STATISTICS_REPORT_PATH + class StatPeriodData(TypedDict): total_requests: int @@ -233,7 +246,7 @@ class StatisticOutputTask(AsyncTask): SEP_LINE = "-" * 84 - def __init__(self, record_file_path: str = "maibot_statistics.html"): + def __init__(self, record_file_path: str | None = None): # 延迟300秒启动,运行间隔300秒 super().__init__(task_name="Statistics Data Output Task", wait_before_start=0, run_interval=300) @@ -243,7 +256,7 @@ class StatisticOutputTask(AsyncTask): 注:设计记录时间的目的是方便更新名称,使联系人/群聊名称保持最新 """ - self.record_file_path: str = record_file_path + self.record_file_path: str = _resolve_statistics_report_path(record_file_path) """ 记录文件路径 """ @@ -1730,7 +1743,11 @@ class StatisticOutputTask(AsyncTask): """ ) - with open(self.record_file_path, "w", encoding="utf-8") as f: + record_file = Path(self.record_file_path) + if record_file.parent != Path("."): + record_file.parent.mkdir(parents=True, exist_ok=True) + + with open(record_file, "w", encoding="utf-8") as f: f.write(html_template) def _generate_chart_data(self, stat: StatPeriodMapping) -> dict[str, dict[str, object]]: @@ -2431,7 +2448,7 @@ class StatisticOutputTask(AsyncTask): class AsyncStatisticOutputTask(AsyncTask): """完全异步的统计输出任务 - 更高性能版本""" - def __init__(self, record_file_path: str = "maibot_statistics.html"): + def __init__(self, record_file_path: str | None = None): # 延迟0秒启动,运行间隔300秒 super().__init__(task_name="Async Statistics Data Output Task", wait_before_start=0, run_interval=300) diff --git a/src/common/prompt_i18n.py b/src/common/prompt_i18n.py index 5b487ded..cbdc4b1c 100644 --- a/src/common/prompt_i18n.py +++ b/src/common/prompt_i18n.py @@ -18,10 +18,12 @@ logger = logging.getLogger("maibot.prompt_i18n") PROJECT_ROOT = Path(__file__).resolve().parents[2] PROMPTS_ROOT = (PROJECT_ROOT / "prompts").resolve() +CUSTOM_PROMPTS_ROOT = (PROJECT_ROOT / "data" / "custom_prompts").resolve() PROMPT_EXTENSIONS = (".prompt",) SAFE_SEGMENT_PATTERN = re.compile(r"^[A-Za-z0-9_.-]+$") STRICT_ENV_KEYS = ("MAIBOT_PROMPT_I18N_STRICT", "MAIBOT_I18N_STRICT") STRICT_ENV_VALUES = {"1", "true", "yes", "on"} +_PROMPT_CACHE_REVISION = 0 extract_prompt_placeholders = extract_placeholders @@ -43,6 +45,17 @@ def get_prompts_root(prompts_root: Path | None = None) -> Path: return (prompts_root or PROMPTS_ROOT).resolve() +def get_custom_prompts_root( + custom_prompts_root: Path | None = None, + prompts_root: Path | None = None, +) -> Path: + if custom_prompts_root is not None: + return custom_prompts_root.resolve() + if prompts_root is not None: + return (prompts_root.resolve().parent / "data" / "custom_prompts").resolve() + return CUSTOM_PROMPTS_ROOT + + def normalize_prompt_name(name: str) -> str: candidate_name = 
name.strip() for suffix in PROMPT_EXTENSIONS: @@ -194,6 +207,28 @@ def _iter_locale_candidates(requested_locale: str) -> list[str]: return locale_candidates +def _iter_prompt_path_candidates(base_dir: Path, name: str, category: str | None = None) -> list[Path]: + candidates: list[Path] = [] + for suffix in PROMPT_EXTENSIONS: + if category is not None: + candidates.append((base_dir / category / f"{name}{suffix}").resolve()) + candidates.append((base_dir / f"{name}{suffix}").resolve()) + return candidates + + +def _resolve_custom_prompt_path( + name: str, + locale: str, + category: str | None, + custom_prompts_root: Path, +) -> Path | None: + custom_locale_dir = custom_prompts_root / locale + for candidate_path in _iter_prompt_path_candidates(custom_locale_dir, name, category): + if candidate_path.is_file(): + return candidate_path + return None + + def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = None) -> dict[str, PromptTemplateInfo]: resolved_prompts_root = get_prompts_root(prompts_root) requested_locale = normalize_locale(locale or get_locale()) @@ -206,15 +241,29 @@ def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = def resolve_prompt_path( - name: str, locale: str | None = None, category: str | None = None, prompts_root: Path | None = None + name: str, + locale: str | None = None, + category: str | None = None, + prompts_root: Path | None = None, + custom_prompts_root: Path | None = None, ) -> Path: resolved_prompts_root = get_prompts_root(prompts_root) + resolved_custom_prompts_root = get_custom_prompts_root(custom_prompts_root, prompts_root) normalized_name = normalize_prompt_name(name) normalized_category = normalize_prompt_category(category) requested_locale = normalize_locale(locale or get_locale()) if normalized_category is not None: for locale_candidate in _iter_locale_candidates(requested_locale): + custom_path = _resolve_custom_prompt_path( + normalized_name, + locale_candidate, + normalized_category, + resolved_custom_prompts_root, + ) + if custom_path is not None: + return custom_path + base_dir = resolved_prompts_root / locale_candidate for suffix in PROMPT_EXTENSIONS: candidate_path = (base_dir / normalized_category / f"{normalized_name}{suffix}").resolve() @@ -226,9 +275,20 @@ def resolve_prompt_path( if fallback_path.is_file(): return fallback_path else: - prompt_paths = list_prompt_templates(locale=requested_locale, prompts_root=resolved_prompts_root) - if normalized_name in prompt_paths: - return prompt_paths[normalized_name].path + for locale_candidate in _iter_locale_candidates(requested_locale): + custom_path = _resolve_custom_prompt_path( + normalized_name, + locale_candidate, + None, + resolved_custom_prompts_root, + ) + if custom_path is not None: + return custom_path + + base_dir = resolved_prompts_root / locale_candidate + for candidate_path in _iter_prompt_path_candidates(base_dir, normalized_name): + if candidate_path.is_file(): + return candidate_path raise FileNotFoundError(t("prompt.template_not_found", locale=requested_locale, name=normalized_name)) @@ -263,13 +323,26 @@ def load_prompt( locale: str | None = None, category: str | None = None, prompts_root: Path | None = None, + custom_prompts_root: Path | None = None, **kwargs: object, ) -> str: normalized_name = normalize_prompt_name(name) - prompt_path = resolve_prompt_path(name=normalized_name, locale=locale, category=category, prompts_root=prompts_root) + prompt_path = resolve_prompt_path( + name=normalized_name, + locale=locale, + 
category=category, + prompts_root=prompts_root, + custom_prompts_root=custom_prompts_root, + ) template = _read_prompt_template(prompt_path) return _format_prompt_template(normalized_name, template, **kwargs) def clear_prompt_cache() -> None: + global _PROMPT_CACHE_REVISION + _PROMPT_CACHE_REVISION += 1 _read_prompt_template.cache_clear() + + +def get_prompt_cache_revision() -> int: + return _PROMPT_CACHE_REVISION diff --git a/src/maisaka/chat_loop_service.py b/src/maisaka/chat_loop_service.py index 2cce5e28..606e1269 100644 --- a/src/maisaka/chat_loop_service.py +++ b/src/maisaka/chat_loop_service.py @@ -10,7 +10,7 @@ from rich.console import RenderableType from src.common.data_models.llm_service_data_models import LLMGenerationOptions from src.common.i18n import get_locale from src.common.logger import get_logger -from src.common.prompt_i18n import load_prompt +from src.common.prompt_i18n import get_prompt_cache_revision, load_prompt from src.common.utils.utils_config import ChatConfigUtils from src.config.config import global_config from src.core.tooling import ToolAvailabilityContext, ToolRegistry @@ -219,6 +219,7 @@ class MaisakaChatLoopService: self._interrupt_flag: asyncio.Event | None = None self._tool_registry: ToolRegistry | None = None self._prompts_loaded = chat_system_prompt is not None + self._prompt_cache_revision = get_prompt_cache_revision() self._prompt_load_lock = asyncio.Lock() self._personality_prompt = self._build_personality_prompt() if chat_system_prompt is None: @@ -354,6 +355,7 @@ class MaisakaChatLoopService: self._chat_system_prompt = f"{self._personality_prompt}\n\nYou are a helpful AI assistant." self._prompts_loaded = True + self._prompt_cache_revision = get_prompt_cache_revision() def build_prompt_template_context(self, tools_section: str = "") -> dict[str, str]: """构造 Maisaka prompt 模板的公共渲染参数。""" @@ -519,7 +521,7 @@ class MaisakaChatLoopService: ChatResponse: 本轮规划器返回结果。 """ - if not self._prompts_loaded: + if not self._prompts_loaded or self._prompt_cache_revision != get_prompt_cache_revision(): await self.ensure_chat_prompt_loaded() enable_visual_message = self._resolve_enable_visual_message(request_kind) selected_history, selection_reason = self.select_llm_context_messages( diff --git a/src/webui/app.py b/src/webui/app.py index 1b66863e..6258aaf3 100644 --- a/src/webui/app.py +++ b/src/webui/app.py @@ -18,6 +18,8 @@ logger = get_logger("webui.app") _DASHBOARD_PACKAGE_NAME = "maibot-dashboard" _LOCAL_DASHBOARD_ENV = "MAIBOT_WEBUI_USE_LOCAL_DASHBOARD" +_STATISTICS_REPORT_PATH_ENV = "MAIBOT_STATISTICS_REPORT_PATH" +_DEFAULT_STATISTICS_REPORT_PATH = "maibot_statistics.html" _MANUAL_INSTALL_COMMAND = f"pip install {_DASHBOARD_PACKAGE_NAME}" @@ -38,6 +40,15 @@ def _get_project_root() -> Path: return Path(__file__).resolve().parents[2] +def _resolve_statistics_report_path() -> Path: + configured_path = getenv(_STATISTICS_REPORT_PATH_ENV, "").strip() + report_path = Path(configured_path or _DEFAULT_STATISTICS_REPORT_PATH) + if report_path.is_absolute(): + return report_path.resolve() + + return (_get_project_root() / report_path).resolve() + + def _is_local_dashboard_enabled() -> bool: return getenv(_LOCAL_DASHBOARD_ENV, "").strip().lower() in {"1", "true", "yes", "on"} @@ -187,7 +198,7 @@ def _setup_static_files(app: FastAPI): @app.get("/maibot_statistics.html", include_in_schema=False) async def serve_statistics_report(): - report_path = (_get_project_root() / "maibot_statistics.html").resolve() + report_path = _resolve_statistics_report_path() if not 
report_path.exists() or not report_path.is_file(): raise HTTPException(status_code=404, detail=t("core.not_found")) diff --git a/src/webui/routers/config.py b/src/webui/routers/config.py index 5f5feff0..4ceb7a05 100644 --- a/src/webui/routers/config.py +++ b/src/webui/routers/config.py @@ -14,7 +14,7 @@ from pydantic import BaseModel, Field import tomlkit from src.common.logger import get_logger -from src.common.prompt_i18n import list_prompt_templates +from src.common.prompt_i18n import clear_prompt_cache, list_prompt_templates from src.config.config import CONFIG_DIR, PROJECT_ROOT, Config, ModelConfig from src.config.config_base import AttributeData, ConfigBase from src.config.model_configs import ( @@ -323,6 +323,7 @@ async def update_prompt_file(language: str, filename: str, content: PromptConten try: custom_prompt_path.parent.mkdir(parents=True, exist_ok=True) custom_prompt_path.write_text(content, encoding="utf-8", newline="\n") + clear_prompt_cache() return PromptFileResponse(language=language, filename=filename, content=content, customized=True) except Exception as e: logger.error(f"保存 Prompt 文件失败: {prompt_path} {e}", exc_info=True) @@ -341,6 +342,7 @@ async def reset_prompt_file(language: str, filename: str): try: if custom_prompt_path.exists(): custom_prompt_path.unlink() + clear_prompt_cache() content = prompt_path.read_text(encoding="utf-8") return PromptFileResponse(language=language, filename=filename, content=content, customized=False) except Exception as e: From ca667ee6016c6041d6b2f7ab142e4779070c3712 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 13:54:35 +0800 Subject: [PATCH 3/9] =?UTF-8?q?fix=EF=BC=9A=E5=AF=B9replyer=20tool=20reusl?= =?UTF-8?q?t=E6=A0=BC=E5=BC=8F=E4=BF=AE=E6=94=B9=EF=BC=8C=E6=94=B9?= =?UTF-8?q?=E5=8A=A8=E6=8F=92=E4=BB=B6=E5=A4=87=E4=BB=BD=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E4=BD=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/maisaka/builtin_tool/reply.py | 2 +- src/webui/routers/plugin/support.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/maisaka/builtin_tool/reply.py b/src/maisaka/builtin_tool/reply.py index 35c7df78..65001dd3 100644 --- a/src/maisaka/builtin_tool/reply.py +++ b/src/maisaka/builtin_tool/reply.py @@ -291,7 +291,7 @@ async def handle_tool( ) return tool_ctx.build_success_result( invocation.tool_name, - "回复已生成并发送。", + f'已生成并发送回复"{combined_reply_text}"\n发送对象:{target_user_name}', structured_content={ "msg_id": target_message_id, "set_quote": set_quote, diff --git a/src/webui/routers/plugin/support.py b/src/webui/routers/plugin/support.py index 6af06ece..04aa14a1 100644 --- a/src/webui/routers/plugin/support.py +++ b/src/webui/routers/plugin/support.py @@ -269,7 +269,9 @@ def backup_file(file_path: Path, action: str, move_file: bool = False) -> Option return None backup_name = f"{file_path.name}.{action}.{datetime.now().strftime('%Y%m%d%H%M%S')}" - backup_path = file_path.parent / backup_name + backup_dir = file_path.parent / "config_back" + backup_dir.mkdir(parents=True, exist_ok=True) + backup_path = backup_dir / backup_name if move_file: shutil.move(file_path, backup_path) else: From 340d5c1d6c19b8114acfeba568071f7c17ced3de Mon Sep 17 00:00:00 2001 From: DawnARC Date: Fri, 8 May 2026 14:47:47 +0800 Subject: [PATCH 4/9] =?UTF-8?q?fix=EF=BC=9A=E4=BC=98=E5=8C=96=E4=BA=BA?= =?UTF-8?q?=E7=89=A9=E7=94=BB=E5=83=8F=E6=9D=A5=E6=BA=90=E9=99=90=E5=88=B6?= 
=?UTF-8?q?=EF=BC=8C=E5=B9=B6=E6=94=B9=E8=BF=9B=E6=A8=A1=E5=9E=8B=E9=80=89?= =?UTF-8?q?=E6=8B=A9=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 对关系/向量段落证据进行过滤,强制每条记录显式绑定至给定的 person_id,从而避免因别名导致的跨人员误召回。 同时新增辅助工具,用于安全处理元数据和列表,并提供证据绑定检查; 调整证据收集顺序,在去重之前先验证绑定关系。从元数据存储返回段落时,统一采用游标配合 _row_to_dict 方法。 全面改版摘要模型解析逻辑:允许接收字符串或列表形式的选择器,将选定的模型映射到任务级配置(返回 task_name 与 TaskConfig),从 config_manager 读取当前模型字典,并增加详细的日志记录与回退处理 --- src/A_memorix/core/storage/metadata_store.py | 4 +- .../core/utils/person_profile_service.py | 115 +++++++++++++++--- src/A_memorix/core/utils/summary_importer.py | 88 +++++++++----- 3 files changed, 161 insertions(+), 46 deletions(-) diff --git a/src/A_memorix/core/storage/metadata_store.py b/src/A_memorix/core/storage/metadata_store.py index 1ff30d46..0a29138d 100644 --- a/src/A_memorix/core/storage/metadata_store.py +++ b/src/A_memorix/core/storage/metadata_store.py @@ -2612,7 +2612,9 @@ class MetadataStore: Returns: 段落列表 """ - return self.query("SELECT * FROM paragraphs WHERE source = ?", (source,)) + cursor = self._conn.cursor() + cursor.execute("SELECT * FROM paragraphs WHERE source = ?", (source,)) + return [self._row_to_dict(row, "paragraph") for row in cursor.fetchall()] def get_all_sources(self) -> List[Dict[str, Any]]: """ diff --git a/src/A_memorix/core/utils/person_profile_service.py b/src/A_memorix/core/utils/person_profile_service.py index 14f3a943..081eaa66 100644 --- a/src/A_memorix/core/utils/person_profile_service.py +++ b/src/A_memorix/core/utils/person_profile_service.py @@ -283,7 +283,13 @@ class PersonProfileService: logger.warning(f"解析人物别名失败: person_id={person_id}, err={e}") return aliases, primary_name, memory_traits - def _collect_relation_evidence(self, aliases: List[str], limit: int = 30) -> List[Dict[str, Any]]: + def _collect_relation_evidence( + self, + aliases: List[str], + limit: int = 30, + *, + person_id: str = "", + ) -> List[Dict[str, Any]]: relation_by_hash: Dict[str, Dict[str, Any]] = {} for alias in aliases: for rel in self.metadata_store.get_relations(subject=alias, include_inactive=False): @@ -296,6 +302,12 @@ class PersonProfileService: relation_by_hash[h] = rel relations = list(relation_by_hash.values()) + if person_id: + relations = [ + rel + for rel in relations + if self._is_relation_bound_to_person(rel, person_id=person_id) + ] relations.sort(key=lambda item: float(item.get("confidence", 0.0)), reverse=True) relations = relations[: max(1, int(limit))] @@ -312,6 +324,38 @@ class PersonProfileService: ) return edges + def _is_relation_bound_to_person( + self, + relation: Dict[str, Any], + *, + person_id: str, + ) -> bool: + pid = str(person_id or "").strip() + if not pid: + return False + + metadata = self._metadata_dict(relation.get("metadata")) + if str(metadata.get("person_id", "") or "").strip() == pid: + return True + if pid in self._list_tokens(metadata.get("person_ids")): + return True + + source_paragraph = str(relation.get("source_paragraph", "") or "").strip() + if source_paragraph: + try: + paragraph = self.metadata_store.get_paragraph(source_paragraph) + except Exception: + paragraph = None + if isinstance(paragraph, dict): + payload = { + "hash": source_paragraph, + "source": str(paragraph.get("source", "") or ""), + "metadata": self._metadata_dict(paragraph.get("metadata")), + } + return self._is_evidence_bound_to_person(payload, person_id=pid) + + return False + def _collect_person_fact_evidence(self, person_id: str, limit: int = 4) -> List[Dict[str, Any]]: token = 
str(person_id or "").strip() if not token: @@ -346,6 +390,42 @@ class PersonProfileService: ) return self._filter_stale_paragraph_evidence(evidence) + @staticmethod + def _metadata_dict(value: Any) -> Dict[str, Any]: + return dict(value) if isinstance(value, dict) else {} + + @staticmethod + def _list_tokens(value: Any) -> List[str]: + if value is None: + return [] + if isinstance(value, (list, tuple, set)): + return [str(item or "").strip() for item in value if str(item or "").strip()] + token = str(value or "").strip() + return [token] if token else [] + + def _is_evidence_bound_to_person( + self, + item: Dict[str, Any], + *, + person_id: str, + ) -> bool: + """画像证据必须显式绑定到 person_id,避免别名全局召回串人。""" + pid = str(person_id or "").strip() + if not pid: + return False + + metadata = self._metadata_dict(item.get("metadata")) + source = str(item.get("source", "") or metadata.get("source", "") or "").strip() + if source == f"person_fact:{pid}": + return True + + if str(metadata.get("person_id", "") or "").strip() == pid: + return True + if pid in self._list_tokens(metadata.get("person_ids")): + return True + + return False + @staticmethod def _source_type_from_source(source: str) -> str: token = str(source or "").strip() @@ -360,7 +440,7 @@ class PersonProfileService: paragraph_hash: str, metadata: Dict[str, Any], ) -> Tuple[Dict[str, Any], str]: - merged = dict(metadata or {}) + merged = self._metadata_dict(metadata) source = str(merged.get("source", "") or "").strip() try: paragraph = self.metadata_store.get_paragraph(paragraph_hash) @@ -458,9 +538,11 @@ class PersonProfileService: "score": 0.0, "content": str(para.get("content", ""))[:180], "source": str(para.get("source", "") or ""), - "metadata": dict(para.get("metadata", {}) or {}), + "metadata": self._metadata_dict(para.get("metadata")), } ) + if not self._is_evidence_bound_to_person(fallback[-1], person_id=person_id): + fallback.pop() return self._filter_stale_paragraph_evidence(fallback[:top_k]) per_alias_top_k = max(2, int(top_k / max(1, len(alias_queries)))) @@ -483,21 +565,22 @@ class PersonProfileService: h = str(getattr(item, "hash_value", "") or "") if not h or h in seen_hash: continue - seen_hash.add(h) metadata, source = self._enrich_paragraph_evidence_metadata( h, - dict(getattr(item, "metadata", {}) or {}), - ) - evidence.append( - { - "hash": h, - "type": str(getattr(item, "result_type", "")), - "score": float(getattr(item, "score", 0.0) or 0.0), - "content": str(getattr(item, "content", "") or "")[:220], - "source": source, - "metadata": metadata, - } + self._metadata_dict(getattr(item, "metadata", {})), ) + payload = { + "hash": h, + "type": str(getattr(item, "result_type", "")), + "score": float(getattr(item, "score", 0.0) or 0.0), + "content": str(getattr(item, "content", "") or "")[:220], + "source": source, + "metadata": metadata, + } + if not self._is_evidence_bound_to_person(payload, person_id=person_id): + continue + seen_hash.add(h) + evidence.append(payload) evidence.sort(key=lambda x: x.get("score", 0.0), reverse=True) return self._filter_stale_paragraph_evidence(evidence[:top_k]) @@ -640,7 +723,7 @@ class PersonProfileService: if not aliases and person_keyword: aliases = [person_keyword.strip()] primary_name = person_keyword.strip() - relation_edges = self._collect_relation_evidence(aliases, limit=max(10, top_k * 2)) + relation_edges = self._collect_relation_evidence(aliases, limit=max(10, top_k * 2), person_id=pid) vector_evidence = await self._collect_vector_evidence(aliases, top_k=max(4, top_k), person_id=pid) 
evidence_ids = [ diff --git a/src/A_memorix/core/utils/summary_importer.py b/src/A_memorix/core/utils/summary_importer.py index 1c30b8df..0098aa76 100644 --- a/src/A_memorix/core/utils/summary_importer.py +++ b/src/A_memorix/core/utils/summary_importer.py @@ -16,7 +16,7 @@ import traceback from src.common.logger import get_logger from src.services import llm_service as llm_api from src.services import message_service as message_api -from src.config.config import global_config, model_config as host_model_config +from src.config.config import config_manager, global_config from src.config.model_configs import TaskConfig from ..storage import ( @@ -150,14 +150,20 @@ class SummaryImporter: return True def _normalize_summary_model_selectors(self, raw_value: Any) -> List[str]: - """标准化 summarization.model_name 配置(vNext 仅接受字符串数组)。""" + """标准化 summarization.model_name 配置。""" if raw_value is None: return ["auto"] if isinstance(raw_value, list): selectors = [str(x).strip() for x in raw_value if str(x).strip()] return selectors or ["auto"] + if isinstance(raw_value, str): + selector = raw_value.strip() + if selector: + logger.warning("summarization.model_name 建议使用 List[str],当前字符串配置已兼容处理。") + return [selector] + return ["auto"] raise ValueError( - "summarization.model_name 在 vNext 必须为 List[str]。" + "summarization.model_name 必须为 List[str] 或 str。" " 请执行 scripts/release_vnext_migrate.py migrate。" ) @@ -182,9 +188,17 @@ class SummaryImporter: return None, None - def _resolve_summary_model_config(self) -> Optional[TaskConfig]: + @staticmethod + def _current_model_dict() -> Dict[str, Any]: + try: + return getattr(config_manager.get_model_config(), "models_dict", {}) or {} + except Exception as exc: + logger.warning(f"读取当前模型字典失败: {exc}") + return {} + + def _resolve_summary_model_config(self) -> Optional[Tuple[str, TaskConfig]]: """ - 解析 summarization.model_name 为 TaskConfig。 + 解析 summarization.model_name 为 (task_name, TaskConfig)。 支持: - "auto" - "replyer"(任务名) @@ -201,14 +215,16 @@ class SummaryImporter: selectors = self._normalize_summary_model_selectors(raw_cfg) default_task_name, default_task_cfg = self._pick_default_summary_task(available_tasks) - selected_models: List[str] = [] base_cfg: Optional[TaskConfig] = None - model_dict = getattr(host_model_config, "models_dict", {}) + base_task_name: Optional[str] = None + model_dict = self._current_model_dict() - def _append_models(models: List[str]): - for model_name in models: - if model_name and model_name not in selected_models: - selected_models.append(model_name) + def _find_task_for_model(model_name: str) -> Tuple[Optional[str], Optional[TaskConfig]]: + for task_name, task_cfg in available_tasks.items(): + task_models = [str(item).strip() for item in (getattr(task_cfg, "model_list", []) or []) if str(item).strip()] + if model_name in task_models: + return task_name, task_cfg + return None, None for raw_selector in selectors: selector = raw_selector.strip() @@ -217,9 +233,9 @@ class SummaryImporter: if selector.lower() == "auto": if default_task_cfg: - _append_models(default_task_cfg.model_list) if base_cfg is None: base_cfg = default_task_cfg + base_task_name = default_task_name continue if ":" in selector: @@ -233,47 +249,60 @@ class SummaryImporter: if base_cfg is None: base_cfg = task_cfg + base_task_name = task_name if not model_name or model_name.lower() == "auto": - _append_models(task_cfg.model_list) continue - if model_name in model_dict or model_name in task_cfg.model_list: - _append_models([model_name]) + if model_name in task_cfg.model_list: 
+ logger.info( + f"总结模型选择器 '{selector}' 已定位到任务 '{task_name}';" + "当前 LLM 服务按任务候选列表执行,不单独覆盖具体模型。" + ) else: - logger.warning(f"总结模型选择器 '{selector}' 的模型 '{model_name}' 不存在,已跳过") + logger.warning(f"总结模型选择器 '{selector}' 的模型 '{model_name}' 不在任务 '{task_name}' 中,已跳过") continue task_cfg = available_tasks.get(selector) if task_cfg: - _append_models(task_cfg.model_list) if base_cfg is None: base_cfg = task_cfg + base_task_name = selector continue if selector in model_dict: - _append_models([selector]) + task_name, task_cfg = _find_task_for_model(selector) + if task_name and task_cfg: + if base_cfg is None: + base_cfg = task_cfg + base_task_name = task_name + logger.info( + f"总结模型选择器 '{selector}' 已映射到任务 '{task_name}';" + "当前 LLM 服务按任务候选列表执行,不单独覆盖具体模型。" + ) + continue + logger.warning(f"总结模型选择器 '{selector}' 未归属于任何任务,已跳过") continue logger.warning(f"总结模型选择器 '{selector}' 无法识别,已跳过") - if not selected_models: + if base_cfg is None or not base_task_name: if default_task_cfg: - _append_models(default_task_cfg.model_list) if base_cfg is None: base_cfg = default_task_cfg + base_task_name = default_task_name else: - first_cfg = next(iter(available_tasks.values())) - _append_models(first_cfg.model_list) + base_task_name, first_cfg = next(iter(available_tasks.items())) if base_cfg is None: base_cfg = first_cfg - if not selected_models: + if base_cfg is None or not base_task_name: return None - template_cfg = base_cfg or default_task_cfg or next(iter(available_tasks.values())) - return TaskConfig( - model_list=selected_models, + template_cfg = base_cfg + task_name_to_use = base_task_name + return task_name_to_use, TaskConfig( + model_list=list(template_cfg.model_list), max_tokens=template_cfg.max_tokens, temperature=template_cfg.temperature, slow_threshold=template_cfg.slow_threshold, @@ -343,12 +372,13 @@ class SummaryImporter: chat_history=chat_history_text ) - model_config_to_use = self._resolve_summary_model_config() - if model_config_to_use is None: + resolved_model = self._resolve_summary_model_config() + if resolved_model is None: return False, "未找到可用的总结模型配置" - task_name_to_use = llm_api.resolve_task_name_from_model_config(model_config_to_use) + task_name_to_use, model_config_to_use = resolved_model logger.info(f"正在为流 {stream_id} 执行总结,消息条数: {len(messages)}") + logger.info(f"总结模型任务: {task_name_to_use}") logger.info(f"总结模型候选列表: {model_config_to_use.model_list}") result = await llm_api.generate( From 7c5c614e3ee54173856b9c57574c785d60231a36 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 16:31:00 +0800 Subject: [PATCH 5/9] =?UTF-8?q?fix=EF=BC=9A=E6=AD=A3=E7=A1=AE=E8=AF=86?= =?UTF-8?q?=E5=88=AB=E5=B7=B2=E5=AE=89=E8=A3=85=E6=8F=92=E4=BB=B6=EF=BC=8C?= =?UTF-8?q?=E5=B9=B6=E4=B8=94=E4=BF=AE=E5=A4=8D=E6=97=A0=E6=B3=95=E6=9B=B4?= =?UTF-8?q?=E6=96=B0=E5=92=8C=E5=8D=B8=E8=BD=BD=E8=87=AA=E8=A1=8C=E5=AE=89?= =?UTF-8?q?=E8=A3=85=E6=8F=92=E4=BB=B6=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboard/src/lib/plugin-api/marketplace.ts | 11 ++++++++-- dashboard/src/lib/plugin-api/types.ts | 1 + dashboard/src/routes/plugin-detail.tsx | 12 +++++----- dashboard/src/routes/plugins/index.tsx | 1 + dashboard/src/types/plugin.ts | 2 ++ .../webui/test_plugin_management_routes.py | 14 ++++++++++++ src/webui/routers/plugin/support.py | 22 +++++++++++++++++-- 7 files changed, 54 insertions(+), 9 deletions(-) diff --git a/dashboard/src/lib/plugin-api/marketplace.ts b/dashboard/src/lib/plugin-api/marketplace.ts index 4e243410..d347e1c8 
100644 --- a/dashboard/src/lib/plugin-api/marketplace.ts +++ b/dashboard/src/lib/plugin-api/marketplace.ts @@ -21,6 +21,7 @@ interface PluginApiResponse { id: string manifest: { manifest_version: number + id?: string name: string version: string description: string @@ -56,6 +57,7 @@ function normalizePluginManifest(manifest: PluginApiResponse['manifest']): Plugi return { manifest_version: manifest.manifest_version || 1, + id: manifest.id, name: manifest.name, version: manifest.version, description: manifest.description || '', @@ -104,10 +106,15 @@ export async function fetchPluginList(): Promise> { const pluginList = data .filter(item => { - if (!item?.id || !item?.manifest) { + if (!item?.manifest) { console.warn('跳过无效插件数据:', item) return false } + const pluginId = item.manifest.id || item.id + if (!pluginId) { + console.warn('跳过缺少 ID 的插件:', item) + return false + } if (!item.manifest.name || !item.manifest.version) { console.warn('跳过缺少必需字段的插件:', item.id) return false @@ -115,7 +122,7 @@ export async function fetchPluginList(): Promise> { return true }) .map((item) => ({ - id: item.id, + id: item.manifest.id || item.id, manifest: normalizePluginManifest(item.manifest), downloads: 0, rating: 0, diff --git a/dashboard/src/lib/plugin-api/types.ts b/dashboard/src/lib/plugin-api/types.ts index fe0148f8..aaafe19d 100644 --- a/dashboard/src/lib/plugin-api/types.ts +++ b/dashboard/src/lib/plugin-api/types.ts @@ -25,6 +25,7 @@ export interface InstalledPlugin { id: string manifest: { manifest_version: number + id?: string name: string version: string description: string diff --git a/dashboard/src/routes/plugin-detail.tsx b/dashboard/src/routes/plugin-detail.tsx index a2cff384..c0372659 100644 --- a/dashboard/src/routes/plugin-detail.tsx +++ b/dashboard/src/routes/plugin-detail.tsx @@ -104,21 +104,23 @@ export function PluginDetailPage() { } const pluginList = JSON.parse(result.data) - const foundPlugin = pluginList.find((p: any) => p.id === search.pluginId) + const foundPlugin = pluginList.find((p: any) => (p.manifest?.id || p.id) === search.pluginId) if (!foundPlugin) { throw new Error('未找到该插件') } const rawManifest = foundPlugin.manifest || {} + const pluginId = rawManifest.id || foundPlugin.id const repositoryUrl = rawManifest.repository_url || rawManifest.urls?.repository const homepageUrl = rawManifest.homepage_url || rawManifest.urls?.homepage // 转换为 PluginInfo 格式 const pluginInfo: PluginInfo = { - id: foundPlugin.id, + id: pluginId, manifest: { ...rawManifest, + id: pluginId, homepage_url: homepageUrl, repository_url: repositoryUrl, default_locale: rawManifest.default_locale || rawManifest.i18n?.default_locale || 'zh-CN', @@ -170,8 +172,8 @@ export function PluginDetailPage() { return } - setIsInstalled(checkPluginInstalled(search.pluginId, installedPlugins.data)) - setInstalledVersion(getInstalledPluginVersion(search.pluginId, installedPlugins.data)) + setIsInstalled(checkPluginInstalled(pluginId, installedPlugins.data)) + setInstalledVersion(getInstalledPluginVersion(pluginId, installedPlugins.data)) } catch (err) { setError(err instanceof Error ? 
err.message : '加载失败') } finally { @@ -196,7 +198,7 @@ export function PluginDetailPage() { // 如果插件已安装,优先尝试从本地读取 README if (isInstalled && search.pluginId) { try { - const localResponse = await fetchWithAuth(`/api/webui/plugins/local-readme/${search.pluginId}`) + const localResponse = await fetchWithAuth(`/api/webui/plugins/local-readme/${plugin.id}`) if (localResponse.ok) { const localResult = await localResponse.json() diff --git a/dashboard/src/routes/plugins/index.tsx b/dashboard/src/routes/plugins/index.tsx index 64ef02de..f2f5c1c1 100644 --- a/dashboard/src/routes/plugins/index.tsx +++ b/dashboard/src/routes/plugins/index.tsx @@ -220,6 +220,7 @@ function PluginsPageContent() { id: installedPlugin.id, manifest: { manifest_version: installedPlugin.manifest.manifest_version || 1, + id: installedPlugin.manifest.id || installedPlugin.id, name: installedPlugin.manifest.name, version: installedPlugin.manifest.version, description: installedPlugin.manifest.description || '', diff --git a/dashboard/src/types/plugin.ts b/dashboard/src/types/plugin.ts index db38853e..9bc7e0ef 100644 --- a/dashboard/src/types/plugin.ts +++ b/dashboard/src/types/plugin.ts @@ -20,6 +20,8 @@ export interface HostApplication { export interface PluginManifest { /** 清单文件版本 */ manifest_version: number + /** Manifest 声明的插件唯一标识 */ + id?: string /** 插件名称 */ name: string /** 插件版本 */ diff --git a/pytests/webui/test_plugin_management_routes.py b/pytests/webui/test_plugin_management_routes.py index 8b47b640..132cee86 100644 --- a/pytests/webui/test_plugin_management_routes.py +++ b/pytests/webui/test_plugin_management_routes.py @@ -47,3 +47,17 @@ def test_installed_plugins_only_scan_plugins_dir_and_exclude_a_memorix(client: T assert ids == ["test.demo"] assert "a-dawn.a-memorix" not in ids assert all("/src/plugins/built_in/" not in plugin["path"] for plugin in payload["plugins"]) + + +def test_resolve_installed_plugin_path_falls_back_to_manifest_id(client: TestClient): + plugin_path = support_module.resolve_installed_plugin_path("test.demo") + + assert plugin_path is not None + assert plugin_path.name == "demo_plugin" + + +def test_resolve_installed_plugin_path_accepts_manifest_id_case_mismatch(client: TestClient): + plugin_path = support_module.resolve_installed_plugin_path("Test.Demo") + + assert plugin_path is not None + assert plugin_path.name == "demo_plugin" diff --git a/src/webui/routers/plugin/support.py b/src/webui/routers/plugin/support.py index 04aa14a1..f7a3c827 100644 --- a/src/webui/routers/plugin/support.py +++ b/src/webui/routers/plugin/support.py @@ -213,7 +213,7 @@ def resolve_installed_plugin_path(plugin_id: str) -> Optional[Path]: return _resolve_safe_plugin_directory(new_format_path, plugins_dir, strict=True) if old_format_path.exists(): return _resolve_safe_plugin_directory(old_format_path, plugins_dir, strict=True) - return None + return find_plugin_path_by_id(plugin_id) def parse_repository_url(repository_url: str) -> Tuple[str, str, str]: @@ -256,11 +256,29 @@ def iter_plugin_directories() -> List[Path]: def find_plugin_path_by_id(plugin_id: str) -> Optional[Path]: + casefold_matched_path: Optional[Path] = None + normalized_plugin_id = plugin_id.casefold() + for plugin_path in iter_plugin_directories(): manifest_path = resolve_plugin_file_path(plugin_path, "_manifest.json") manifest = load_manifest_json(manifest_path) - if manifest is not None and (manifest.get("id") == plugin_id or plugin_path.name == plugin_id): + if manifest is None: + continue + + manifest_id = str(manifest.get("id", "")) + if 
manifest_id == plugin_id or plugin_path.name == plugin_id: return plugin_path + + if ( + casefold_matched_path is None + and (manifest_id.casefold() == normalized_plugin_id or plugin_path.name.casefold() == normalized_plugin_id) + ): + casefold_matched_path = plugin_path + + if casefold_matched_path is not None: + logger.warning(f"插件 ID 大小写不一致,已按大小写不敏感匹配: {plugin_id} -> {casefold_matched_path}") + return casefold_matched_path + return None From f736f3fbc1dfbe2e88be1607769a45bc9a1d0c9a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 18:02:38 +0800 Subject: [PATCH 6/9] =?UTF-8?q?perf=EF=BC=9A=E4=BF=AE=E6=94=B9=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=E5=8D=A1=E7=89=87=E6=A0=B7=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dashboard/package.json | 2 +- dashboard/src/lib/version.ts | 2 +- dashboard/src/routes/plugins/InstalledTab.tsx | 2 +- .../src/routes/plugins/MarketplaceTab.tsx | 2 +- dashboard/src/routes/plugins/PluginCard.tsx | 26 +++++++------- dashboard/src/routes/plugins/index.tsx | 35 +++++++++++++------ pyproject.toml | 2 +- requirements.txt | 2 +- src/config/config.py | 2 +- uv.lock | 8 ++--- 10 files changed, 49 insertions(+), 34 deletions(-) diff --git a/dashboard/package.json b/dashboard/package.json index c88b8bca..0656c4a3 100644 --- a/dashboard/package.json +++ b/dashboard/package.json @@ -1,7 +1,7 @@ { "name": "maibot-dashboard", "private": true, - "version": "1.0.9", + "version": "1.0.10", "type": "module", "main": "./out/main/index.js", "scripts": { diff --git a/dashboard/src/lib/version.ts b/dashboard/src/lib/version.ts index 6a5f4070..e1520244 100644 --- a/dashboard/src/lib/version.ts +++ b/dashboard/src/lib/version.ts @@ -5,7 +5,7 @@ * 修改此处的版本号后,所有展示版本的地方都会自动更新 */ -export const APP_VERSION = '1.0.9' +export const APP_VERSION = '1.0.10' export const APP_NAME = 'MaiBot Dashboard' export const APP_FULL_NAME = `${APP_NAME} v${APP_VERSION}` diff --git a/dashboard/src/routes/plugins/InstalledTab.tsx b/dashboard/src/routes/plugins/InstalledTab.tsx index 3447f949..659cd83b 100644 --- a/dashboard/src/routes/plugins/InstalledTab.tsx +++ b/dashboard/src/routes/plugins/InstalledTab.tsx @@ -67,7 +67,7 @@ export function InstalledTab({ }) return ( -
[The remainder of this hunk, together with the hunk bodies for dashboard/src/routes/plugins/MarketplaceTab.tsx, dashboard/src/routes/plugins/PluginCard.tsx, and dashboard/src/routes/plugins/index.tsx, is not reproducible from this copy: the JSX tags were stripped on export, leaving only bare +/- markers and expression fragments. What the diffstat and surviving fragments establish: InstalledTab.tsx and MarketplaceTab.tsx each change the single container line around `{filteredPlugins.map((plugin) => (`; PluginCard.tsx (26 lines changed; visible hunk headers @@ -56,18 +56,18 @@, @@ -85,7 +85,7 @@, @@ -103,7 +103,7 @@) restyles the card title and category badge, the description, the downloads/rating statistics row, the version/author line, and the supported-versions block; plugins/index.tsx (35 lines changed) adjusts the card list layout and the Git status warning rendered when `gitStatus && !gitStatus.installed`.]

diff --git a/pyproject.toml b/pyproject.toml
index 4a639f2e..5c020d1f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,7 +19,7 @@ dependencies = [
     "jieba>=0.42.1",
     "json-repair>=0.47.6",
     "maim-message>=0.6.2",
-    "maibot-dashboard>=1.0.8",
+    "maibot-dashboard>=1.0.9",
     "maibot-plugin-sdk>=2.4.0",
     "matplotlib>=3.10.5",
     "mcp",
diff --git a/requirements.txt b/requirements.txt
index 737bedcc..ba6f59d2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -33,4 +33,4 @@ tomlkit>=0.13.3
 typing-extensions
 uvicorn>=0.35.0
 watchfiles>=1.1.1
-maibot-dashboard>=1.0.8
\ No newline at end of file
+maibot-dashboard>=1.0.9
\ No newline at end of file
diff --git a/src/config/config.py b/src/config/config.py
index f0366d5f..38ea4c6f 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -56,7 +56,7 @@
 BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
 MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
 LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
 A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
-MMC_VERSION: str = "1.0.0-pre.15"
+MMC_VERSION: str = "1.0.0-pre.16"
 CONFIG_VERSION: str = "8.10.15"
 MODEL_CONFIG_VERSION: str = "1.16.1"
diff --git a/uv.lock b/uv.lock
index 81e0cff4..2e06f64f 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1511,7 +1511,7 @@ requires-dist = [
     { name = "httpx", extras = ["socks"] },
     { name = "jieba", specifier = ">=0.42.1" },
     { name = "json-repair", specifier = ">=0.47.6" },
-    { name = "maibot-dashboard", specifier = ">=1.0.8" },
+    { name = "maibot-dashboard", specifier = ">=1.0.9" },
     { name = "maibot-plugin-sdk", specifier = ">=2.4.0" },
     { name = "maim-message", specifier = ">=0.6.2" },
     { name = "matplotlib", specifier = ">=3.10.5" },
@@ -1549,11 +1549,11 @@ dev = [
 
 [[package]]
 name = "maibot-dashboard"
-version = "1.0.8"
+version = "1.0.9"
 source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" }
-sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/13/9f/e59b1a6299cc4f8c9ac16c7c2774581220fdd27227ac9c2fdfb947dfc2f5/maibot_dashboard-1.0.8.tar.gz", hash = "sha256:a47309072d8154905738d02ccad17a543d5159a1e62ca87076ac4dce39e6c922", size = 2496374, upload-time = "2026-05-07T13:58:39.386Z" }
+sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/ac/5b/e90896cbdddc89ec5586873de07a3d70c0107e4dc76db8666a0c0fde6ae8/maibot_dashboard-1.0.9.tar.gz", hash = "sha256:0e5c00be021419686105238cded501024f0383a3815bd85f9a1e747f3f04d0cd", size = 2496957, upload-time = "2026-05-07T18:37:51.291Z" }
 wheels = [
-    { url = "https://pypi.tuna.tsinghua.edu.cn/packages/0f/60/fde671bf332133f1403673096eefcd49f36133141a6b9229e72c2588b221/maibot_dashboard-1.0.8-py3-none-any.whl", hash = "sha256:39da973fed56f1491245109615d81ea79add859467798af92d4ace7d8a5d7557", size = 2563243, upload-time = "2026-05-07T13:58:37.868Z" },
+    { url = "https://pypi.tuna.tsinghua.edu.cn/packages/8c/27/ab227a84e55356039004a375e78031e5e8aaf4192e11908a568498816d5e/maibot_dashboard-1.0.9-py3-none-any.whl", hash = "sha256:197b26c5c3d0e6ba1238b91d12c88e57db71c65303cc602fcccdca84ce4db582", size = 2563281, upload-time = "2026-05-07T18:37:49.648Z" },
 ]
 
 [[package]]

From 80b7585da692bf7c277bdba7880a606d1fdc751b Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Fri, 8 May 2026 18:02:48 +0800
Subject: [PATCH 7/9] =?UTF-8?q?fix=EF=BC=9A=E4=B8=8D=E6=B3=A8=E5=86=8C?=
=?UTF-8?q?=E5=B0=81=E7=A6=81=E8=A1=A8=E6=83=85=E5=8C=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/emoji_system/emoji_manager.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/emoji_system/emoji_manager.py b/src/emoji_system/emoji_manager.py index 19823ff3..6029221f 100644 --- a/src/emoji_system/emoji_manager.py +++ b/src/emoji_system/emoji_manager.py @@ -388,7 +388,6 @@ class EmojiManager: if existing_record := session.exec(statement).first(): existing_record.full_path = str(emoji.full_path) existing_record.no_file_flag = False - existing_record.is_banned = False existing_record.last_used_time = datetime.now() existing_record.query_count += 1 session.add(existing_record) @@ -473,7 +472,6 @@ class EmojiManager: image_record.full_path = str(new_emoji.full_path) image_record.description = new_emoji.description image_record.no_file_flag = False - image_record.is_banned = False session.add(image_record) except Exception as exc: logger.error(f"Update cached emoji description failed: {exc}") @@ -531,6 +529,9 @@ class EmojiManager: statement = select(Images).filter_by(image_hash=emoji.file_hash, image_type=ImageType.EMOJI).limit(1) existing_record = session.exec(statement).first() if existing_record: + if existing_record.is_banned: + logger.info(f"[register_emoji] Emoji is banned, skipping: {emoji.file_hash}") + return "skipped" if existing_record.is_registered and _is_available_emoji_record(existing_record): # logger.info(f"[register_emoji] Emoji already registered, skipping: {emoji.file_hash}") return "skipped" @@ -1085,6 +1086,10 @@ class EmojiManager: return "failed" if existing_record is not None: + if existing_record.is_banned: + logger.info(f"[register_emoji] Emoji is banned, skipping: {target_emoji.file_name}") + return "skipped" + if existing_record.is_registered and _is_available_emoji_record(existing_record): logger.info(f"[register_emoji] Emoji already registered, skipping: {target_emoji.file_name}") return "skipped" From ed091dd70dd5c1d7fa0b2b733304de2302642b8f Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 19:14:42 +0800 Subject: [PATCH 8/9] Update docker-entrypoint.sh --- docker-entrypoint.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 0ac15c7b..17c14c5b 100644 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -10,4 +10,6 @@ if [ ! 
-e "$ADAPTER_TARGET" ] && [ -d "$ADAPTER_TEMPLATE" ]; then cp -a "$ADAPTER_TEMPLATE" "$ADAPTER_TARGET" fi +uv pip install --python "/MaiMBot/.venv/bin/python" --upgrade maibot-dashboard + exec /MaiMBot/.venv/bin/python bot.py "$@" From 05052ba02b16b704dac47f0569f903d835548df8 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Fri, 8 May 2026 21:14:12 +0800 Subject: [PATCH 9/9] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E9=BB=98=E8=AE=A4?= =?UTF-8?q?=E4=BE=9D=E8=B5=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5c020d1f..8b830551 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ dependencies = [ "jieba>=0.42.1", "json-repair>=0.47.6", "maim-message>=0.6.2", - "maibot-dashboard>=1.0.9", + "maibot-dashboard>=1.0.10", "maibot-plugin-sdk>=2.4.0", "matplotlib>=3.10.5", "mcp", diff --git a/requirements.txt b/requirements.txt index ba6f59d2..2b50090a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,4 +33,4 @@ tomlkit>=0.13.3 typing-extensions uvicorn>=0.35.0 watchfiles>=1.1.1 -maibot-dashboard>=1.0.9 \ No newline at end of file +maibot-dashboard>=1.0.10 \ No newline at end of file