This commit is contained in:
DawnARC
2026-04-13 13:09:57 +08:00
parent 7a42a1cb2a
commit b3fb662f3d
17 changed files with 3589 additions and 74 deletions

View File

@@ -8,7 +8,7 @@
- 运行时主目录由 `storage.data_dir` 决定(当前模板默认 `data/a-memorix`
- 部分离线脚本仍以 `data/plugins/a-dawn.a-memorix` 作为默认处理目录。
- 修正文档中的导入示例参数,`memory_import_admin.create_paste` 的 `input_mode` 示例统一为 `text`/`json`
- 更新 `README.md` 关于元数据 schema 的描述,和当前代码 `SCHEMA_VERSION = 9` 保持一致。
- 更新 `README.md` 关于元数据 schema 的描述,和当前代码 `SCHEMA_VERSION = 10` 保持一致。
## [2.0.0] - 2026-03-18

View File

@@ -1,6 +1,6 @@
# A_Memorix 配置参考 (v2.0.0)
本文档对应当前仓库代码(`__version__ = 2.0.0``SCHEMA_VERSION = 9`)。
本文档对应当前仓库代码(`__version__ = 2.0.0`,`SCHEMA_VERSION = 10`)。
说明:

View File

@@ -632,7 +632,7 @@ class DualPathRetriever:
results: List[RetrievalResult] = []
for row in rows:
hash_value = row["hash"]
relation = self.metadata_store.get_relation(hash_value)
relation = self.metadata_store.get_relation(hash_value, include_inactive=False)
if relation is None:
continue
@@ -888,8 +888,8 @@ class DualPathRetriever:
entity_name = entity["name"]
related_rels = []
related_rels.extend(self.metadata_store.get_relations(subject=entity_name))
related_rels.extend(self.metadata_store.get_relations(object=entity_name))
related_rels.extend(self.metadata_store.get_relations(subject=entity_name, include_inactive=False))
related_rels.extend(self.metadata_store.get_relations(object=entity_name, include_inactive=False))
for rel in related_rels:
if rel["hash"] in seen_relations:
@@ -1280,7 +1280,7 @@ class DualPathRetriever:
results = []
for hash_value, score in zip(rel_ids, rel_scores):
relation = self.metadata_store.get_relation(hash_value)
relation = self.metadata_store.get_relation(hash_value, include_inactive=False)
if relation is None:
continue
@@ -1378,7 +1378,7 @@ class DualPathRetriever:
deduplicated_results.append(result)
continue
# 检查关系关联的段落是否已存在
relation = self.metadata_store.get_relation(result.hash_value)
relation = self.metadata_store.get_relation(result.hash_value, include_inactive=False)
if relation:
# 获取关联的段落
para_rels = self.metadata_store.query("""

View File

@@ -255,7 +255,7 @@ class GraphRelationRecallService:
graph_hops: int,
graph_seed_entities: Sequence[str],
) -> Optional[GraphRelationCandidate]:
relation = self.metadata_store.get_relation(relation_hash)
relation = self.metadata_store.get_relation(relation_hash, include_inactive=False)
if relation is None:
return None
supporting_paragraphs = self.metadata_store.get_paragraphs_by_relation(relation_hash)

View File

@@ -338,6 +338,7 @@ class SparseBM25Index:
match_query=match_query,
limit=max(1, int(k)),
max_doc_len=self.config.relation_max_doc_len,
include_inactive=False,
conn=self._conn,
)
out: List[Dict[str, Any]] = []

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -18,6 +18,7 @@ from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from src.common.logger import get_logger
from src.config.config import global_config
from .episode_segmentation_service import EpisodeSegmentationService
from .hash import compute_hash
@@ -528,7 +529,11 @@ class EpisodeService:
"paragraph_count": 0,
}
paragraphs = self.metadata_store.get_live_paragraphs_by_source(token)
memory_cfg = getattr(global_config, "memory", None)
paragraphs = self.metadata_store.get_live_paragraphs_by_source(
token,
exclude_stale=bool(getattr(memory_cfg, "feedback_correction_paragraph_hard_filter_enabled", True)),
)
if not paragraphs:
replace_result = self.metadata_store.replace_episodes_for_source(token, [])
return {

View File

@@ -90,9 +90,9 @@ def find_paths_between_entities(
else:
pred = "related"
direction = "->"
rels = metadata_store.get_relations(subject=u, object=v)
rels = metadata_store.get_relations(subject=u, object=v, include_inactive=False)
if not rels:
rels = metadata_store.get_relations(subject=v, object=u)
rels = metadata_store.get_relations(subject=v, object=u, include_inactive=False)
direction = "<-"
if rels:
best_rel = max(rels, key=lambda x: x.get("confidence", 1.0))
@@ -162,4 +162,3 @@ def to_retrieval_results(paths: Sequence[Dict[str, Any]]) -> List[RetrievalResul
)
)
return converted

View File

@@ -15,6 +15,7 @@ from sqlmodel import select
from src.common.logger import get_logger
from src.common.database.database import get_db_session
from src.common.database.database_model import PersonInfo
from src.config.config import global_config
from ..embedding import EmbeddingAPIAdapter
from ..retrieval import (
@@ -285,11 +286,11 @@ class PersonProfileService:
def _collect_relation_evidence(self, aliases: List[str], limit: int = 30) -> List[Dict[str, Any]]:
relation_by_hash: Dict[str, Dict[str, Any]] = {}
for alias in aliases:
for rel in self.metadata_store.get_relations(subject=alias):
for rel in self.metadata_store.get_relations(subject=alias, include_inactive=False):
h = str(rel.get("hash", ""))
if h:
relation_by_hash[h] = rel
for rel in self.metadata_store.get_relations(object=alias):
for rel in self.metadata_store.get_relations(object=alias, include_inactive=False):
h = str(rel.get("hash", ""))
if h:
relation_by_hash[h] = rel
@@ -342,7 +343,53 @@ class PersonProfileService:
"metadata": {},
}
)
return evidence
return self._filter_stale_paragraph_evidence(evidence)
def _filter_stale_paragraph_evidence(
    self,
    evidence: List[Dict[str, Any]],
) -> List[Dict[str, Any]]:
    """Drop paragraph evidence items invalidated by stale-relation marks.

    A paragraph item is hidden when any of its stale-relation marks points
    to a relation that is either missing from the metadata store (no status
    entry) or flagged ``is_inactive``. Non-paragraph items and paragraphs
    without marks pass through unchanged. When the
    ``feedback_correction_paragraph_hard_filter_enabled`` switch is off,
    *evidence* is returned as-is.
    """
    memory_cfg = getattr(global_config, "memory", None)
    if not bool(getattr(memory_cfg, "feedback_correction_paragraph_hard_filter_enabled", True)):
        return evidence

    paragraph_hashes = [
        str(item.get("hash", "") or "").strip()
        for item in evidence
        if str(item.get("type", "") or "").strip() == "paragraph" and str(item.get("hash", "") or "").strip()
    ]
    if not paragraph_hashes:
        return evidence

    marks_by_paragraph = self.metadata_store.get_paragraph_stale_relation_marks_batch(paragraph_hashes)

    # Collect the distinct relation hashes referenced by any mark, preserving
    # first-seen order so the batch status query is deterministic.
    relation_hashes: List[str] = []
    seen = set()
    for marks in marks_by_paragraph.values():
        for mark in marks:
            relation_hash = str(mark.get("relation_hash", "") or "").strip()
            if relation_hash and relation_hash not in seen:
                seen.add(relation_hash)
                relation_hashes.append(relation_hash)

    status_map = self.metadata_store.get_relation_status_batch(relation_hashes) if relation_hashes else {}

    def _mark_is_stale(mark: Dict[str, Any]) -> bool:
        # One normalization + one lookup per mark (the original recomputed
        # both three times). Marks without a relation hash never hide items.
        relation_hash = str(mark.get("relation_hash", "") or "").strip()
        if not relation_hash:
            return False
        status = status_map.get(relation_hash)
        return status is None or bool(status.get("is_inactive"))

    filtered: List[Dict[str, Any]] = []
    for item in evidence:
        item_type = str(item.get("type", "") or "").strip()
        item_hash = str(item.get("hash", "") or "").strip()
        if item_type == "paragraph" and item_hash:
            marks = marks_by_paragraph.get(item_hash, [])
            if any(_mark_is_stale(mark) for mark in marks):
                continue
        filtered.append(item)
    return filtered
async def _collect_vector_evidence(
self,
@@ -373,7 +420,7 @@ class PersonProfileService:
"metadata": {},
}
)
return fallback[:top_k]
return self._filter_stale_paragraph_evidence(fallback[:top_k])
per_alias_top_k = max(2, int(top_k / max(1, len(alias_queries))))
seen_hash = set()
@@ -406,7 +453,7 @@ class PersonProfileService:
}
)
evidence.sort(key=lambda x: x.get("score", 0.0), reverse=True)
return evidence[:top_k]
return self._filter_stale_paragraph_evidence(evidence[:top_k])
def _build_profile_text(
self,

View File

@@ -190,6 +190,16 @@ class AMemorixHostService:
)
)
if component_name == "enqueue_feedback_task":
return await kernel.enqueue_feedback_task(
query_tool_id=str(payload.get("query_tool_id", "") or ""),
session_id=str(payload.get("session_id", "") or ""),
query_timestamp=payload.get("query_timestamp"),
structured_content=payload.get("structured_content")
if isinstance(payload.get("structured_content"), dict)
else {},
)
if component_name == "ingest_summary":
return await kernel.ingest_summary(
external_id=str(payload.get("external_id", "") or ""),
@@ -251,6 +261,7 @@ class AMemorixHostService:
"memory_source_admin": kernel.memory_source_admin,
"memory_episode_admin": kernel.memory_episode_admin,
"memory_profile_admin": kernel.memory_profile_admin,
"memory_feedback_admin": kernel.memory_feedback_admin,
"memory_runtime_admin": kernel.memory_runtime_admin,
"memory_import_admin": kernel.memory_import_admin,
"memory_tuning_admin": kernel.memory_tuning_admin,

View File

@@ -62,7 +62,10 @@ if any(arg in {"-h", "--help"} for arg in sys.argv[1:]):
try:
from A_memorix.core.storage import GraphStore, KnowledgeType, MetadataStore, QuantizationType, VectorStore
from A_memorix.core.storage.metadata_store import SCHEMA_VERSION
from A_memorix.core.storage.metadata_store import (
RUNTIME_AUTO_MIGRATION_MIN_SCHEMA_VERSION,
SCHEMA_VERSION,
)
except Exception as e: # pragma: no cover
print(f"❌ failed to import storage modules: {e}")
raise SystemExit(2)
@@ -125,6 +128,14 @@ def _sqlite_table_exists(conn: sqlite3.Connection, table: str) -> bool:
return row is not None
def _sqlite_column_exists(conn: sqlite3.Connection, table: str, column: str) -> bool:
try:
rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
except Exception:
return False
return any(str(row[1] or "") == str(column or "") for row in rows)
def _collect_hash_alias_conflicts(conn: sqlite3.Connection) -> Dict[str, List[str]]:
hashes: List[str] = []
if _sqlite_table_exists(conn, "relations"):
@@ -152,6 +163,8 @@ def _collect_hash_alias_conflicts(conn: sqlite3.Connection) -> Dict[str, List[st
def _collect_invalid_knowledge_types(conn: sqlite3.Connection) -> List[str]:
if not _sqlite_table_exists(conn, "paragraphs"):
return []
if not _sqlite_column_exists(conn, "paragraphs", "knowledge_type"):
return []
allowed = {item.value for item in KnowledgeType}
rows = conn.execute("SELECT DISTINCT knowledge_type FROM paragraphs").fetchall()
@@ -288,6 +301,14 @@ def _preflight_impl(config_path: Path, data_dir: Path) -> Dict[str, Any]:
facts["schema_migrations_exists"] = has_schema_table
has_paragraph_backfill = _sqlite_table_exists(conn, "paragraph_vector_backfill")
facts["paragraph_vector_backfill_exists"] = has_paragraph_backfill
has_stale_marks = _sqlite_table_exists(conn, "paragraph_stale_relation_marks")
facts["paragraph_stale_relation_marks_exists"] = has_stale_marks
has_profile_refresh_queue = _sqlite_table_exists(conn, "person_profile_refresh_queue")
facts["person_profile_refresh_queue_exists"] = has_profile_refresh_queue
has_feedback_rollback_status = _sqlite_column_exists(conn, "memory_feedback_tasks", "rollback_status")
facts["memory_feedback_tasks_rollback_status_exists"] = has_feedback_rollback_status
has_feedback_rollback_plan = _sqlite_column_exists(conn, "memory_feedback_tasks", "rollback_plan_json")
facts["memory_feedback_tasks_rollback_plan_exists"] = has_feedback_rollback_plan
if not has_schema_table:
checks.append(
CheckItem(
@@ -300,14 +321,28 @@ def _preflight_impl(config_path: Path, data_dir: Path) -> Dict[str, Any]:
row = conn.execute("SELECT MAX(version) FROM schema_migrations").fetchone()
version = int(row[0]) if row and row[0] is not None else 0
facts["schema_version"] = version
runtime_auto_migratable = (
version < SCHEMA_VERSION
and version >= RUNTIME_AUTO_MIGRATION_MIN_SCHEMA_VERSION
)
facts["schema_runtime_auto_migratable"] = runtime_auto_migratable
if version != SCHEMA_VERSION:
checks.append(
CheckItem(
"CP-08",
"error",
f"schema version mismatch: current={version}, expected={SCHEMA_VERSION}",
if runtime_auto_migratable:
checks.append(
CheckItem(
"CP-18",
"warning",
f"schema version behind runtime target: current={version}, expected={SCHEMA_VERSION}; runtime auto migration will handle this update",
)
)
else:
checks.append(
CheckItem(
"CP-08",
"error",
f"schema version mismatch: current={version}, expected={SCHEMA_VERSION}",
)
)
)
elif not has_paragraph_backfill:
checks.append(
CheckItem(
@@ -316,6 +351,30 @@ def _preflight_impl(config_path: Path, data_dir: Path) -> Dict[str, Any]:
"paragraph_vector_backfill table missing under current schema version",
)
)
elif not has_stale_marks:
checks.append(
CheckItem(
"CP-15",
"error",
"paragraph_stale_relation_marks table missing under current schema version",
)
)
elif not has_profile_refresh_queue:
checks.append(
CheckItem(
"CP-16",
"error",
"person_profile_refresh_queue table missing under current schema version",
)
)
elif not has_feedback_rollback_status or not has_feedback_rollback_plan:
checks.append(
CheckItem(
"CP-17",
"error",
"memory_feedback_tasks rollback columns missing under current schema version",
)
)
if _sqlite_table_exists(conn, "relations"):
row = conn.execute("SELECT COUNT(*) FROM relations").fetchone()
@@ -616,6 +675,46 @@ def _verify_impl(config_path: Path, data_dir: Path) -> Dict[str, Any]:
"paragraph_vector_backfill table missing after migration",
)
)
has_feedback_tasks = _sqlite_table_exists(conn, "memory_feedback_tasks")
facts["memory_feedback_tasks_exists"] = bool(has_feedback_tasks)
if not has_feedback_tasks:
checks.append(
CheckItem(
"CP-15",
"error",
"memory_feedback_tasks table missing after migration",
)
)
has_feedback_logs = _sqlite_table_exists(conn, "memory_feedback_action_logs")
facts["memory_feedback_action_logs_exists"] = bool(has_feedback_logs)
if not has_feedback_logs:
checks.append(
CheckItem(
"CP-16",
"error",
"memory_feedback_action_logs table missing after migration",
)
)
has_feedback_rollback_status = _sqlite_column_exists(conn, "memory_feedback_tasks", "rollback_status")
facts["memory_feedback_tasks_rollback_status_exists"] = bool(has_feedback_rollback_status)
if not has_feedback_rollback_status:
checks.append(
CheckItem(
"CP-17",
"error",
"memory_feedback_tasks.rollback_status missing after migration",
)
)
has_feedback_rollback_plan = _sqlite_column_exists(conn, "memory_feedback_tasks", "rollback_plan_json")
facts["memory_feedback_tasks_rollback_plan_exists"] = bool(has_feedback_rollback_plan)
if not has_feedback_rollback_plan:
checks.append(
CheckItem(
"CP-18",
"error",
"memory_feedback_tasks.rollback_plan_json missing after migration",
)
)
conflicts = _collect_hash_alias_conflicts(conn)
invalid_knowledge_types = _collect_invalid_knowledge_types(conn)
finally:

View File

@@ -431,6 +431,153 @@ class MemoryConfig(ConfigBase):
},
)
"""是否在发送回复后自动提取并写回人物事实到长期记忆"""
feedback_correction_enabled: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "message-circle-warning",
},
)
"""是否启用反馈驱动的延迟记忆纠错任务"""
feedback_correction_window_hours: float = Field(
default=12.0,
ge=0.1,
json_schema_extra={
"x-widget": "input",
"x-icon": "clock-4",
},
)
"""反馈窗口时长(小时),以 query_memory 执行时间为起点"""
feedback_correction_check_interval_minutes: int = Field(
default=30,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "timer",
},
)
"""反馈纠错定时任务轮询间隔(分钟)"""
feedback_correction_batch_size: int = Field(
default=20,
ge=1,
le=200,
json_schema_extra={
"x-widget": "input",
"x-icon": "list-ordered",
},
)
"""反馈纠错每轮最大处理任务数"""
feedback_correction_auto_apply_threshold: float = Field(
default=0.85,
ge=0.0,
le=1.0,
json_schema_extra={
"x-widget": "slider",
"x-icon": "gauge",
"step": 0.01,
},
)
"""自动应用纠错动作的最低置信度阈值"""
feedback_correction_max_feedback_messages: int = Field(
default=30,
ge=1,
le=200,
json_schema_extra={
"x-widget": "input",
"x-icon": "messages-square",
},
)
"""每个纠错任务最多使用的窗口内用户反馈消息数"""
feedback_correction_prefilter_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "filter",
},
)
"""是否启用纠错前置预筛(用于减少不必要的模型调用)"""
feedback_correction_paragraph_mark_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "sticky-note",
},
)
"""是否为受影响 paragraph 写入已纠正旧事实标记"""
feedback_correction_paragraph_hard_filter_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "eye-off",
},
)
"""是否在用户侧查询中硬过滤带有 stale 标记的 paragraph"""
feedback_correction_profile_refresh_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "user-round-search",
},
)
"""是否在反馈纠错后将受影响人物画像加入刷新队列"""
feedback_correction_profile_force_refresh_on_read: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "refresh-ccw",
},
)
"""人物画像处于脏队列时,读取是否强制刷新而不直接复用旧快照"""
feedback_correction_episode_rebuild_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "clapperboard",
},
)
"""是否在反馈纠错后将受影响 source 加入 episode 重建队列"""
feedback_correction_episode_query_block_enabled: bool = Field(
default=True,
json_schema_extra={
"x-widget": "switch",
"x-icon": "ban",
},
)
"""episode source 处于重建队列时,是否对用户侧查询做屏蔽"""
feedback_correction_reconcile_interval_minutes: int = Field(
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "repeat",
},
)
"""反馈纠错二阶段一致性后台协调任务轮询间隔(分钟)"""
feedback_correction_reconcile_batch_size: int = Field(
default=20,
ge=1,
le=200,
json_schema_extra={
"x-widget": "input",
"x-icon": "list-restart",
},
)
"""反馈纠错二阶段一致性每轮处理 profile/episode 队列的批大小"""
chat_history_topic_check_message_threshold: int = Field(
default=80,
ge=1,
@@ -502,6 +649,39 @@ class MemoryConfig(ConfigBase):
raise ValueError(
f"chat_history_finalize_message_count 必须至少为1当前值: {self.chat_history_finalize_message_count}"
)
if self.feedback_correction_window_hours <= 0:
raise ValueError(
f"feedback_correction_window_hours 必须大于0当前值: {self.feedback_correction_window_hours}"
)
if self.feedback_correction_check_interval_minutes < 1:
raise ValueError(
"feedback_correction_check_interval_minutes 必须至少为1"
f"当前值: {self.feedback_correction_check_interval_minutes}"
)
if self.feedback_correction_batch_size < 1:
raise ValueError(
f"feedback_correction_batch_size 必须至少为1当前值: {self.feedback_correction_batch_size}"
)
if not 0 <= self.feedback_correction_auto_apply_threshold <= 1:
raise ValueError(
"feedback_correction_auto_apply_threshold 必须在 [0, 1] 之间,"
f"当前值: {self.feedback_correction_auto_apply_threshold}"
)
if self.feedback_correction_max_feedback_messages < 1:
raise ValueError(
"feedback_correction_max_feedback_messages 必须至少为1"
f"当前值: {self.feedback_correction_max_feedback_messages}"
)
if self.feedback_correction_reconcile_interval_minutes < 1:
raise ValueError(
"feedback_correction_reconcile_interval_minutes 必须至少为1"
f"当前值: {self.feedback_correction_reconcile_interval_minutes}"
)
if self.feedback_correction_reconcile_batch_size < 1:
raise ValueError(
"feedback_correction_reconcile_batch_size 必须至少为1"
f"当前值: {self.feedback_correction_reconcile_batch_size}"
)
return super().model_post_init(context)

View File

@@ -19,6 +19,7 @@ from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvo
from src.llm_models.exceptions import ReqAbortException
from src.llm_models.payload_content.tool_option import ToolCall
from src.services import database_service as database_api
from src.services.memory_service import memory_service
from .builtin_tool import get_action_tool_specs
from .builtin_tool import build_builtin_tool_handlers as build_split_builtin_tool_handlers
@@ -1013,15 +1014,30 @@ class MaisakaReasoningEngine:
builtin_prompt = tool_spec.build_llm_description()
try:
await database_api.store_tool_info(
tool_record_payload = self._build_tool_record_payload(invocation, result, tool_spec)
saved_record = await database_api.store_tool_info(
chat_stream=self._runtime.chat_stream,
builtin_prompt=builtin_prompt,
display_prompt=self._build_tool_display_prompt(invocation, result, tool_spec),
tool_id=invocation.call_id,
tool_data=self._build_tool_record_payload(invocation, result, tool_spec),
tool_data=tool_record_payload,
tool_name=invocation.tool_name,
tool_reasoning=invocation.reasoning,
)
if invocation.tool_name == "query_memory" and isinstance(saved_record, dict):
enqueue_payload = await memory_service.enqueue_feedback_task(
query_tool_id=str(saved_record.get("tool_id") or invocation.call_id or "").strip(),
session_id=str(saved_record.get("session_id") or self._runtime.chat_stream.session_id or "").strip(),
query_timestamp=saved_record.get("timestamp"),
structured_content=tool_record_payload.get("structured_content")
if isinstance(tool_record_payload.get("structured_content"), dict)
else {},
)
if not bool(enqueue_payload.get("success")):
logger.debug(
f"{self._runtime.log_prefix} 反馈纠错任务未入队: "
f"tool_call_id={invocation.call_id} reason={enqueue_payload.get('reason', '')}"
)
except Exception:
logger.exception(
f"{self._runtime.log_prefix} 写入工具记录失败: 工具={invocation.tool_name} 调用编号={invocation.call_id}"
@@ -1153,4 +1169,3 @@ class MaisakaReasoningEngine:
return True, tool_result_summaries, tool_monitor_results
return False, tool_result_summaries, tool_monitor_results

View File

@@ -233,6 +233,30 @@ class MemoryService:
logger.warning("长期记忆搜索失败: %s", exc)
return MemorySearchResult(success=False, error=str(exc))
async def enqueue_feedback_task(
    self,
    *,
    query_tool_id: str,
    session_id: str,
    query_timestamp: Any = None,
    structured_content: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Forward a feedback-correction enqueue request to the memory kernel.

    Returns the kernel's payload dict on success; on invocation failure or
    a non-dict reply, returns a dict with ``success``/``queued`` flags and
    a ``reason`` string.
    """
    content = structured_content if isinstance(structured_content, dict) else {}
    request_body = {
        "query_tool_id": str(query_tool_id or "").strip(),
        "session_id": str(session_id or "").strip(),
        "query_timestamp": query_timestamp,
        "structured_content": content,
    }
    try:
        payload = await self._invoke("enqueue_feedback_task", request_body, timeout_ms=10000)
    except Exception as exc:
        logger.warning("反馈纠错任务入队失败: %s", exc)
        return {"success": False, "queued": False, "reason": str(exc)}
    if isinstance(payload, dict):
        return payload
    return {"success": False, "queued": False, "reason": "invalid_payload"}
async def ingest_summary(
self,
*,
@@ -388,6 +412,13 @@ class MemoryService:
logger.warning("画像管理调用失败: %s", exc)
return {"success": False, "error": str(exc)}
async def feedback_admin(self, *, action: str, **kwargs) -> Dict[str, Any]:
    """Dispatch an admin *action* to the memory feedback-correction component.

    Failures are logged and reported as ``{"success": False, "error": ...}``
    instead of propagating.
    """
    try:
        result = await self._invoke_admin("memory_feedback_admin", action=action, **kwargs)
    except Exception as exc:
        logger.warning("反馈纠错管理调用失败: %s", exc)
        return {"success": False, "error": str(exc)}
    return result
async def runtime_admin(self, *, action: str, **kwargs) -> Dict[str, Any]:
try:
return await self._invoke_admin("memory_runtime_admin", action=action, **kwargs)

View File

@@ -205,6 +205,12 @@ def _setup_static_files(app: FastAPI):
def _resolve_static_path() -> Path | None:
# 开发环境优先允许复用仓库里的现成 dist
base_dir = _get_project_root()
static_path = base_dir / "dashboard" / "dist"
if static_path.exists():
return static_path
try:
module = import_module("maibot_dashboard")
get_dist_path = getattr(module, "get_dist_path", None)
@@ -215,11 +221,6 @@ def _resolve_static_path() -> Path | None:
except Exception:
pass
# 开发环境允许复用仓库里的现成 dist但不再在用户机器上触发任何前端自愈构建。
base_dir = _get_project_root()
static_path = base_dir / "dashboard" / "dist"
if static_path.exists():
return static_path
return None

View File

@@ -124,6 +124,11 @@ class DeletePurgeRequest(BaseModel):
limit: int = Field(1000, ge=1, le=5000)
class FeedbackRollbackRequest(BaseModel):
    """Request body for rolling back a feedback-correction task."""

    # Identifier of who requested the rollback; defaults to the web UI.
    requested_by: str = "webui"
    # Optional human-readable justification for the rollback.
    reason: str = ""
def _build_import_guide_markdown(settings: dict[str, Any]) -> str:
path_aliases_raw = settings.get("path_aliases")
path_aliases = path_aliases_raw if isinstance(path_aliases_raw, dict) else {}
@@ -359,6 +364,29 @@ async def _profile_delete_override(person_id: str) -> dict:
return await memory_service.profile_admin(action="delete_override", person_id=person_id)
async def _feedback_list(limit: int, status: str, rollback_status: str, query: str) -> dict:
    """List feedback-correction tasks via the memory admin service."""
    filters = {
        "limit": limit,
        "status": status,
        "rollback_status": rollback_status,
        "query": query,
    }
    return await memory_service.feedback_admin(action="list", **filters)
async def _feedback_get(task_id: int) -> dict:
    """Fetch a single feedback-correction task by id via the memory admin service."""
    return await memory_service.feedback_admin(action="get", task_id=task_id)
async def _feedback_rollback(task_id: int, payload: FeedbackRollbackRequest) -> dict:
    """Request rollback of a feedback-correction task via the memory admin service."""
    requested_by = payload.requested_by
    reason = payload.reason
    return await memory_service.feedback_admin(
        action="rollback", task_id=task_id, requested_by=requested_by, reason=reason
    )
async def _runtime_save() -> dict:
    """Ask the memory runtime admin component to persist its current state."""
    return await memory_service.runtime_admin(action="save")
@@ -830,6 +858,26 @@ async def delete_memory_profile_override(person_id: str):
return await _profile_delete_override(person_id)
@router.get("/feedback-corrections")
async def list_memory_feedback_corrections(
    limit: int = Query(50, ge=1, le=200),
    status: str = Query(""),
    rollback_status: str = Query(""),
    query: str = Query(""),
):
    """List feedback-correction tasks, optionally filtered by status, rollback status, and free-text query."""
    return await _feedback_list(limit, status, rollback_status, query)
@router.get("/feedback-corrections/{task_id}")
async def get_memory_feedback_correction(task_id: int):
    """Return the detail record of one feedback-correction task."""
    return await _feedback_get(task_id)
@router.post("/feedback-corrections/{task_id}/rollback")
async def rollback_memory_feedback_correction(task_id: int, payload: FeedbackRollbackRequest):
    """Trigger rollback of a feedback-correction task with the caller's identity and reason."""
    return await _feedback_rollback(task_id, payload)
@router.post("/runtime/save")
async def save_memory_runtime():
return await _runtime_save()