feat: 同步本地非算法改动到上游基线

保留反馈纠错、WebUI 与运行时增强;移除不应提交的 algorithm_redesign 设计目录及其专项测试。
This commit is contained in:
A-Dawn
2026-04-16 13:57:07 +08:00
parent 6c22fdfdf9
commit 21b642d07d
10 changed files with 2244 additions and 34 deletions

View File

@@ -11,7 +11,7 @@ from __future__ import annotations
import asyncio
import time
from typing import Any, List, Optional, Union
from typing import Any, Dict, List, Optional, Tuple, Union
import aiohttp
import numpy as np
@@ -29,6 +29,9 @@ logger = get_logger("A_Memorix.EmbeddingAPIAdapter")
class EmbeddingAPIAdapter:
"""适配宿主 embedding 请求接口。"""
_GLOBAL_DIMENSION_CACHE: Dict[str, int] = {}
_GLOBAL_TEXT_EMBEDDING_CACHE: Dict[Tuple[str, int, str], np.ndarray] = {}
def __init__(
self,
batch_size: int = 32,
@@ -232,10 +235,32 @@ class EmbeddingAPIAdapter:
logger.error(f"通过直接 Client 获取 Embedding 失败: {last_exc}")
return None
def _dimension_cache_key(self) -> str:
    """Build the process-wide cache key identifying this adapter's model config.

    Combines the configured model name (falling back to "auto"), the default
    dimension, and all candidate model names, so adapters with the same
    effective configuration share cached dimension-detection results.
    """
    candidates = ",".join(self._resolve_candidate_model_names())
    model = str(self.model_name or "auto")
    default_dim = str(self.default_dimension)
    return f"{model}|{default_dim}|{candidates}"
def _embedding_cache_key(self, text: str, dimensions: Optional[int]) -> Tuple[str, int, str]:
    """Build the per-text embedding cache key.

    Returns a (model-config key, canonical dimension, normalized text) tuple;
    the requested dimension is resolved to its canonical value first so that
    equivalent requests hit the same cache entry.
    """
    canonical_dim = int(self._resolve_canonical_dimension(dimensions))
    normalized_text = str(text or "")
    return self._dimension_cache_key(), canonical_dim, normalized_text
async def _detect_dimension(self) -> int:
if self._dimension_detected and self._dimension is not None:
return self._dimension
cache_key = self._dimension_cache_key()
cached_dimension = self._GLOBAL_DIMENSION_CACHE.get(cache_key)
if cached_dimension is not None:
self._dimension = int(cached_dimension)
self._dimension_detected = True
logger.info(f"嵌入维度命中进程缓存: {self._dimension}")
return self._dimension
logger.info("正在检测嵌入模型维度...")
try:
target_dim = self.default_dimension
@@ -251,6 +276,7 @@ class EmbeddingAPIAdapter:
)
self._dimension = detected_dim
self._dimension_detected = True
self._GLOBAL_DIMENSION_CACHE[cache_key] = int(detected_dim)
return detected_dim
except Exception as exc:
logger.debug(f"带维度参数探测失败: {exc},尝试不带维度参数探测")
@@ -261,6 +287,7 @@ class EmbeddingAPIAdapter:
detected_dim = len(test_embedding)
self._dimension = detected_dim
self._dimension_detected = True
self._GLOBAL_DIMENSION_CACHE[cache_key] = int(detected_dim)
logger.info(f"嵌入维度检测成功 (自然维度): {detected_dim}")
return detected_dim
logger.warning(f"嵌入维度检测失败,使用 configured_dimension: {self.default_dimension}")
@@ -269,6 +296,7 @@ class EmbeddingAPIAdapter:
self._dimension = self.default_dimension
self._dimension_detected = True
self._GLOBAL_DIMENSION_CACHE[cache_key] = int(self.default_dimension)
return self.default_dimension
async def encode(
@@ -336,6 +364,25 @@ class EmbeddingAPIAdapter:
all_embeddings: List[np.ndarray] = []
for offset in range(0, len(texts), batch_size):
batch = texts[offset : offset + batch_size]
batch_results: List[Tuple[int, np.ndarray]] = []
uncached_items: List[Tuple[int, str]] = []
if self.enable_cache:
for index, text in enumerate(batch):
cache_key = self._embedding_cache_key(text, dimensions)
cached_vector = self._GLOBAL_TEXT_EMBEDDING_CACHE.get(cache_key)
if cached_vector is None:
uncached_items.append((index, text))
else:
batch_results.append((index, cached_vector.copy()))
else:
uncached_items = list(enumerate(batch))
if not uncached_items:
batch_results.sort(key=lambda item: item[0])
all_embeddings.extend(emb for _, emb in batch_results)
continue
semaphore = asyncio.Semaphore(self.max_concurrent)
async def encode_with_semaphore(text: str, index: int):
@@ -351,11 +398,20 @@ class EmbeddingAPIAdapter:
tasks = [
encode_with_semaphore(text, offset + index)
for index, text in enumerate(batch)
for index, text in uncached_items
]
results = await asyncio.gather(*tasks)
results.sort(key=lambda item: item[0])
all_embeddings.extend(emb for _, emb in results)
normalized_results: List[Tuple[int, np.ndarray]] = []
for batch_index, vector in results:
normalized_results.append((batch_index, vector))
if self.enable_cache:
text = batch[batch_index]
cache_key = self._embedding_cache_key(text, dimensions)
self._GLOBAL_TEXT_EMBEDDING_CACHE[cache_key] = vector.copy()
batch_results.extend(normalized_results)
batch_results.sort(key=lambda item: item[0])
all_embeddings.extend(emb for _, emb in batch_results)
return np.array(all_embeddings, dtype=np.float32)

View File

@@ -34,7 +34,7 @@ except Exception:
logger = get_logger("A_Memorix.MetadataStore")
SCHEMA_VERSION = 10
SCHEMA_VERSION = 12
RUNTIME_AUTO_MIGRATION_MIN_SCHEMA_VERSION = 9

View File

@@ -375,6 +375,30 @@ def _preflight_impl(config_path: Path, data_dir: Path) -> Dict[str, Any]:
"memory_feedback_tasks rollback columns missing under current schema version",
)
)
elif not has_stale_marks:
checks.append(
CheckItem(
"CP-15",
"error",
"paragraph_stale_relation_marks table missing under current schema version",
)
)
elif not has_profile_refresh_queue:
checks.append(
CheckItem(
"CP-16",
"error",
"person_profile_refresh_queue table missing under current schema version",
)
)
elif not has_feedback_rollback_status or not has_feedback_rollback_plan:
checks.append(
CheckItem(
"CP-17",
"error",
"memory_feedback_tasks rollback columns missing under current schema version",
)
)
if _sqlite_table_exists(conn, "relations"):
row = conn.execute("SELECT COUNT(*) FROM relations").fetchone()

View File

@@ -145,23 +145,23 @@ class VisualConfig(ConfigBase):
__ui_label__ = "视觉"
__ui_icon__ = "image"
planner_mode: Literal["text", "multimodal", "auto"] = Field(
default="auto",
multimodal_planner: bool = Field(
default=True,
json_schema_extra={
"x-widget": "select",
"x-icon": "git-branch",
"x-widget": "switch",
"x-icon": "image",
},
)
"""规划器模式auto根据模型信息自动选择text为纯文本模式multimodal为多模态模式"""
"""是否直接输入图片"""
replyer_mode: Literal["text", "multimodal", "auto"] = Field(
default="auto",
multimodal_replyer: bool = Field(
default=False,
json_schema_extra={
"x-widget": "select",
"x-widget": "switch",
"x-icon": "git-branch",
},
)
"""回复器模式auto根据模型信息自动选择text为纯文本模式multimodal为多模态模式"""
"""是否启用 Maisaka 多模态 replyer 生成器"""
visual_style: str = Field(
default="请用中文描述这张图片的内容。如果有文字请把文字描述概括出来请留意其主题直观感受输出为一段平文本最多30字请注意不要分点就输出一段文本",
@@ -239,12 +239,17 @@ class ChatConfig(ConfigBase):
)
"""Planner 连续被新消息打断的最大次数0 表示不启用打断"""
plan_reply_log_max_per_chat: int = Field(
default=1024,
json_schema_extra={
"x-widget": "input",
"x-icon": "file-text",
},
)
"""每个聊天流最大保存的Plan/Reply日志数量超过此数量时会自动删除最老的日志"""
group_chat_prompt: str = Field(
default="""
你正在qq群里聊天下面是群里正在聊的内容其中包含聊天记录和聊天中的图片。
回复尽量简短一些。最好一次对一个话题进行回复,免得啰嗦或者回复内容太乱。请注意把握聊天内容。
不要回复的太频繁!控制回复的频率,不要每个人的消息都回复,只回复你感兴趣的或者主动提及你的。
""",
default="你需要控制自己发言的频率,如果是一对一聊天,可以以较均匀的频率发言;如果用户较多,不要每句都回复,控制回复频率,不要回复的太频繁!控制回复的频率,不要每个人的消息都回复。",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "users",
@@ -253,11 +258,7 @@ class ChatConfig(ConfigBase):
"""_wrap_群聊通用注意事项"""
private_chat_prompts: str = Field(
default="""
你正在聊天,下面是正在聊的内容,其中包含聊天记录和聊天中的图片。
回复尽量简短一些。请注意把握聊天内容。
请考虑对方的发言频率,想法,思考自己何时回复以及回复内容。
""",
default="你需要控制自己发言的频率,可以以较均匀的频率发言。",
json_schema_extra={
"x-widget": "textarea",
"x-icon": "user",
@@ -663,15 +664,6 @@ class LearningItem(ConfigBase):
)
"""是否启用jargon学习"""
advanced_chosen: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "sparkles",
},
)
"""是否启用基于子代理的二次表达方式选择"""
class ExpressionGroup(ConfigBase):
"""表达互通组配置类,若列表为空代表全局共享"""
@@ -701,7 +693,6 @@ class ExpressionConfig(ConfigBase):
use_expression=True,
enable_learning=True,
enable_jargon_learning=True,
advanced_chosen=False,
)
],
json_schema_extra={
@@ -1573,6 +1564,35 @@ class MaiSakaConfig(ConfigBase):
)
"""MaiSaka 使用的用户名称"""
tool_filter_task_name: str = Field(
default="utils",
json_schema_extra={
"x-widget": "input",
"x-icon": "sparkles",
},
)
"""工具筛选预判使用的模型任务名"""
tool_filter_threshold: int = Field(
default=20,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "filter",
},
)
"""当可用工具总数超过该阈值时,先进行一轮工具筛选"""
tool_filter_max_keep: int = Field(
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
"x-icon": "list-filter",
},
)
"""工具筛选阶段最多保留的非内置工具数量"""
show_image_path: bool = Field(
default=True,
json_schema_extra={