Merge remote-tracking branch 'upstream/r-dev' into r-dev

# Conflicts:
#	src/memory_system/chat_history_summarizer.py
#	src/memory_system/memory_retrieval.py
#	src/memory_system/retrieval_tools/__init__.py
Author: A-Dawn
Date: 2026-04-01 14:20:22 +08:00
29 changed files with 434 additions and 304 deletions


@@ -68,7 +68,6 @@ class DefaultReplyer:
reply_reason: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List[ActionPlannerInfo]] = None,
enable_tool: bool = True,
from_plugin: bool = True,
stream_id: Optional[str] = None,
reply_message: Optional[SessionMessage] = None,
@@ -87,7 +86,6 @@ class DefaultReplyer:
reply_reason: 回复原因
available_actions: 可用的动作信息字典
chosen_actions: 已选动作
enable_tool: 是否启用工具调用
from_plugin: 是否来自插件
Returns:
@@ -112,7 +110,6 @@ class DefaultReplyer:
extra_info=extra_info,
available_actions=available_actions,
chosen_actions=chosen_actions,
enable_tool=enable_tool,
reply_message=reply_message,
reply_reason=reply_reason,
reply_time_point=reply_time_point,
@@ -394,26 +391,20 @@ class DefaultReplyer:
return f"{expression_habits_title}\n{expression_habits_block}", selected_ids
async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
async def build_tool_info(self, chat_history: str, sender: str, target: str) -> str:
del chat_history
del sender
del target
del enable_tool
return ""
"""构建工具信息块
Args:
chat_history: 聊天历史记录
reply_to: 回复对象,格式为 "发送者:消息内容"
enable_tool: 是否启用工具调用
Returns:
str: 工具信息字符串
"""
if not enable_tool:
return ""
try:
# 使用工具执行器获取信息
tool_results = []
@@ -741,7 +732,6 @@ class DefaultReplyer:
reply_reason: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List[ActionPlannerInfo]] = None,
enable_tool: bool = True,
reply_time_point: float = time.time(),
think_level: int = 1,
unknown_words: Optional[List[str]] = None,
@@ -755,7 +745,6 @@ class DefaultReplyer:
available_actions: 可用动作
chosen_actions: 已选动作
enable_timeout: 是否启用超时处理
enable_tool: 是否启用工具调用
reply_message: 回复的原始消息
Returns:
str: 构建好的上下文
@@ -840,7 +829,7 @@ class DefaultReplyer:
"expression_habits",
),
self._time_and_run_task(
self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
self.build_tool_info(chat_talking_prompt_short, sender, target), "tool_info"
),
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(self.build_actions_prompt(available_actions, chosen_actions), "actions_info"),


@@ -174,7 +174,7 @@ class MaisakaReplyGenerator:
try:
system_prompt = load_prompt(
"maidairy_replyer",
"maisaka_replyer",
bot_name=global_config.bot.nickname,
time_block=f"当前时间:{current_time}",
identity=self._personality_prompt,
@@ -193,7 +193,7 @@ class MaisakaReplyGenerator:
]
if extra_sections:
user_sections.append("\n\n".join(extra_sections))
user_sections.append(f"你的想法\n{reply_reason}")
user_sections.append(f"回复信息参考\n{reply_reason}")
user_sections.append("现在,你说:")
user_prompt = "\n\n".join(user_sections)
@@ -288,7 +288,6 @@ class MaisakaReplyGenerator:
reply_reason: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List[object]] = None,
enable_tool: bool = True,
from_plugin: bool = True,
stream_id: Optional[str] = None,
reply_message: Optional[SessionMessage] = None,
@@ -303,7 +302,6 @@ class MaisakaReplyGenerator:
"""结合上下文生成 Maisaka 的最终可见回复。"""
del available_actions
del chosen_actions
del enable_tool
del extra_info
del from_plugin
del log_reply


@@ -65,7 +65,6 @@ class PrivateReplyer:
reply_reason: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List[ActionPlannerInfo]] = None,
enable_tool: bool = True,
from_plugin: bool = True,
think_level: int = 1,
stream_id: Optional[str] = None,
@@ -84,7 +83,6 @@ class PrivateReplyer:
reply_reason: 回复原因
available_actions: 可用的动作信息字典
chosen_actions: 已选动作
enable_tool: 是否启用工具调用
from_plugin: 是否来自插件
Returns:
@@ -103,7 +101,6 @@ class PrivateReplyer:
extra_info=extra_info,
available_actions=available_actions,
chosen_actions=chosen_actions,
enable_tool=enable_tool,
reply_message=reply_message,
reply_reason=reply_reason,
unknown_words=unknown_words,
@@ -287,26 +284,20 @@ class PrivateReplyer:
return f"{expression_habits_title}\n{expression_habits_block}", selected_ids
async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
async def build_tool_info(self, chat_history: str, sender: str, target: str) -> str:
del chat_history
del sender
del target
del enable_tool
return ""
"""构建工具信息块
Args:
chat_history: 聊天历史记录
reply_to: 回复对象,格式为 "发送者:消息内容"
enable_tool: 是否启用工具调用
Returns:
str: 工具信息字符串
"""
if not enable_tool:
return ""
try:
# 使用工具执行器获取信息
tool_results = []
@@ -612,7 +603,6 @@ class PrivateReplyer:
reply_reason: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List[ActionPlannerInfo]] = None,
enable_tool: bool = True,
unknown_words: Optional[List[str]] = None,
) -> Tuple[str, List[int]]:
"""
@@ -624,7 +614,6 @@ class PrivateReplyer:
available_actions: 可用动作
chosen_actions: 已选动作
enable_timeout: 是否启用超时处理
enable_tool: 是否启用工具调用
reply_message: 回复的原始消息
Returns:
str: 构建好的上下文
@@ -719,7 +708,7 @@ class PrivateReplyer:
),
# self._time_and_run_task(self.build_relation_info(chat_talking_prompt_short, sender), "relation_info"),
self._time_and_run_task(
self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
self.build_tool_info(chat_talking_prompt_short, sender, target), "tool_info"
),
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(self.build_actions_prompt(available_actions, chosen_actions), "actions_info"),


@@ -66,6 +66,9 @@ class LLMResponseResult(BaseDataModel):
reasoning: str = field(default_factory=str)
model_name: str = field(default_factory=str)
tool_calls: List[ToolCall] | None = None
prompt_tokens: int = 0
completion_tokens: int = 0
total_tokens: int = 0
@dataclass(slots=True)
@@ -120,6 +123,9 @@ class LLMServiceResult(BaseDataModel):
"response": self.completion.response,
"reasoning": self.completion.reasoning,
"model_name": self.completion.model_name,
"prompt_tokens": self.completion.prompt_tokens,
"completion_tokens": self.completion.completion_tokens,
"total_tokens": self.completion.total_tokens,
}
if self.completion.tool_calls is not None:
payload["tool_calls"] = [

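For orientation, a minimal sketch of how the three new usage counters ride along with the completion result and its serialized payload. CompletionResult below is a hypothetical stand-in for LLMResponseResult/LLMServiceResult (it ignores the BaseDataModel hierarchy and tool-call serialization); the field and key names match the hunks above.

from dataclasses import dataclass
from typing import Any, Dict, List, Optional

@dataclass(slots=True)
class CompletionResult:
    """Simplified stand-in carrying the new token-usage counters."""
    response: str = ""
    reasoning: str = ""
    model_name: str = ""
    tool_calls: Optional[List[Any]] = None
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0

    def to_payload(self) -> Dict[str, Any]:
        # Mirrors LLMServiceResult.to_dict(): usage counters are always present,
        # tool_calls only when the model actually returned any.
        payload: Dict[str, Any] = {
            "response": self.response,
            "reasoning": self.reasoning,
            "model_name": self.model_name,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "total_tokens": self.total_tokens,
        }
        if self.tool_calls is not None:
            payload["tool_calls"] = list(self.tool_calls)
        return payload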

@@ -36,7 +36,6 @@ from .official_configs import (
ResponsePostProcessConfig,
ResponseSplitterConfig,
TelemetryConfig,
ToolConfig,
VoiceConfig,
WebUIConfig,
)
@@ -90,9 +89,6 @@ class Config(ConfigBase):
message_receive: MessageReceiveConfig = Field(default_factory=MessageReceiveConfig)
"""消息接收配置类"""
tool: ToolConfig = Field(default_factory=ToolConfig)
"""工具配置类"""
voice: VoiceConfig = Field(default_factory=VoiceConfig)
"""语音配置类"""


@@ -280,6 +280,18 @@ def try_migrate_legacy_bot_config_dict(data: dict[str, Any]) -> MigrationResult:
migrated_any = True
reasons.append("experimental.chat_prompts")
chat = _as_dict(data.get("chat"))
if chat is not None and "think_mode" in chat:
chat.pop("think_mode", None)
migrated_any = True
reasons.append("chat.think_mode_removed")
tool = _as_dict(data.get("tool"))
if tool is not None:
data.pop("tool", None)
migrated_any = True
reasons.append("tool_section_removed")
# ExpressionConfig 中的 manual_reflect_operator_id:
# 旧版本可能是 ""(字符串),新版本期望 Optional[TargetItem]。
# 空字符串视为未配置,转换为 None/删除键以避免校验错误。
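A self-contained sketch of what the two new migration branches above do to a legacy config dict: chat.think_mode is popped in place and the whole tool table is dropped, with each removal recorded in the migration reasons. migrate_legacy is a hypothetical, simplified stand-in for try_migrate_legacy_bot_config_dict (plain isinstance checks replace the _as_dict helper).

from typing import Any

def migrate_legacy(data: dict[str, Any]) -> list[str]:
    """Apply the two removals and return the recorded migration reasons."""
    reasons: list[str] = []
    chat = data.get("chat")
    if isinstance(chat, dict) and "think_mode" in chat:
        chat.pop("think_mode", None)
        reasons.append("chat.think_mode_removed")
    if isinstance(data.get("tool"), dict):
        data.pop("tool", None)
        reasons.append("tool_section_removed")
    return reasons

legacy = {"chat": {"think_mode": "dynamic", "context_size": 30}, "tool": {"enable_tool": True}}
print(migrate_legacy(legacy))  # ['chat.think_mode_removed', 'tool_section_removed']
print(legacy)                  # {'chat': {'context_size': 30}}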


@@ -236,20 +236,6 @@ class ChatConfig(ConfigBase):
)
"""上下文长度"""
think_mode: Literal["classic", "deep", "dynamic"] = Field(
default="dynamic",
json_schema_extra={
"x-widget": "select",
"x-icon": "brain",
},
)
"""
思考模式配置
- classic: 默认think_level为0轻量回复不需要思考和回忆
- deep: 默认think_level为1深度回复需要进行回忆和思考
- dynamic: think_level由planner动态给出根据planner返回的think_level决定
"""
plan_reply_log_max_per_chat: int = Field(
default=1024,
json_schema_extra={
@@ -669,21 +655,6 @@ class ExpressionConfig(ConfigBase):
"""是否在回复前尝试对上下文中的黑话进行解释关闭可减少一次LLM调用仅影响回复前的黑话匹配与解释不影响黑话学习"""
class ToolConfig(ConfigBase):
"""工具配置类"""
__ui_parent__ = "emoji"
enable_tool: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "wrench",
},
)
"""是否在聊天中启用工具"""
class VoiceConfig(ConfigBase):
"""语音识别配置类"""


@@ -34,6 +34,7 @@ from src.llm_models.model_client.base_client import (
ClientRequest,
EmbeddingRequest,
ResponseRequest,
UsageRecord,
client_registry,
)
from src.llm_models.payload_content.message import Message, MessageBuilder
@@ -137,6 +138,7 @@ class LLMOrchestrator:
reasoning_content: str,
model_name: str,
tool_calls: List[ToolCall] | None,
usage: UsageRecord | None = None,
) -> LLMResponseResult:
"""构建统一的文本响应结果。
@@ -154,6 +156,9 @@ class LLMOrchestrator:
reasoning=reasoning_content,
model_name=model_name,
tool_calls=tool_calls,
prompt_tokens=usage.prompt_tokens if usage is not None else 0,
completion_tokens=usage.completion_tokens if usage is not None else 0,
total_tokens=usage.total_tokens if usage is not None else 0,
)
async def generate_response_for_image(
@@ -215,7 +220,13 @@ class LLMOrchestrator:
endpoint="/chat/completions",
time_cost=time_cost,
)
return self._build_generation_result(content, reasoning_content, model_info.name, tool_calls)
return self._build_generation_result(
content,
reasoning_content,
model_info.name,
tool_calls,
response.usage,
)
async def generate_response_for_voice(self, voice_base64: str) -> LLMAudioTranscriptionResult:
"""为语音生成转录响应。
@@ -298,7 +309,13 @@ class LLMOrchestrator:
endpoint="/chat/completions",
time_cost=time.time() - start_time,
)
return self._build_generation_result(content or "", reasoning_content, model_info.name, tool_calls)
return self._build_generation_result(
content or "",
reasoning_content,
model_info.name,
tool_calls,
response.usage,
)
async def generate_response_with_message_async(
self,
@@ -343,11 +360,6 @@ class LLMOrchestrator:
)
response = execution_result.api_response
model_info = execution_result.model_info
if self.request_type.startswith("maisaka_"):
logger.info(
f"LLMOrchestrator[{self.request_type}] generate_response_with_message_async 执行完成 "
f"(model={model_info.name}, time_cost={time.time() - start_time:.2f}s)"
)
time_cost = time.time() - start_time
logger.debug(f"LLM请求总耗时: {time_cost}")
@@ -369,7 +381,13 @@ class LLMOrchestrator:
endpoint="/chat/completions",
time_cost=time_cost,
)
return self._build_generation_result(content or "", reasoning_content, model_info.name, tool_calls)
return self._build_generation_result(
content or "",
reasoning_content,
model_info.name,
tool_calls,
response.usage,
)
async def get_embedding(self, embedding_input: str) -> LLMEmbeddingResult:
"""获取嵌入向量。
@@ -833,14 +851,7 @@ class LLMOrchestrator:
message_list = []
if message_factory:
if self.request_type.startswith("maisaka_"):
logger.info(f"LLMOrchestrator[{self.request_type}] 正在通过 message_factory 构建消息列表")
message_list = message_factory(client)
if self.request_type.startswith("maisaka_"):
logger.info(
f"LLMOrchestrator[{self.request_type}] message_factory 返回了 {len(message_list)} 条消息"
)
try:
request = self._build_client_request(
request_type=request_type,

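To make the None-safe plumbing above concrete: when the client response carries no usage record, the counters fall back to zero rather than failing on attribute access. UsageRecord is sketched here as a plain dataclass and usage_counters is a hypothetical helper; the real UsageRecord comes from src.llm_models.model_client.base_client and the guard lives inline in _build_generation_result.

from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class UsageRecord:
    """Minimal stand-in for the client-side usage record."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int

def usage_counters(usage: Optional[UsageRecord]) -> Tuple[int, int, int]:
    # Same guard as in _build_generation_result: a missing usage record
    # degrades to zeros instead of raising.
    if usage is None:
        return 0, 0, 0
    return usage.prompt_tokens, usage.completion_tokens, usage.total_tokens

print(usage_counters(UsageRecord(120, 35, 155)))  # (120, 35, 155)
print(usage_counters(None))                       # (0, 0, 0)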

@@ -121,11 +121,7 @@ def create_builtin_tool_specs() -> List[ToolSpec]:
),
_build_tool_spec(
name="no_reply",
brief_description="本轮不发送可见回复,继续下一步思考",
),
_build_tool_spec(
name="stop",
brief_description="暂停当前内部循环,等待新的外部消息。",
brief_description="本轮不进行回复,等待其他用户的新消息",
),
_build_tool_spec(
name="send_emoji",


@@ -137,7 +137,7 @@ class MaisakaChatLoopService:
try:
self._chat_system_prompt = load_prompt(
"maidairy_chat",
"maisaka_chat",
file_tools_section=tools_section,
bot_name=global_config.bot.nickname,
identity=self._personality_prompt,
@@ -695,6 +695,32 @@ class MaisakaChatLoopService:
padding=(0, 1),
)
@staticmethod
def _format_token_count(token_count: int) -> str:
"""格式化 token 数量展示文本。"""
if token_count >= 10_000:
return f"{token_count / 1000:.1f}k"
return str(token_count)
@classmethod
def _build_prompt_stats_text(
cls,
*,
selected_history_count: int,
built_message_count: int,
prompt_tokens: int,
completion_tokens: int,
total_tokens: int,
) -> str:
"""构造本轮 prompt 的统计信息文本。"""
return (
f"已选上下文消息数={selected_history_count} "
f"大模型消息数={built_message_count} "
f"实际输入Token={cls._format_token_count(prompt_tokens)} "
f"输出Token={cls._format_token_count(completion_tokens)} "
f"总Token={cls._format_token_count(total_tokens)}"
)
async def chat_loop_step(self, chat_history: List[LLMContextMessage]) -> ChatResponse:
"""执行一轮 Maisaka 规划器请求。
@@ -769,6 +795,15 @@ class MaisakaChatLoopService:
request_elapsed = perf_counter() - request_started_at
logger.info(f"规划器请求完成,耗时={request_elapsed:.3f}")
prompt_stats_text = self._build_prompt_stats_text(
selected_history_count=len(selected_history),
built_message_count=len(built_messages),
prompt_tokens=generation_result.prompt_tokens,
completion_tokens=generation_result.completion_tokens,
total_tokens=generation_result.total_tokens,
)
logger.info(f"本轮Prompt统计: {prompt_stats_text}")
tool_call_summaries = [
{
"调用编号": getattr(tool_call, "call_id", getattr(tool_call, "id", None)),

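The two statistics helpers added above are easy to sanity-check in isolation. The snippet below repeats them as module-level functions (a hypothetical standalone copy, without the class and keyword-only arguments) to show the log text the planner loop now emits: counts of 10,000 and above are abbreviated with one decimal and a "k" suffix, smaller counts are printed verbatim.

def format_token_count(token_count: int) -> str:
    # Same rule as MaisakaChatLoopService._format_token_count.
    if token_count >= 10_000:
        return f"{token_count / 1000:.1f}k"
    return str(token_count)

def build_prompt_stats_text(selected_history_count: int, built_message_count: int,
                            prompt_tokens: int, completion_tokens: int, total_tokens: int) -> str:
    # Same field order as _build_prompt_stats_text.
    return (
        f"已选上下文消息数={selected_history_count} "
        f"大模型消息数={built_message_count} "
        f"实际输入Token={format_token_count(prompt_tokens)} "
        f"输出Token={format_token_count(completion_tokens)} "
        f"总Token={format_token_count(total_tokens)}"
    )

print(format_token_count(9_999))   # '9999'
print(format_token_count(12_345))  # '12.3k'
print(build_prompt_stats_text(8, 12, 12_345, 640, 12_985))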

@@ -1,5 +1,6 @@
"""Maisaka 推理引擎。"""
from base64 import b64decode
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional, cast
@@ -15,7 +16,7 @@ from src.chat.heart_flow.heartFC_utils import CycleDetail
from src.chat.message_receive.message import SessionMessage
from src.chat.replyer.replyer_manager import replyer_manager
from src.chat.utils.utils import process_llm_response
from src.common.data_models.message_component_data_model import MessageSequence, TextComponent
from src.common.data_models.message_component_data_model import EmojiComponent, MessageSequence, TextComponent
from src.common.database.database import get_db_session
from src.common.database.database_model import PersonInfo
from src.common.logger import get_logger
@@ -82,7 +83,7 @@ class MaisakaReasoningEngine:
self._runtime._agent_state = self._runtime._STATE_RUNNING
if cached_messages:
self._append_wait_interrupted_message_if_needed()
self._clear_pending_wait_tool_call_id()
await self._ingest_messages(cached_messages)
anchor_message = cached_messages[-1]
else:
@@ -94,7 +95,7 @@ class MaisakaReasoningEngine:
self._runtime._internal_turn_queue.task_done()
continue
logger.info(f"{self._runtime.log_prefix} 等待超时后开始新一轮思考")
self._runtime._chat_history.append(self._build_wait_timeout_message())
self._clear_pending_wait_tool_call_id()
self._trim_chat_history()
try:
for round_index in range(self._runtime._max_internal_rounds):
@@ -179,6 +180,10 @@ class MaisakaReasoningEngine:
return self._runtime.message_cache[-1]
return None
def _clear_pending_wait_tool_call_id(self) -> None:
"""清理等待状态残留的 wait 工具调用编号。"""
self._runtime._pending_wait_tool_call_id = None
def _build_wait_timeout_message(self) -> ToolResultMessage:
"""构造 wait 超时后的工具结果消息。"""
tool_call_id = self._runtime._pending_wait_tool_call_id or "wait_timeout"
@@ -260,20 +265,22 @@ class MaisakaReasoningEngine:
timestamp_text = message.timestamp.strftime("%H:%M:%S")
user_name = user_info.user_nickname or user_info.user_id
group_card = user_info.user_cardname or ""
message_id = message.message_id or ""
return (
f"[时间]{timestamp_text}\n"
f"[用户]{user_name}\n"
f"[用户群昵称]{group_card}\n"
f"[msg_id]{message_id}\n"
"[发言内容]"
)
prefix_parts = [
f"[时间]{timestamp_text}\n",
f"[用户]{user_name}\n",
f"[用户群昵称]{group_card}\n",
]
if not message.is_notify and message.message_id:
prefix_parts.append(f"[msg_id]{message.message_id}\n")
prefix_parts.append("[发言内容]")
return "".join(prefix_parts)
def _build_legacy_visible_text(self, message: SessionMessage, source_sequence: MessageSequence) -> str:
user_info = message.message_info.user_info
speaker_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
legacy_sequence = MessageSequence([])
legacy_sequence.text(format_speaker_content(speaker_name, "", message.timestamp, message.message_id))
visible_message_id = None if message.is_notify else message.message_id
legacy_sequence.text(format_speaker_content(speaker_name, "", message.timestamp, visible_message_id))
for component in clone_message_sequence(source_sequence).components:
legacy_sequence.components.append(component)
return build_visible_text_from_sequence(legacy_sequence).strip()
@@ -281,7 +288,8 @@ class MaisakaReasoningEngine:
def _build_legacy_visible_text_from_text(self, message: SessionMessage, content: str) -> str:
user_info = message.message_info.user_info
speaker_name = user_info.user_cardname or user_info.user_nickname or user_info.user_id
return format_speaker_content(speaker_name, content, message.timestamp, message.message_id).strip()
visible_message_id = None if message.is_notify else message.message_id
return format_speaker_content(speaker_name, content, message.timestamp, visible_message_id).strip()
def _insert_chat_history_message(self, message: LLMContextMessage) -> int:
"""将消息按处理顺序追加到聊天历史末尾。"""
@@ -1274,7 +1282,7 @@ class MaisakaReasoningEngine:
self._runtime._chat_history.append(history_message)
return self._build_tool_success_result(
tool_call.func_name,
"可见回复已生成并发送。",
"回复已生成并发送。",
structured_content={
"msg_id": target_message_id,
"quote": quote_reply,
@@ -1385,3 +1393,154 @@ class MaisakaReasoningEngine:
tool_call.func_name,
"发送表情包失败。",
)
async def _handle_send_emoji(self, tool_call: ToolCall) -> ToolExecutionResult:
"""?????????????"""
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.common.utils.utils_image import ImageUtils
import random
tool_args = tool_call.args or {}
emotion = str(tool_args.get("emotion") or "").strip()
structured_result: dict[str, Any] = {
"success": False,
"message": "",
"description": "",
"emotion": [],
"requested_emotion": emotion,
}
logger.info(f"{self._runtime.log_prefix} ??????????: ??={emotion!r}")
if not emoji_manager.emojis:
structured_result["message"] = "??????????????"
return self._build_tool_failure_result(
tool_call.func_name,
structured_result["message"],
structured_content=structured_result,
)
selected_emoji = None
if emotion:
matching_emojis = [
emoji
for emoji in emoji_manager.emojis
if emotion.lower() in (item.lower() for item in emoji.emotion)
]
if matching_emojis:
selected_emoji = random.choice(matching_emojis)
logger.info(
f"{self._runtime.log_prefix} ?? {len(matching_emojis)} ????? {emotion!r} ?????"
f"????{selected_emoji.description}"
)
if selected_emoji is None:
selected_emoji = random.choice(emoji_manager.emojis)
logger.info(
f"{self._runtime.log_prefix} ????????? {emotion!r}?"
f"??????{selected_emoji.description}"
)
emoji_description = selected_emoji.description.strip()
emoji_emotions = [str(item).strip() for item in selected_emoji.emotion if str(item).strip()]
structured_result["description"] = emoji_description
structured_result["emotion"] = emoji_emotions
emoji_manager.update_emoji_usage(selected_emoji)
try:
emoji_base64 = ImageUtils.image_path_to_base64(str(selected_emoji.full_path))
if not emoji_base64:
raise ValueError("??????? base64 ??")
except Exception as exc:
logger.error(f"{self._runtime.log_prefix} ??????? base64 ??: {exc}")
structured_result["message"] = f"????????{exc}"
return self._build_tool_failure_result(
tool_call.func_name,
structured_result["message"],
structured_content=structured_result,
)
try:
sent = await send_service.emoji_to_stream(
emoji_base64=emoji_base64,
stream_id=self._runtime.session_id,
storage_message=True,
set_reply=False,
reply_message=None,
)
except Exception as exc:
logger.exception(f"{self._runtime.log_prefix} 发送表情包时发生异常: {exc}")
structured_result["message"] = f"发送表情包时发生异常:{exc}"
return self._build_tool_failure_result(
tool_call.func_name,
structured_result["message"],
structured_content=structured_result,
)
if sent:
success_message = (
f"???????{emoji_description}????{', '.join(emoji_emotions)}?"
if emoji_emotions
else f"???????{emoji_description}"
)
logger.info(
f"{self._runtime.log_prefix} ???????: "
f"??={selected_emoji.description!r} ????={selected_emoji.emotion}"
)
self._append_sent_emoji_to_chat_history(
emoji_base64=emoji_base64,
success_message=success_message,
)
structured_result["success"] = True
structured_result["message"] = success_message
return self._build_tool_success_result(
tool_call.func_name,
success_message,
structured_content=structured_result,
)
logger.warning(f"{self._runtime.log_prefix} 表情包发送失败")
structured_result["message"] = "表情包发送失败。"
return self._build_tool_failure_result(
tool_call.func_name,
structured_result["message"],
structured_content=structured_result,
)
def _append_sent_emoji_to_chat_history(
self,
*,
emoji_base64: str,
success_message: str,
) -> None:
"""? bot ?????????????? Maisaka ?????"""
bot_name = global_config.bot.nickname.strip() or "MaiSaka"
reply_timestamp = datetime.now()
planner_prefix = (
f"[??]{reply_timestamp.strftime('%H:%M:%S')}\n"
f"[??]{bot_name}\n"
"[?????]\n"
"[msg_id]\n"
"[????]"
)
history_message = SessionBackedMessage(
raw_message=MessageSequence(
[
TextComponent(planner_prefix),
EmojiComponent(
binary_hash="",
content=success_message,
binary_data=b64decode(emoji_base64),
),
]
),
visible_text=format_speaker_content(
bot_name,
"[???]",
reply_timestamp,
),
timestamp=reply_timestamp,
source_kind="guided_reply",
)
self._runtime._chat_history.append(history_message)
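The emotion matching inside _handle_send_emoji boils down to a case-insensitive membership test over each emoji's emotion tags, with a random fallback when nothing matches. A standalone sketch under that reading, with Emoji and pick_emoji as hypothetical stand-ins for the real emoji records managed by emoji_manager:

import random
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Emoji:
    description: str
    emotion: List[str]

def pick_emoji(emojis: List[Emoji], emotion: str) -> Optional[Emoji]:
    # Prefer emojis tagged with the requested emotion (case-insensitive);
    # otherwise fall back to any random emoji; None only when the library is empty.
    if not emojis:
        return None
    wanted = emotion.strip().lower()
    if wanted:
        matching = [e for e in emojis if wanted in (tag.lower() for tag in e.emotion)]
        if matching:
            return random.choice(matching)
    return random.choice(emojis)

library = [Emoji("laughing cat", ["happy", "funny"]), Emoji("crying dog", ["sad"])]
chosen = pick_emoji(library, "HAPPY")
print(chosen.description if chosen else "no emoji available")  # 'laughing cat'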


@@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
"""
记忆系统工具函数
包含模糊查找、相似度计算等工具函数
"""
import json
import re
from datetime import datetime
from typing import Tuple
from typing import List
from json_repair import repair_json
from src.common.logger import get_logger
logger = get_logger("memory_utils")
def parse_questions_json(response: str) -> Tuple[List[str], List[str]]:
"""解析问题JSON返回概念列表和问题列表
Args:
response: LLM返回的响应
Returns:
Tuple[List[str], List[str]]: (概念列表, 问题列表)
"""
try:
# 尝试提取JSON可能包含在```json代码块中
json_pattern = r"```json\s*(.*?)\s*```"
matches = re.findall(json_pattern, response, re.DOTALL)
if matches:
json_str = matches[0]
else:
# 尝试直接解析整个响应
json_str = response.strip()
# 修复可能的JSON错误
repaired_json = repair_json(json_str)
# 解析JSON
parsed = json.loads(repaired_json)
# 只支持新格式包含concepts和questions的对象
if not isinstance(parsed, dict):
logger.warning(f"解析的JSON不是对象格式: {parsed}")
return [], []
concepts_raw = parsed.get("concepts", [])
questions_raw = parsed.get("questions", [])
# 确保是列表
if not isinstance(concepts_raw, list):
concepts_raw = []
if not isinstance(questions_raw, list):
questions_raw = []
# 确保所有元素都是字符串
concepts = [c for c in concepts_raw if isinstance(c, str) and c.strip()]
questions = [q for q in questions_raw if isinstance(q, str) and q.strip()]
return concepts, questions
except Exception as e:
logger.error(f"解析问题JSON失败: {e}, 响应内容: {response[:200]}...")
return [], []
def parse_datetime_to_timestamp(value: str) -> float:
"""
接受多种常见格式并转换为时间戳(秒)
支持示例:
- 2025-09-29
- 2025-09-29 00:00:00
- 2025/09/29 00:00
- 2025-09-29T00:00:00
"""
value = value.strip()
fmts = [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y/%m/%d %H:%M:%S",
"%Y/%m/%d %H:%M",
"%Y-%m-%d",
"%Y/%m/%d",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%dT%H:%M",
]
last_err = None
for fmt in fmts:
try:
dt = datetime.strptime(value, fmt)
return dt.timestamp()
except Exception as e:
last_err = e
raise ValueError(f"无法解析时间: {value} ({last_err})")


@@ -103,7 +103,6 @@ async def generate_reply(
available_actions: Optional[Dict[str, ActionInfo]] = None,
chosen_actions: Optional[List["ActionPlannerInfo"]] = None,
unknown_words: Optional[List[str]] = None,
enable_tool: bool = False,
enable_splitter: bool = True,
enable_chinese_typo: bool = True,
request_type: str = "generator_api",
@@ -133,7 +132,6 @@ async def generate_reply(
extra_info=extra_info,
available_actions=available_actions,
chosen_actions=chosen_actions,
enable_tool=enable_tool,
reply_message=reply_message,
reply_reason=reply_reason,
unknown_words=unknown_words,


@@ -36,7 +36,6 @@ from src.config.official_configs import (
ResponsePostProcessConfig,
ResponseSplitterConfig,
TelemetryConfig,
ToolConfig,
VoiceConfig,
)
from src.webui.config_schema import ConfigSchemaGenerator
@@ -113,7 +112,6 @@ async def get_config_section_schema(section_name: str):
- experimental: ExperimentalConfig
- maim_message: MaimMessageConfig
- lpmm_knowledge: LPMMKnowledgeConfig
- tool: ToolConfig
- memory: MemoryConfig
- debug: DebugConfig
- voice: VoiceConfig
@@ -138,7 +136,6 @@ async def get_config_section_schema(section_name: str):
"experimental": ExperimentalConfig,
"maim_message": MaimMessageConfig,
"lpmm_knowledge": LPMMKnowledgeConfig,
"tool": ToolConfig,
"memory": MemoryConfig,
"debug": DebugConfig,
"voice": VoiceConfig,