tcmofashi
2026-01-16 07:34:11 +00:00
281 changed files with 51502 additions and 3157 deletions

View File

@@ -1,14 +1,14 @@
import time
from typing import Tuple, Optional  # added Optional
from src.common.logger_manager import get_logger
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from src.common.logger import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import random
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
from src.individuality.individuality import Individuality
from .observation_info import ObservationInfo
from .conversation_info import ConversationInfo
from src.plugins.utils.chat_message_builder import build_readable_messages
from src.chat.utils.chat_message_builder import build_readable_messages
logger = get_logger("pfc_action_planner")
@@ -113,12 +113,27 @@ class ActionPlanner:
max_tokens=1500,
request_type="action_planning",
)
self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.personality_info = self._get_personality_prompt()
self.name = global_config.BOT_NICKNAME
self.private_name = private_name
self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
# self.action_planner_info = ActionPlannerInfo()  # removed: unused variable
def _get_personality_prompt(self) -> str:
"""Build the personality prompt string."""
prompt_personality = global_config.personality.personality
# Randomly replace the base personality with one of the configured states
if (
global_config.personality.states
and global_config.personality.state_probability > 0
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.BOT_NICKNAME
return f"Your name is {bot_name}, and you {prompt_personality};"
# plan() signature changed: added the last_successful_reply_action parameter
async def plan(
self,

View File

@@ -3,22 +3,22 @@ import asyncio
import datetime
# from .message_storage import MongoDBMessageStorage
from src.plugins.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
# from ...config.config import global_config
# from src.config.config import global_config
from typing import Dict, Any, Optional
from ..chat.message import Message
from src.chat.message_receive.message import Message
from .pfc_types import ConversationState
from .pfc import ChatObserver, GoalAnalyzer
from .message_sender import DirectMessageSender
from src.common.logger_manager import get_logger
from src.common.logger import get_logger
from .action_planner import ActionPlanner
from .observation_info import ObservationInfo
from .conversation_info import ConversationInfo  # ensure ConversationInfo is imported
from .reply_generator import ReplyGenerator
from ..chat.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import ChatStream
from maim_message import UserInfo
from src.plugins.chat.chat_stream import chat_manager
from src.chat.message_receive.chat_stream import get_chat_manager
from .pfc_KnowledgeFetcher import KnowledgeFetcher
from .waiter import Waiter
@@ -60,7 +60,7 @@ class Conversation:
self.direct_sender = DirectMessageSender(self.private_name)
# Fetch the chat stream info
self.chat_stream = chat_manager.get_stream(self.stream_id)
self.chat_stream = get_chat_manager().get_stream(self.stream_id)
self.stop_action_planner = False
except Exception as e:
@@ -248,14 +248,14 @@ class Conversation:
def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
"""Convert a message dict into a Message object."""
try:
# Try to get chat_stream directly from msg_dict; if that fails, fetch it from the global chat_manager
# Try to get chat_stream directly from msg_dict; if that fails, fetch it via get_chat_manager()
chat_info = msg_dict.get("chat_info")
if chat_info and isinstance(chat_info, dict):
chat_stream = ChatStream.from_dict(chat_info)
elif self.chat_stream:  # use the chat_stream stored on the instance
chat_stream = self.chat_stream
else:  # Fallback: fetch from the manager (may need stream_id)
chat_stream = chat_manager.get_stream(self.stream_id)
chat_stream = get_chat_manager().get_stream(self.stream_id)
if not chat_stream:
raise ValueError(f"Cannot determine the ChatStream for stream_id {self.stream_id}")

View File

@@ -1,13 +1,11 @@
import time
from typing import Optional
from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream
from ..chat.message import Message
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.message import Message, MessageSending
from maim_message import UserInfo, Seg
from src.plugins.chat.message import MessageSending, MessageSet
from src.plugins.chat.message_sender import message_manager
from ..storage.storage import MessageStorage
from ...config.config import global_config
from src.chat.message_receive.storage import MessageStorage
from src.config.config import global_config
from rich.traceback import install
install(extra_lines=3)
@@ -66,15 +64,18 @@ class DirectMessageSender:
# Process the message
await message.process()
# Not sure what this is for; kept for parity with the previous sender implementation
_message_json = message.to_dict()
# Send the message
message_set = MessageSet(chat_stream, message_id)
message_set.add_message(message)
await message_manager.add_message(message_set)
await self.storage.store_message(message, chat_stream)
logger.info(f"[Private][{self.private_name}] PFC message sent: {content}")
# Send the message (calling the low-level API directly)
from src.chat.message_receive.uni_message_sender import _send_message
sent = await _send_message(message, show_log=True)
if sent:
# Store the message
await self.storage.store_message(message, chat_stream)
logger.info(f"[Private][{self.private_name}] PFC message sent: {content}")
else:
logger.error(f"[Private][{self.private_name}] PFC message send failed")
raise RuntimeError("Message send failed")
except Exception as e:
logger.error(f"[Private][{self.private_name}] PFC message send failed: {str(e)}")

View File

@@ -4,7 +4,7 @@ import time
from src.common.logger import get_module_logger
from .chat_observer import ChatObserver
from .chat_states import NotificationHandler, NotificationType, Notification
from src.plugins.utils.chat_message_builder import build_readable_messages
from src.chat.utils.chat_message_builder import build_readable_messages
import traceback  # for debugging
logger = get_module_logger("observation_info")

View File

@@ -1,13 +1,13 @@
from typing import List, Tuple, TYPE_CHECKING
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import random
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
from src.individuality.individuality import Individuality
from .conversation_info import ConversationInfo
from .observation_info import ObservationInfo
from src.plugins.utils.chat_message_builder import build_readable_messages
from src.chat.utils.chat_message_builder import build_readable_messages
from rich.traceback import install
install(extra_lines=3)
@@ -46,7 +46,7 @@ class GoalAnalyzer:
model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
)
self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.personality_info = self._get_personality_prompt()
self.name = global_config.BOT_NICKNAME
self.nick_name = global_config.BOT_ALIAS_NAMES
self.private_name = private_name
@@ -57,6 +57,21 @@ class GoalAnalyzer:
self.max_goals = 3  # maximum number of goals maintained at once
self.current_goal_and_reason = None
def _get_personality_prompt(self) -> str:
"""Build the personality prompt string."""
prompt_personality = global_config.personality.personality
# Randomly replace the base personality with one of the configured states
if (
global_config.personality.states
and global_config.personality.state_probability > 0
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.bot.nickname
return f"Your name is {bot_name}, and you {prompt_personality};"
async def analyze_goal(self, conversation_info: ConversationInfo, observation_info: ObservationInfo):
"""Analyze the conversation history and set goals

View File

@@ -1,11 +1,11 @@
from typing import List, Tuple
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from ..chat.message import Message
from ..knowledge.knowledge_lib import qa_manager
from ..utils.chat_message_builder import build_readable_messages
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.message_receive.message import Message
from src.chat.knowledge.knowledge_lib import qa_manager
from src.chat.utils.chat_message_builder import build_readable_messages
logger = get_module_logger("knowledge_fetcher")

View File

@@ -1,8 +1,8 @@
import json
from typing import Tuple, List, Dict, Any
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from .chat_observer import ChatObserver
from maim_message import UserInfo

View File

@@ -1,15 +1,15 @@
from typing import Tuple, List, Dict, Any
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from src.common.logger import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import random
from .chat_observer import ChatObserver
from .reply_checker import ReplyChecker
from src.individuality.individuality import Individuality
from .observation_info import ObservationInfo
from .conversation_info import ConversationInfo
from src.plugins.utils.chat_message_builder import build_readable_messages
from src.chat.utils.chat_message_builder import build_readable_messages
logger = get_module_logger("reply_generator")
logger = get_logger("reply_generator")
# --- Prompt templates ---
@@ -92,12 +92,27 @@ class ReplyGenerator:
max_tokens=300,
request_type="reply_generation",
)
self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
self.personality_info = self._get_personality_prompt()
self.name = global_config.BOT_NICKNAME
self.private_name = private_name
self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
self.reply_checker = ReplyChecker(stream_id, private_name)
def _get_personality_prompt(self) -> str:
"""Build the personality prompt string."""
prompt_personality = global_config.personality.personality
# Randomly replace the base personality with one of the configured states
if (
global_config.personality.states
and global_config.personality.state_probability > 0
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.BOT_NICKNAME
return f"Your name is {bot_name}, and you {prompt_personality};"
# generate() signature changed: added the action_type parameter
async def generate(
self, observation_info: ObservationInfo, conversation_info: ConversationInfo, action_type: str

View File

@@ -3,7 +3,7 @@ from .chat_observer import ChatObserver
from .conversation_info import ConversationInfo
# from src.individuality.individuality import Individuality  # no longer needed
from ...config.config import global_config
from src.config.config import global_config
import time
import asyncio

View File

@@ -87,6 +87,7 @@ class BrainChatting:
# Internal loop-control state
self.running: bool = False
self._loop_task: Optional[asyncio.Task] = None  # main loop task
self._new_message_event = asyncio.Event()  # new-message event, used to interrupt wait
# Attributes for cycle bookkeeping
self.history_loop: List[CycleDetail] = []
@@ -173,9 +174,10 @@ class BrainChatting:
filter_intercept_message_level=1,
)
# If there are new messages, update last_read_time
# If there are new messages, update last_read_time and set the event to interrupt an in-flight wait
if len(recent_messages_list) >= 1:
self.last_read_time = time.time()
self._new_message_event.set()  # fire the new-message event to interrupt wait
# Always run one thinking iteration (whether or not there are new messages)
# The wait action blocks internally, so nothing to handle here
@@ -434,6 +436,9 @@ class BrainChatting:
last_check_time = self.last_read_time
check_interval = 1.0  # check once per second
# Clear the event state, ready to wait for new messages
self._new_message_event.clear()
while self.running:
# Check for new messages
recent_messages_list = message_api.get_messages_by_time_in_chat(
@@ -453,8 +458,15 @@ class BrainChatting:
logger.info(f"{self.log_prefix} New message detected, resuming loop")
return
# Sleep before checking again
await asyncio.sleep(check_interval)
# Wait for the new-message event, or time out and check again
try:
await asyncio.wait_for(self._new_message_event.wait(), timeout=check_interval)
# The event fired: a new message arrived
logger.info(f"{self.log_prefix} New-message event detected, resuming loop")
return
except asyncio.TimeoutError:
# Timed out; keep checking
continue
async def _handle_action(
self,
@@ -674,7 +686,10 @@ class BrainChatting:
logger.warning(f"{self.log_prefix} Malformed wait_seconds parameter, using the default of 5 seconds")
wait_seconds = 5
logger.info(f"{self.log_prefix} Executing wait action, waiting {wait_seconds}s")
logger.info(f"{self.log_prefix} Executing wait action, waiting {wait_seconds}s (interruptible by new messages)")
# Clear the event state, ready to wait for new messages
self._new_message_event.clear()
# Record the action
await database_api.store_action_info(
@@ -687,8 +702,17 @@ class BrainChatting:
action_name="wait",
)
# Wait for the given duration
await asyncio.sleep(wait_seconds)
# Wait for the given duration, interruptible by new messages
try:
await asyncio.wait_for(
self._new_message_event.wait(),
timeout=wait_seconds
)
# The event fired: a new message arrived
logger.info(f"{self.log_prefix} wait action interrupted by a new message, ending the wait early")
except asyncio.TimeoutError:
# Timed out normally
pass
logger.info(f"{self.log_prefix} wait action finished, continuing to the next thinking cycle")
@@ -707,7 +731,10 @@ class BrainChatting:
# Use the default wait duration
wait_seconds = 3
logger.info(f"{self.log_prefix} Executing listening (converted to wait) action, waiting {wait_seconds}s")
logger.info(f"{self.log_prefix} Executing listening (converted to wait) action, waiting {wait_seconds}s (interruptible by new messages)")
# Clear the event state, ready to wait for new messages
self._new_message_event.clear()
# Record the action
await database_api.store_action_info(
@@ -720,8 +747,17 @@ class BrainChatting:
action_name="listening",
)
# Wait for the given duration
await asyncio.sleep(wait_seconds)
# Wait for the given duration, interruptible by new messages
try:
await asyncio.wait_for(
self._new_message_event.wait(),
timeout=wait_seconds
)
# The event fired: a new message arrived
logger.info(f"{self.log_prefix} listening action interrupted by a new message, ending the wait early")
except asyncio.TimeoutError:
# Timed out normally
pass
logger.info(f"{self.log_prefix} listening action finished, continuing to the next thinking cycle")
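Both the wait and listening handlers above now share one pattern: instead of a fixed asyncio.sleep, they asyncio.wait_for on an asyncio.Event that the message-reading path sets, so a new message cuts the wait short. A standalone sketch of that pattern (the names here are illustrative, not the project's):

import asyncio

async def interruptible_wait(event: asyncio.Event, seconds: float) -> bool:
    """Wait up to `seconds`; True means the event interrupted the wait, False means timeout."""
    event.clear()  # arm the event before waiting
    try:
        await asyncio.wait_for(event.wait(), timeout=seconds)
        return True  # event.set() was called, e.g. a new message arrived
    except asyncio.TimeoutError:
        return False  # the full wait elapsed uninterrupted

async def main():
    event = asyncio.Event()
    # Simulate a message arriving 1s into a 5s wait
    asyncio.get_running_loop().call_later(1.0, event.set)
    print("interrupted" if await interruptible_wait(event, 5.0) else "timed out")

asyncio.run(main())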

View File

@@ -402,7 +402,7 @@ class BrainPlanner:
moderation_prompt=moderation_prompt_block,
name_block=name_block,
interest=interest,
plan_style=global_config.personality.private_plan_style,
plan_style=global_config.experimental.private_plan_style,
)
return prompt, message_id_list

View File

@@ -244,12 +244,14 @@ class HeartFChatting:
thinking_id,
actions,
selected_expressions: Optional[List[int]] = None,
quote_message: Optional[bool] = None,
) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
with Timer("回复发送", cycle_timers):
reply_text = await self._send_response(
reply_set=response_set,
message_data=action_message,
selected_expressions=selected_expressions,
quote_message=quote_message,
)
# Get platform; if absent, read it from chat_stream, and if still None fall back to the default
@@ -526,15 +528,26 @@ class HeartFChatting:
reply_set: "ReplySetModel",
message_data: "DatabaseMessages",
selected_expressions: Optional[List[int]] = None,
quote_message: Optional[bool] = None,
) -> str:
new_message_count = message_api.count_new_messages(
chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
)
need_reply = new_message_count >= random.randint(2, 3)
if need_reply:
logger.info(f"{self.log_prefix} From thinking to replying: {new_message_count} new messages, using a quoted reply")
# Use the llm_quote config to decide whether the quote_message parameter applies
if global_config.chat.llm_quote:
# If set to true, the LLM-provided quote parameter decides whether to quote
if quote_message is None:
logger.warning(f"{self.log_prefix} quote_message parameter is empty, not quoting")
need_reply = False
else:
need_reply = quote_message
if need_reply:
logger.info(f"{self.log_prefix} The LLM chose to use a quoted reply")
else:
# If set to false, use the original mode
new_message_count = message_api.count_new_messages(
chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
)
need_reply = new_message_count >= random.randint(2, 3) or time.time() - self.last_read_time > 90
if need_reply:
logger.info(f"{self.log_prefix} From thinking to replying: {new_message_count} new messages (using a quoted reply), or more than 90s since the last read")
reply_text = ""
first_replied = False
@@ -640,6 +653,7 @@ class HeartFChatting:
# Extract the unknown-words list from the Planner's action_data (used only for reply)
unknown_words = None
quote_message = None
if isinstance(action_planner_info.action_data, dict):
uw = action_planner_info.action_data.get("unknown_words")
if isinstance(uw, list):
@@ -651,6 +665,19 @@ class HeartFChatting:
cleaned_uw.append(s)
if cleaned_uw:
unknown_words = cleaned_uw
# Extract the quote_message parameter from the Planner's action_data
qm = action_planner_info.action_data.get("quote")
if qm is not None:
# Accept several formats: true/false, "true"/"false", 1/0
if isinstance(qm, bool):
quote_message = qm
elif isinstance(qm, str):
quote_message = qm.lower() in ("true", "1", "yes")
elif isinstance(qm, (int, float)):
quote_message = bool(qm)
logger.info(f"{self.log_prefix} {qm} -> quote-reply setting: {quote_message}")
success, llm_response = await generator_api.generate_reply(
chat_stream=self.chat_stream,
@@ -682,6 +709,7 @@ class HeartFChatting:
thinking_id=thinking_id,
actions=chosen_action_plan_infos,
selected_expressions=selected_expressions,
quote_message=quote_message,
)
self.last_active_time = time.time()
return {
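Since the quote field arrives as whatever JSON the planner LLM emitted, the extraction above tolerates bools, strings, and numbers. That coercion could be factored into a small helper; a sketch under the same tolerant rules (the helper is hypothetical, not part of this diff):

from typing import Any, Optional

def coerce_llm_bool(value: Any) -> Optional[bool]:
    """Best-effort bool from an LLM JSON field; None means 'not provided'."""
    if value is None or isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in ("true", "1", "yes")
    if isinstance(value, (int, float)):
        return bool(value)
    return None  # unrecognized type: treat as unset

assert coerce_llm_bool("True") is True
assert coerce_llm_bool(0) is False
assert coerce_llm_bool(None) is None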

View File

@@ -1,7 +1,7 @@
import asyncio
import json
import time
from typing import List, Union
from typing import List, Union, Dict, Any, Optional
from .global_logger import logger
from . import prompt_template
@@ -173,3 +173,50 @@ def info_extract_from_str(
return None, None
return entity_extract_result, rdf_triple_extract_result
class IEProcess:
"""
An information-extraction processor that provides a more convenient batch interface.
"""
def __init__(self, llm_ner: LLMRequest, llm_rdf: Optional[LLMRequest] = None):
self.llm_ner = llm_ner
self.llm_rdf = llm_rdf or llm_ner
async def process_paragraphs(self, paragraphs: List[str]) -> List[dict]:
"""
Process multiple paragraphs asynchronously.
"""
from .utils.hash import get_sha256
results = []
total = len(paragraphs)
for i, pg in enumerate(paragraphs, start=1):
# Log progress so users can tell the process isn't stuck
logger.info(f"[IEProcess] Processing paragraph {i}/{total} (length: {len(pg)})...")
# Wrap the synchronous blocking call with asyncio.to_thread to avoid deadlock:
# the asyncio.run inside info_extract_from_str then runs in a fresh loop on a separate thread
try:
entities, triples = await asyncio.to_thread(
info_extract_from_str, self.llm_ner, self.llm_rdf, pg
)
if entities is not None:
results.append(
{
"idx": get_sha256(pg),
"passage": pg,
"extracted_entities": entities,
"extracted_triples": triples,
}
)
logger.info(f"[IEProcess] Paragraph {i}/{total} done; extracted {len(entities)} entities")
else:
logger.warning(f"[IEProcess] Paragraph {i}/{total} extraction failed (empty result)")
except Exception as e:
logger.error(f"[IEProcess] Exception while processing paragraph {i}/{total}: {e}")
return results
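The non-obvious part of process_paragraphs is the threading: info_extract_from_str is synchronous and (per the comment) calls asyncio.run internally, which would fail inside an already-running event loop and would block the loop even if it didn't. asyncio.to_thread moves the call onto a worker thread, where starting a fresh loop is legal while the main loop stays responsive. A minimal sketch of that shape, with a simulated blocking extractor:

import asyncio
import time

def blocking_extract(text: str) -> int:
    # Stand-in for a sync function that may even call asyncio.run internally
    time.sleep(0.5)
    return len(text.split())

async def main():
    texts = ["first paragraph", "a second, longer paragraph"]
    for i, t in enumerate(texts, start=1):
        # Off-load the blocking call; the event loop keeps servicing other tasks meanwhile
        n = await asyncio.to_thread(blocking_extract, t)
        print(f"paragraph {i}/{len(texts)}: {n} tokens")

asyncio.run(main())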

View File

@@ -0,0 +1,388 @@
import asyncio
import os
from functools import partial
from typing import List, Callable, Any
from src.chat.knowledge.embedding_store import EmbeddingManager
from src.chat.knowledge.kg_manager import KGManager
from src.chat.knowledge.qa_manager import QAManager
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.knowledge import get_qa_manager, lpmm_start_up
logger = get_logger("LPMM-Plugin-API")
class LPMMOperations:
"""
Internal LPMM operations interface.
Wraps LPMM's core operations for the plugin-system API and other internal components.
"""
def __init__(self):
self._initialized = False
async def _run_cancellable_executor(
self, func: Callable, *args, **kwargs
) -> Any:
"""
Run a cancellable synchronous operation in the thread pool.
When the task is cancelled (e.g. via Ctrl+C), the coroutine responds immediately and raises CancelledError.
Note: the thread-pool operation may still be running, but the coroutine returns right away and does not block the main process.
Args:
func: the synchronous function to run
*args: positional arguments for the function
**kwargs: keyword arguments for the function
Returns:
The function's return value
Raises:
asyncio.CancelledError: when the task is cancelled
"""
loop = asyncio.get_running_loop()
# Run in the thread pool; the coroutine responds to cancellation immediately,
# even though the thread-pool operation itself may keep running.
# run_in_executor does not accept **kwargs, so bind them with functools.partial
return await loop.run_in_executor(None, partial(func, *args, **kwargs))
async def _get_managers(self) -> tuple[EmbeddingManager, KGManager, QAManager]:
"""Get the LPMM managers, making sure they are initialized"""
qa_mgr = get_qa_manager()
if qa_mgr is None:
# Not initialized globally; try to initialize it
if not global_config.lpmm_knowledge.enable:
logger.warning("The LPMM knowledge base is disabled in the global config; operations may be limited.")
lpmm_start_up()
qa_mgr = get_qa_manager()
if qa_mgr is None:
raise RuntimeError("Cannot get the LPMM QAManager; check that LPMM is installed and configured correctly.")
return qa_mgr.embed_manager, qa_mgr.kg_manager, qa_mgr
async def add_content(self, text: str, auto_split: bool = True) -> dict:
"""
Add new content to the knowledge base.
Args:
text: the raw text.
auto_split: whether to auto-split paragraphs on double newlines.
- True: auto-split (default); supports multi-paragraph text separated by blank lines
- False: no splitting; the whole text is treated as one complete paragraph
Returns:
dict: {"status": "success/error", "count": number of imported paragraphs, "message": "description"}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# 1. Paragraph splitting
if auto_split:
# Split automatically on double newlines
paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
else:
# No splitting: treat the whole text as one paragraph
text_stripped = text.strip()
if not text_stripped:
return {"status": "error", "message": "Text content is empty"}
paragraphs = [text_stripped]
if not paragraphs:
return {"status": "error", "message": "Text content is empty"}
# 2. Entity and triple extraction (calls the LLM internally)
from src.chat.knowledge.ie_process import IEProcess
from src.llm_models.utils_model import LLMRequest
from src.config.config import model_config
llm_ner = LLMRequest(model_set=model_config.model_task_config.lpmm_entity_extract, request_type="lpmm.entity_extract")
llm_rdf = LLMRequest(model_set=model_config.model_task_config.lpmm_rdf_build, request_type="lpmm.rdf_build")
ie_process = IEProcess(llm_ner, llm_rdf)
logger.info(f"[Plugin API] Running information extraction on {len(paragraphs)} paragraphs...")
extracted_docs = await ie_process.process_paragraphs(paragraphs)
# 3. Build and import the data
# The import logic is implemented by hand here, with no dependency on external scripts
# a. Prepare the paragraphs
raw_paragraphs = {doc["idx"]: doc["passage"] for doc in extracted_docs}
# b. Prepare the triples
triple_list_data = {doc["idx"]: doc["extracted_triples"] for doc in extracted_docs}
# Embed and store
# Note: this mirrors the core logic of import_openie.py
# 1. Deduplicate first, processing only new paragraphs
# store_new_data_set expects raw_paragraphs keyed by paragraph hash (without the prefix), valued by the paragraph text
new_raw_paragraphs = {}
new_triple_list_data = {}
for pg_hash, passage in raw_paragraphs.items():
key = f"paragraph-{pg_hash}"
if key not in embed_mgr.stored_pg_hashes:
new_raw_paragraphs[pg_hash] = passage
new_triple_list_data[pg_hash] = triple_list_data[pg_hash]
if not new_raw_paragraphs:
return {"status": "success", "count": 0, "message": "Content already exists; nothing to import"}
# 2. Store paragraph, entity, and relation embeddings via EmbeddingManager's standard method
# store_new_data_set handles embedding generation and storage automatically
# Run the blocking call in the thread pool to avoid blocking the event loop
await self._run_cancellable_executor(
embed_mgr.store_new_data_set,
new_raw_paragraphs,
new_triple_list_data
)
# 3. Build the knowledge graph (needs only the triple data and the embedding_manager)
await self._run_cancellable_executor(
kg_mgr.build_kg,
new_triple_list_data,
embed_mgr
)
# 4. Persist
await self._run_cancellable_executor(embed_mgr.rebuild_faiss_index)
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
return {"status": "success", "count": len(new_raw_paragraphs), "message": f"Successfully imported {len(new_raw_paragraphs)} knowledge entries"}
except asyncio.CancelledError:
logger.warning("[Plugin API] Import interrupted by the user")
return {"status": "cancelled", "message": "The import was interrupted by the user"}
except Exception as e:
logger.error(f"[Plugin API] Knowledge import failed: {e}", exc_info=True)
return {"status": "error", "message": str(e)}
async def search(self, query: str, top_k: int = 3) -> List[str]:
"""
Search the knowledge base.
Args:
query: the query question.
top_k: number of most relevant entries to return.
Returns:
List[str]: list of relevant passages.
"""
try:
_, _, qa_mgr = await self._get_managers()
# Call QAManager's retrieval interface directly
knowledge = qa_mgr.get_knowledge(query, top_k=top_k)
# The result is usually a single concatenated string; it could be split back into a list by QAManager's internal rules, but it is returned as-is here
return [knowledge] if knowledge else []
except Exception as e:
logger.error(f"[Plugin API] Knowledge retrieval failed: {e}")
return []
async def delete(self, keyword: str, exact_match: bool = False) -> dict:
"""
Delete knowledge-base content by keyword or full passage.
Args:
keyword: the keyword or full passage to match.
exact_match: whether to match the full passage (True = exact match, False = fuzzy keyword match).
Returns:
dict: {"status": "success/info", "deleted_count": number of entries deleted, "message": "description"}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# 1. Find matching paragraphs
to_delete_keys = []
to_delete_hashes = []
for key, item in embed_mgr.paragraphs_embedding_store.store.items():
if exact_match:
# Exact passage match
if item.str.strip() == keyword.strip():
to_delete_keys.append(key)
to_delete_hashes.append(key.replace("paragraph-", "", 1))
else:
# Fuzzy keyword match
if keyword in item.str:
to_delete_keys.append(key)
to_delete_hashes.append(key.replace("paragraph-", "", 1))
if not to_delete_keys:
match_type = "exact passage" if exact_match else "keyword"
return {"status": "info", "deleted_count": 0, "message": f"No matching content found ({match_type} match)"}
# 2. Perform the deletion
# Run blocking calls in the thread pool to avoid blocking the event loop
# a. Delete from the vector store
deleted_count, _ = await self._run_cancellable_executor(
embed_mgr.paragraphs_embedding_store.delete_items,
to_delete_keys
)
embed_mgr.stored_pg_hashes = set(embed_mgr.paragraphs_embedding_store.store.keys())
# b. Delete from the knowledge graph
# Note: keyword arguments are required here so True isn't mistaken for the ent_hashes parameter
# Pass keyword args via partial, since run_in_executor doesn't support **kwargs
delete_func = partial(
kg_mgr.delete_paragraphs,
to_delete_hashes,
ent_hashes=None,
remove_orphan_entities=True
)
await self._run_cancellable_executor(delete_func)
# 3. Persist
await self._run_cancellable_executor(embed_mgr.rebuild_faiss_index)
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
match_type = "exact passage" if exact_match else "keyword"
return {"status": "success", "deleted_count": deleted_count, "message": f"Successfully deleted {deleted_count} knowledge entries ({match_type} match)"}
except asyncio.CancelledError:
logger.warning("[Plugin API] Deletion interrupted by the user")
return {"status": "cancelled", "message": "The deletion was interrupted by the user"}
except Exception as e:
logger.error(f"[Plugin API] Knowledge deletion failed: {e}", exc_info=True)
return {"status": "error", "message": str(e)}
async def clear_all(self) -> dict:
"""
Clear the entire LPMM knowledge base (delete all paragraphs, entities, relations, and knowledge-graph data)
Returns:
dict: {"status": "success/error", "message": "description", "stats": {...}}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# Record stats before clearing
before_stats = {
"paragraphs": len(embed_mgr.paragraphs_embedding_store.store),
"entities": len(embed_mgr.entities_embedding_store.store),
"relations": len(embed_mgr.relation_embedding_store.store),
"kg_nodes": len(kg_mgr.graph.get_node_list()),
"kg_edges": len(kg_mgr.graph.get_edge_list()),
}
# Run blocking calls in the thread pool to avoid blocking the event loop
# 1. Clear all vector stores
# Collect all keys
para_keys = list(embed_mgr.paragraphs_embedding_store.store.keys())
ent_keys = list(embed_mgr.entities_embedding_store.store.keys())
rel_keys = list(embed_mgr.relation_embedding_store.store.keys())
# Delete all paragraph embeddings
para_deleted, _ = await self._run_cancellable_executor(
embed_mgr.paragraphs_embedding_store.delete_items,
para_keys
)
embed_mgr.stored_pg_hashes.clear()
# Delete all entity embeddings
if ent_keys:
ent_deleted, _ = await self._run_cancellable_executor(
embed_mgr.entities_embedding_store.delete_items,
ent_keys
)
else:
ent_deleted = 0
# Delete all relation embeddings
if rel_keys:
rel_deleted, _ = await self._run_cancellable_executor(
embed_mgr.relation_embedding_store.delete_items,
rel_keys
)
else:
rel_deleted = 0
# 2. Clear every embedding store's index and mapping
# Make sure faiss_index and idx2hash are reset too, and delete the old index files
def _clear_embedding_indices():
# Clear the paragraph index
embed_mgr.paragraphs_embedding_store.faiss_index = None
embed_mgr.paragraphs_embedding_store.idx2hash = None
embed_mgr.paragraphs_embedding_store.dirty = False
# Delete the old index files
if os.path.exists(embed_mgr.paragraphs_embedding_store.index_file_path):
os.remove(embed_mgr.paragraphs_embedding_store.index_file_path)
if os.path.exists(embed_mgr.paragraphs_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.paragraphs_embedding_store.idx2hash_file_path)
# Clear the entity index
embed_mgr.entities_embedding_store.faiss_index = None
embed_mgr.entities_embedding_store.idx2hash = None
embed_mgr.entities_embedding_store.dirty = False
# Delete the old index files
if os.path.exists(embed_mgr.entities_embedding_store.index_file_path):
os.remove(embed_mgr.entities_embedding_store.index_file_path)
if os.path.exists(embed_mgr.entities_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.entities_embedding_store.idx2hash_file_path)
# Clear the relation index
embed_mgr.relation_embedding_store.faiss_index = None
embed_mgr.relation_embedding_store.idx2hash = None
embed_mgr.relation_embedding_store.dirty = False
# Delete the old index files
if os.path.exists(embed_mgr.relation_embedding_store.index_file_path):
os.remove(embed_mgr.relation_embedding_store.index_file_path)
if os.path.exists(embed_mgr.relation_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.relation_embedding_store.idx2hash_file_path)
await self._run_cancellable_executor(_clear_embedding_indices)
# 3. Clear the knowledge graph
# Collect all paragraph hashes
all_pg_hashes = list(kg_mgr.stored_paragraph_hashes)
if all_pg_hashes:
# Delete every paragraph node (this also cleans up related edges and orphan entities)
# Note: keyword arguments are required here so True isn't mistaken for the ent_hashes parameter
# Pass keyword args via partial, since run_in_executor doesn't support **kwargs
delete_func = partial(
kg_mgr.delete_paragraphs,
all_pg_hashes,
ent_hashes=None,
remove_orphan_entities=True
)
await self._run_cancellable_executor(delete_func)
# Fully reset the KG by creating a fresh empty graph (done whether or not any paragraph hashes existed)
from quick_algo import di_graph
kg_mgr.graph = di_graph.DiGraph()
kg_mgr.stored_paragraph_hashes.clear()
kg_mgr.ent_appear_cnt.clear()
# 4. Save everything (all stores are empty now and the indices are None)
# Note: even with empty stores, save_to_file writes empty DataFrames, which is correct
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
after_stats = {
"paragraphs": len(embed_mgr.paragraphs_embedding_store.store),
"entities": len(embed_mgr.entities_embedding_store.store),
"relations": len(embed_mgr.relation_embedding_store.store),
"kg_nodes": len(kg_mgr.graph.get_node_list()),
"kg_edges": len(kg_mgr.graph.get_edge_list()),
}
return {
"status": "success",
"message": f"LPMM knowledge base cleared ({para_deleted} paragraphs, {ent_deleted} entities, {rel_deleted} relations deleted)",
"stats": {
"before": before_stats,
"after": after_stats,
}
}
except asyncio.CancelledError:
logger.warning("[Plugin API] Clear operation interrupted by the user")
return {"status": "cancelled", "message": "The clear operation was interrupted by the user"}
except Exception as e:
logger.error(f"[Plugin API] Failed to clear the knowledge base: {e}", exc_info=True)
return {"status": "error", "message": str(e)}
# Singleton for internal use
lpmm_ops = LPMMOperations()
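Going by the method signatures and docstrings above, a plugin might drive the singleton like this (a hedged usage sketch: the handle is passed in as a parameter because the module's import path depends on where this file sits in the repo):

async def demo(lpmm_ops) -> None:
    # Import two paragraphs (auto-split on the blank line)
    result = await lpmm_ops.add_content("First fact.\n\nSecond fact.", auto_split=True)
    print(result["status"], result.get("count"))
    # Retrieve the top-3 relevant passages
    for passage in await lpmm_ops.search("What is the first fact?", top_k=3):
        print(passage)
    # Remove anything mentioning "Second" (fuzzy keyword match)
    print((await lpmm_ops.delete("Second", exact_match=False))["message"])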

View File

@@ -7,9 +7,8 @@ from .kg_manager import KGManager
# from .lpmmconfig import global_config
from .utils.dyn_topk import dyn_select_top_k
from src.llm_models.utils_model import LLMRequest
from src.chat.utils.utils import get_embedding
from src.config.config import global_config, model_config
from src.config.config import global_config
MAX_KNOWLEDGE_LENGTH = 10000  # maximum knowledge length

View File

@@ -4,6 +4,8 @@ from pathlib import Path
from typing import Any, Dict, List, Optional
from uuid import uuid4
from src.config.config import global_config
class PlanReplyLogger:
"""Standalone plan/reply logger, responsible for persisting to disk and capping capacity."""
@@ -11,9 +13,13 @@ class PlanReplyLogger:
_BASE_DIR = Path("logs")
_PLAN_DIR = _BASE_DIR / "plan"
_REPLY_DIR = _BASE_DIR / "reply"
_MAX_PER_CHAT = 1000
_TRIM_COUNT = 100
@classmethod
def _get_max_per_chat(cls) -> int:
"""Read the per-chat maximum number of saved log files from config"""
return getattr(global_config.chat, "plan_reply_log_max_per_chat", 1000)
@classmethod
def log_plan(
cls,
@@ -85,7 +91,8 @@ class PlanReplyLogger:
def _trim_overflow(cls, chat_dir: Path) -> None:
"""Delete the oldest files once past the threshold so the directory doesn't grow without bound."""
files = sorted(chat_dir.glob("*.json"), key=lambda p: p.stat().st_mtime)
if len(files) <= cls._MAX_PER_CHAT:
max_per_chat = cls._get_max_per_chat()
if len(files) <= max_per_chat:
return
# Delete the oldest TRIM_COUNT files
for old_file in files[: cls._TRIM_COUNT]:

View File

@@ -1,4 +1,5 @@
import time
import asyncio
import urllib3
from abc import abstractmethod
@@ -20,6 +21,9 @@ logger = get_logger("chat_message")
# 禁用SSL警告
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# VLM concurrency limit (avoid stalling by processing too many images at once)
_vlm_semaphore = asyncio.Semaphore(3)
# This is the message data class, used to store and manage message data.
# It defines message attributes including group ID, user ID, message ID, raw message content, plain-text content, and timestamp.
# It also defines two helper properties: keywords, which extracts the message's keywords, and is_plain_text, which tells whether the message is plain text.
@@ -73,20 +77,35 @@ class Message(MessageBase):
str: the processed text
"""
if segment.type == "seglist":
# Process a list of message segments
# Process a list of message segments - handled in parallel for better performance
tasks = [self._process_message_segments(seg) for seg in segment.data] # type: ignore
results = await asyncio.gather(*tasks, return_exceptions=True)
segments_text = []
for seg in segment.data:
processed = await self._process_message_segments(seg) # type: ignore
if processed:
segments_text.append(processed)
for result in results:
if isinstance(result, Exception):
logger.error(f"Error while processing a message segment: {result}")
continue
if result:
segments_text.append(result)
return " ".join(segments_text)
elif segment.type == "forward":
segments_text = []
for node_dict in segment.data:
# Process the forwarded message - in parallel
async def process_forward_node(node_dict):
message = MessageBase.from_dict(node_dict) # type: ignore
processed_text = await self._process_message_segments(message.message_segment)
if processed_text:
segments_text.append(f"{global_config.bot.nickname}: {processed_text}")
return f"{global_config.bot.nickname}: {processed_text}"
return None
tasks = [process_forward_node(node_dict) for node_dict in segment.data]
results = await asyncio.gather(*tasks, return_exceptions=True)
segments_text = []
for result in results:
if isinstance(result, Exception):
logger.error(f"处理转发节点时出错: {result}")
continue
if result:
segments_text.append(result)
return "[Merged messages]: " + "\n-- ".join(segments_text)
else:
# Process a single message segment
@@ -173,8 +192,9 @@ class MessageRecv(Message):
self.is_picid = True
self.is_emoji = False
image_manager = get_image_manager()
# print(f"segment.data: {segment.data}")
_, processed_text = await image_manager.process_image(segment.data)
# Use the semaphore to cap VLM concurrency so too many images aren't processed at once
async with _vlm_semaphore:
_, processed_text = await image_manager.process_image(segment.data)
return processed_text
return "[Sent an image, but the network lagged and it couldn't load]"
elif segment.type == "emoji":
@@ -183,7 +203,9 @@ class MessageRecv(Message):
self.is_picid = False
self.is_voice = False
if isinstance(segment.data, str):
return await get_image_manager().get_emoji_description(segment.data)
# Use the semaphore to cap VLM concurrency
async with _vlm_semaphore:
return await get_image_manager().get_emoji_description(segment.data)
return "[Sent a sticker, but the network lagged and it couldn't load]"
elif segment.type == "voice":
self.is_picid = False
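The two changes in this file compose: asyncio.gather fans segment processing out in parallel, while the module-level asyncio.Semaphore(3) caps how many VLM (image) calls are in flight at once. A minimal sketch of gather-under-a-semaphore, with the model call simulated:

import asyncio
import random

async def describe_image(sem: asyncio.Semaphore, idx: int) -> str:
    async with sem:  # at most `sem`'s worth of concurrent "VLM" calls
        await asyncio.sleep(random.uniform(0.1, 0.3))  # simulated model latency
        return f"description-{idx}"

async def main():
    sem = asyncio.Semaphore(3)
    tasks = [describe_image(sem, i) for i in range(10)]
    # return_exceptions=True keeps one failure from cancelling the rest
    for r in await asyncio.gather(*tasks, return_exceptions=True):
        if isinstance(r, Exception):
            print("failed:", r)
        else:
            print(r)

asyncio.run(main())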

View File

@@ -4,6 +4,7 @@ import traceback
import random
import re
from typing import Dict, Optional, Tuple, List, TYPE_CHECKING, Union
from collections import OrderedDict
from rich.traceback import install
from datetime import datetime
from json_repair import repair_json
@@ -53,7 +54,6 @@ reply
4. Don't choose to reply to messages you sent yourself
5. Don't reply to a sticker by itself
6. Write every ambiguous, slang-like abbreviation from the context into unknown_words
7. If the context leaves you with a question you need to look up, write it into question
{reply_action_example}
no_reply
@@ -111,6 +111,10 @@ class ActionPlanner:
self.last_obs_time_mark = 0.0
self.plan_log: List[Tuple[str, float, Union[List[ActionPlannerInfo], str]]] = []
# Jargon cache: an OrderedDict used as an LRU, capped at 10 entries
self.unknown_words_cache: OrderedDict[str, None] = OrderedDict()
self.unknown_words_cache_limit = 10
def find_message_by_id(
self, message_id: str, message_id_list: List[Tuple[str, "DatabaseMessages"]]
@@ -225,24 +229,6 @@ class ActionPlanner:
reasoning = "No reason provided"
action_data = {key: value for key, value in action_json.items() if key not in ["action"]}
# 验证和清理 question
if "question" in action_data:
q = action_data.get("question")
if isinstance(q, str):
cleaned_q = q.strip()
if cleaned_q:
action_data["question"] = cleaned_q
else:
# 如果清理后为空字符串,移除该字段
action_data.pop("question", None)
elif q is None:
# 如果为 None移除该字段
action_data.pop("question", None)
else:
# 如果不是字符串类型,记录警告并移除
logger.warning(f"{self.log_prefix}question 格式不正确,应为字符串类型,已忽略")
action_data.pop("question", None)
# Non-no_reply actions require a target_message_id
target_message = None
@@ -318,6 +304,136 @@ class ActionPlanner:
logger.warning(f"{self.log_prefix}Failed to detect the message sender; required fields are missing")
return False
def _update_unknown_words_cache(self, new_words: List[str]) -> None:
"""
Update the jargon cache with newly extracted terms
Args:
new_words: list of newly extracted jargon terms
"""
for word in new_words:
if not isinstance(word, str):
continue
word = word.strip()
if not word:
continue
# If it already exists, move it to the end (LRU refresh)
if word in self.unknown_words_cache:
self.unknown_words_cache.move_to_end(word)
else:
# Add the new term
self.unknown_words_cache[word] = None
# Evict the oldest entry once over the limit
if len(self.unknown_words_cache) > self.unknown_words_cache_limit:
self.unknown_words_cache.popitem(last=False)
logger.debug(f"{self.log_prefix}Jargon cache full; evicted the oldest term")
def _merge_unknown_words_with_cache(self, new_words: Optional[List[str]]) -> List[str]:
"""
Merge newly extracted jargon with the cached jargon
Args:
new_words: list of newly extracted jargon terms (may be None)
Returns:
The merged, de-duplicated jargon list
"""
# Clean the newly extracted terms
cleaned_new_words: List[str] = []
if new_words:
for word in new_words:
if isinstance(word, str):
word = word.strip()
if word:
cleaned_new_words.append(word)
# Snapshot the cached terms
cached_words = list(self.unknown_words_cache.keys())
# Merge and de-duplicate (order preserved: new terms first, cached after)
merged_words: List[str] = []
seen = set()
# New terms first
for word in cleaned_new_words:
if word not in seen:
merged_words.append(word)
seen.add(word)
# Then the cached terms (if not already in the new list)
for word in cached_words:
if word not in seen:
merged_words.append(word)
seen.add(word)
return merged_words
def _process_unknown_words_cache(
self, actions: List[ActionPlannerInfo]
) -> None:
"""
Jargon-cache handling:
1. Check whether any reply action extracted unknown_words
2. If nothing was extracted, evict the oldest entry
3. If the cache holds more than 5 entries, evict the oldest 2
4. For each reply action, merge the cache with the newly extracted jargon
5. Update the cache
Args:
actions: the parsed action list
"""
# First check the cache size: if above 5, evict the oldest 2
if len(self.unknown_words_cache) > 5:
# Evict the oldest 2
removed_count = 0
for _ in range(2):
if len(self.unknown_words_cache) > 0:
self.unknown_words_cache.popitem(last=False)
removed_count += 1
if removed_count > 0:
logger.debug(f"{self.log_prefix}Cache size above 5; evicted the oldest {removed_count} entries")
# Check whether any reply action extracted unknown_words
has_extracted_unknown_words = False
for action in actions:
if action.action_type == "reply":
action_data = action.action_data or {}
unknown_words = action_data.get("unknown_words")
if unknown_words and isinstance(unknown_words, list) and len(unknown_words) > 0:
has_extracted_unknown_words = True
break
# If this plan's reply extracted nothing, evict the oldest entry
if not has_extracted_unknown_words:
if len(self.unknown_words_cache) > 0:
self.unknown_words_cache.popitem(last=False)
logger.debug(f"{self.log_prefix}This plan's reply extracted no jargon; evicted the oldest cache entry")
# For each reply action, merge the cached and newly extracted jargon
for action in actions:
if action.action_type == "reply":
action_data = action.action_data or {}
new_words = action_data.get("unknown_words")
# Merge the newly extracted and cached jargon lists
merged_words = self._merge_unknown_words_with_cache(new_words)
# Update action_data
if merged_words:
action_data["unknown_words"] = merged_words
logger.debug(
f"{self.log_prefix}Merged jargon: {len(new_words) if new_words else 0} newly extracted, "
f"{len(self.unknown_words_cache)} cached, {len(merged_words)} after merging"
)
else:
# If nothing remains after merging, drop the unknown_words field
action_data.pop("unknown_words", None)
# Update the cache with the newly extracted jargon
if new_words:
self._update_unknown_words_cache(new_words)
async def plan(
self,
available_actions: Dict[str, ActionInfo],
@@ -520,21 +636,32 @@ class ActionPlanner:
name_block = f"Your name is {bot_name}{bot_nickname}; note which messages are your own."
# Decide the reply action's example JSON based on the think_mode config
# unknown_words and question are carried directly as action parameters in the JSON
# unknown_words is carried directly as an action parameter in the JSON
if global_config.chat.think_mode == "classic":
reply_action_example = (
reply_action_example = ""
if global_config.chat.llm_quote:
reply_action_example += "5. Use quote when you want to reply to a specific message; if there aren't many messages and no explicit reply is needed, set quote to false\n"
reply_action_example += (
'{{"action":"reply", "target_message_id":"message id (m+number)", '
'"unknown_words":["term1","term2"], '
'"question":"the question to look up"}'
'"unknown_words":["term1","term2"]'
)
if global_config.chat.llm_quote:
reply_action_example += ', "quote":"set to true if this message should be quoted"'
reply_action_example += "}"
else:
reply_action_example = (
"5. think_level indicates thinking depth: 0 means the reply needs no thinking or recall, 1 means it needs recall and thought\n"
+ '{{"action":"reply", "think_level":level (0 or 1), '
'"target_message_id":"message id (m+number)", '
'"unknown_words":["term1","term2"], '
'"question":"the question to look up"}'
)
if global_config.chat.llm_quote:
reply_action_example += "6. Use quote when you want to reply to a specific message; if there aren't many messages and no explicit reply is needed, set quote to false\n"
reply_action_example += (
'{{"action":"reply", "think_level":level (0 or 1), '
'"target_message_id":"message id (m+number)", '
'"unknown_words":["term1","term2"]'
)
if global_config.chat.llm_quote:
reply_action_example += ', "quote":"set to true if this message should be quoted"'
reply_action_example += "}"
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(
@@ -730,6 +857,9 @@ class ActionPlanner:
random.shuffle(shuffled)
actions = list({a.action_type: a for a in shuffled}.values())
# Apply the jargon-cache logic
self._process_unknown_words_cache(actions)
logger.debug(f"{self.log_prefix}The planner selected {len(actions)} action(s): {' '.join([a.action_type for a in actions])}")
return extracted_reasoning, actions, llm_content, llm_reasoning, llm_duration_ms
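The jargon cache above is a plain OrderedDict used as an LRU: insertion order doubles as recency, move_to_end refreshes an entry, and popitem(last=False) evicts the oldest. The mechanism in isolation:

from collections import OrderedDict

class LRUSet:
    """Keep the `limit` most recently seen strings; the oldest is evicted first."""
    def __init__(self, limit: int = 10):
        self.items: OrderedDict[str, None] = OrderedDict()
        self.limit = limit

    def add(self, word: str) -> None:
        if word in self.items:
            self.items.move_to_end(word)  # refresh recency
        else:
            self.items[word] = None
            if len(self.items) > self.limit:
                self.items.popitem(last=False)  # evict the oldest entry

cache = LRUSet(limit=3)
for w in ["yyds", "xswl", "nbcs", "yyds", "u1s1"]:
    cache.add(w)
print(list(cache.items))  # ['nbcs', 'yyds', 'u1s1']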

View File

@@ -845,18 +845,6 @@ class DefaultReplyer:
chat_id, message_list_before_short, chat_talking_prompt_short, unknown_words
)
# Extract question from chosen_actions (reply actions only)
question = None
if chosen_actions:
for action_info in chosen_actions:
if action_info.action_type == "reply" and isinstance(action_info.action_data, dict):
q = action_info.action_data.get("question")
if isinstance(q, str):
cleaned_q = q.strip()
if cleaned_q:
question = cleaned_q
break
# Run the build tasks in parallel (including jargon explanation, which can be disabled in config)
task_results = await asyncio.gather(
self._time_and_run_task(
@@ -871,7 +859,7 @@ class DefaultReplyer:
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
self._time_and_run_task(
build_memory_retrieval_prompt(
chat_talking_prompt_short, sender, target, self.chat_stream, think_level=think_level, unknown_words=unknown_words, question=question
chat_talking_prompt_short, sender, target, self.chat_stream, think_level=think_level, unknown_words=unknown_words
),
"memory_retrieval",
),

View File

@@ -710,18 +710,6 @@ class PrivateReplyer:
else:
jargon_coroutine = self._build_disabled_jargon_explanation()
# Extract question from chosen_actions (reply actions only)
question = None
if chosen_actions:
for action_info in chosen_actions:
if action_info.action_type == "reply" and isinstance(action_info.action_data, dict):
q = action_info.action_data.get("question")
if isinstance(q, str):
cleaned_q = q.strip()
if cleaned_q:
question = cleaned_q
break
# Run the nine build tasks in parallel (including jargon explanation, which can be disabled in config)
task_results = await asyncio.gather(
self._time_and_run_task(
@@ -736,7 +724,7 @@ class PrivateReplyer:
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
self._time_and_run_task(
build_memory_retrieval_prompt(
chat_talking_prompt_short, sender, target, self.chat_stream, think_level=1, unknown_words=unknown_words, question=question
chat_talking_prompt_short, sender, target, self.chat_stream, think_level=1, unknown_words=unknown_words
),
"memory_retrieval",
),