Ruff Format

Author: DrSmoothl
Date: 2026-02-21 16:24:24 +08:00
Parent: 2cb512120b
Commit: eaef7f0e98
82 changed files with 1881 additions and 1900 deletions

View File

@@ -1,5 +1,5 @@
import time
from typing import Tuple, Optional, Dict, Any  # added Optional
from typing import Tuple, Optional  # added Optional
from src.common.logger import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config, model_config
@@ -120,7 +120,7 @@ class ActionPlanner:
def _get_personality_prompt(self) -> str:
"""获取个性提示信息"""
prompt_personality = global_config.personality.personality
# Check whether to randomly substitute a personality state
if (
global_config.personality.states
@@ -128,7 +128,7 @@ class ActionPlanner:
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.bot.nickname
return f"你的名字是{bot_name},你{prompt_personality};"
@@ -170,13 +170,10 @@ class ActionPlanner:
)
break
else:
logger.debug(
f"[私聊][{self.private_name}]聊天历史为空或尚未加载,跳过 Bot 发言时间检查。"
)
logger.debug(f"[私聊][{self.private_name}]聊天历史为空或尚未加载,跳过 Bot 发言时间检查。")
except Exception as e:
logger.debug(f"[私聊][{self.private_name}]获取 Bot 上次发言时间时出错: {e}")
# --- Get the timeout hint ---
# (this part of the logic is unchanged)
timeout_context = ""
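The `_get_personality_prompt` body above recurs verbatim in GoalAnalyzer, ReplyChecker, and ReplyGenerator below. A minimal self-contained sketch of the pattern, with a stand-in dataclass assumed in place of the project's real `global_config.personality`:

import random
from dataclasses import dataclass, field
from typing import List

@dataclass
class PersonalityConfig:  # stand-in assumption for global_config.personality
    personality: str = "calm and talkative"
    states: List[str] = field(default_factory=list)
    state_probability: float = 0.2

def get_personality_prompt(cfg: PersonalityConfig, bot_name: str) -> str:
    prompt = cfg.personality
    # With probability state_probability, swap in a random transient state.
    if cfg.states and random.random() < cfg.state_probability:
        prompt = random.choice(cfg.states)
    return f"你的名字是{bot_name},你{prompt};"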

View File

@@ -112,10 +112,10 @@ class Conversation:
"user_nickname": msg.user_info.user_nickname if msg.user_info else "",
"user_cardname": msg.user_info.user_cardname if msg.user_info else None,
"platform": msg.user_info.platform if msg.user_info else "",
}
},
}
initial_messages_dict.append(msg_dict)
# Fill the loaded messages into ObservationInfo's chat_history
self.observation_info.chat_history = initial_messages_dict
self.observation_info.chat_history_str = chat_talking_prompt + "\n"

View File

@@ -66,9 +66,9 @@ class DirectMessageSender:
# Send the message (calls the low-level API directly)
from src.chat.message_receive.uni_message_sender import _send_message
sent = await _send_message(message, show_log=True)
if sent:
# Store the message
await self.storage.store_message(message, chat_stream)

View File

@@ -5,7 +5,7 @@ from src.common.logger import get_logger
from .chat_observer import ChatObserver
from .chat_states import NotificationHandler, NotificationType, Notification
from src.chat.utils.chat_message_builder import build_readable_messages
from src.common.data_models.database_data_model import DatabaseMessages, DatabaseUserInfo
from src.common.data_models.database_data_model import DatabaseMessages
import traceback  # traceback imported for debugging
logger = get_logger("observation_info")
@@ -13,15 +13,15 @@ logger = get_logger("observation_info")
def dict_to_database_message(msg_dict: Dict[str, Any]) -> DatabaseMessages:
"""Convert PFC dict format to DatabaseMessages object
Args:
msg_dict: Message in PFC dict format with nested user_info
Returns:
DatabaseMessages object compatible with build_readable_messages()
"""
user_info_dict: Dict[str, Any] = msg_dict.get("user_info", {})
return DatabaseMessages(
message_id=msg_dict.get("message_id", ""),
time=msg_dict.get("time", 0.0),
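The full `DatabaseMessages` field list is truncated in this hunk, so the sketch below uses hypothetical stand-in models; it illustrates the same dict-to-dataclass conversion, lifting the nested `user_info` dict into a typed object:

from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class UserInfoStub:  # hypothetical stand-in for DatabaseUserInfo
    user_id: str = ""
    user_nickname: str = ""
    platform: str = ""

@dataclass
class MessageStub:  # hypothetical stand-in for DatabaseMessages
    message_id: str = ""
    time: float = 0.0
    user_info: UserInfoStub = field(default_factory=UserInfoStub)

def dict_to_message(msg_dict: Dict[str, Any]) -> MessageStub:
    ui: Dict[str, Any] = msg_dict.get("user_info", {}) or {}
    return MessageStub(
        message_id=msg_dict.get("message_id", ""),
        time=msg_dict.get("time", 0.0),
        user_info=UserInfoStub(
            user_id=ui.get("user_id", ""),
            user_nickname=ui.get("user_nickname", ""),
            platform=ui.get("platform", ""),
        ),
    )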

View File

@@ -42,9 +42,7 @@ class GoalAnalyzer:
"""对话目标分析器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(
model_set=model_config.model_task_config.planner, request_type="conversation_goal"
)
self.llm = LLMRequest(model_set=model_config.model_task_config.planner, request_type="conversation_goal")
self.personality_info = self._get_personality_prompt()
self.name = global_config.bot.nickname
@@ -60,7 +58,7 @@ class GoalAnalyzer:
def _get_personality_prompt(self) -> str:
"""获取个性提示信息"""
prompt_personality = global_config.personality.personality
# Check whether to randomly substitute a personality state
if (
global_config.personality.states
@@ -68,7 +66,7 @@ class GoalAnalyzer:
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.bot.nickname
return f"你的名字是{bot_name},你{prompt_personality};"

View File

@@ -1,10 +1,10 @@
from typing import List, Tuple, Dict, Any
from src.common.logger import get_logger
# NOTE: HippocampusManager doesn't exist in v0.12.2 - memory system was redesigned
# from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config, model_config
from src.chat.message_receive.message import Message
from src.config.config import model_config
from src.chat.knowledge import qa_manager
from src.chat.utils.chat_message_builder import build_readable_messages
from src.chat.brain_chat.PFC.observation_info import dict_to_database_message
@@ -16,9 +16,7 @@ class KnowledgeFetcher:
"""知识调取器"""
def __init__(self, private_name: str):
self.llm = LLMRequest(
model_set=model_config.model_task_config.utils
)
self.llm = LLMRequest(model_set=model_config.model_task_config.utils)
self.private_name = private_name
def _lpmm_get_knowledge(self, query: str) -> str:
@@ -64,7 +62,7 @@ class KnowledgeFetcher:
# TODO: Integrate with new memory system if needed
knowledge_text = ""
sources_text = "无记忆匹配"  # default value
# # Fetch related knowledge from memory (DISABLED - old Hippocampus API)
# related_memory = await HippocampusManager.get_instance().get_memory_from_text(
# text=f"{query}\n{chat_history_text}",

View File

@@ -14,10 +14,7 @@ class ReplyChecker:
"""回复检查器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(
model_set=model_config.model_task_config.utils,
request_type="reply_check"
)
self.llm = LLMRequest(model_set=model_config.model_task_config.utils, request_type="reply_check")
self.personality_info = self._get_personality_prompt()
self.name = global_config.bot.nickname
self.private_name = private_name
@@ -27,7 +24,7 @@ class ReplyChecker:
def _get_personality_prompt(self) -> str:
"""获取个性提示信息"""
prompt_personality = global_config.personality.personality
# Check whether to randomly substitute a personality state
if (
global_config.personality.states
@@ -35,7 +32,7 @@ class ReplyChecker:
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.bot.nickname
return f"你的名字是{bot_name},你{prompt_personality};"

View File

@@ -99,7 +99,7 @@ class ReplyGenerator:
def _get_personality_prompt(self) -> str:
"""获取个性提示信息"""
prompt_personality = global_config.personality.personality
# Check whether to randomly substitute a personality state
if (
global_config.personality.states
@@ -107,7 +107,7 @@ class ReplyGenerator:
and random.random() < global_config.personality.state_probability
):
prompt_personality = random.choice(global_config.personality.states)
bot_name = global_config.bot.nickname
return f"你的名字是{bot_name},你{prompt_personality};"

View File

@@ -704,10 +704,7 @@ class BrainChatting:
# Wait the given duration, interruptible by a new message
try:
await asyncio.wait_for(
self._new_message_event.wait(),
timeout=wait_seconds
)
await asyncio.wait_for(self._new_message_event.wait(), timeout=wait_seconds)
# If the event fired, a new message has arrived
logger.info(f"{self.log_prefix} wait 动作被新消息打断,提前结束等待")
except asyncio.TimeoutError:
@@ -731,7 +728,9 @@ class BrainChatting:
# Use the default wait duration
wait_seconds = 3
logger.info(f"{self.log_prefix} 执行 listening转换为 wait动作等待 {wait_seconds} 秒(可被新消息打断)")
logger.info(
f"{self.log_prefix} 执行 listening转换为 wait动作等待 {wait_seconds} 秒(可被新消息打断)"
)
# Clear the event state, ready to wait for new messages
self._new_message_event.clear()
@@ -749,10 +748,7 @@ class BrainChatting:
# Wait the given duration, interruptible by a new message
try:
await asyncio.wait_for(
self._new_message_event.wait(),
timeout=wait_seconds
)
await asyncio.wait_for(self._new_message_event.wait(), timeout=wait_seconds)
# If the event fired, a new message has arrived
logger.info(f"{self.log_prefix} listening 动作被新消息打断,提前结束等待")
except asyncio.TimeoutError:
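Both hunks above reduce to the same interruptible-wait idiom: block on an asyncio.Event with a timeout, where the receive path calls set() on arrival and the planner clears the event before each wait. A minimal sketch of the pattern:

import asyncio

async def wait_or_interrupt(new_message_event: asyncio.Event, wait_seconds: float) -> bool:
    """Return True if a new message interrupted the wait, False if it timed out."""
    try:
        await asyncio.wait_for(new_message_event.wait(), timeout=wait_seconds)
        return True  # event was set: a new message arrived early
    except asyncio.TimeoutError:
        return False  # the full wait elapsed undisturbed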

View File

@@ -431,15 +431,21 @@ class BrainPlanner:
except Exception as req_e:
logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}")
extracted_reasoning = f"LLM 请求失败,模型出现问题: {req_e}"
return extracted_reasoning, [
ActionPlannerInfo(
action_type="complete_talk",
reasoning=extracted_reasoning,
action_data={},
action_message=None,
available_actions=available_actions,
)
], llm_content, llm_reasoning, llm_duration_ms
return (
extracted_reasoning,
[
ActionPlannerInfo(
action_type="complete_talk",
reasoning=extracted_reasoning,
action_data={},
action_message=None,
available_actions=available_actions,
)
],
llm_content,
llm_reasoning,
llm_duration_ms,
)
# Parse the LLM response
if llm_content:

View File

@@ -105,7 +105,7 @@ class EmbeddingStore:
self.embedding_file_path = f"{dir_path}/{namespace}.parquet"
self.index_file_path = f"{dir_path}/{namespace}.index"
self.idx2hash_file_path = f"{dir_path}/{namespace}_i2h.json"
self.dirty = False  # marks whether newly added data requires an index rebuild
# Validate and set the multithreading configuration parameters

View File

@@ -1,7 +1,7 @@
import asyncio
import json
import time
from typing import List, Union, Dict, Any
from typing import List, Union
from .global_logger import logger
from . import prompt_template
@@ -192,17 +192,15 @@ class IEProcess:
results = []
total = len(paragraphs)
for i, pg in enumerate(paragraphs, start=1):
# Log progress so the user knows it has not hung
logger.info(f"[IEProcess] 正在处理第 {i}/{total} 段文本 (长度: {len(pg)})...")
# Wrap the synchronous blocking call in asyncio.to_thread to prevent deadlock
# This way the asyncio.run inside info_extract_from_str runs on a fresh loop in a separate thread
try:
entities, triples = await asyncio.to_thread(
info_extract_from_str, self.llm_ner, self.llm_rdf, pg
)
entities, triples = await asyncio.to_thread(info_extract_from_str, self.llm_ner, self.llm_rdf, pg)
if entities is not None:
results.append(
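The comment above names the key constraint: `info_extract_from_str` calls `asyncio.run` internally, which cannot run on the thread that already hosts the event loop. A runnable sketch of the escape hatch, with a toy extractor standing in for the real one:

import asyncio

async def _toy_llm_call(text: str):
    await asyncio.sleep(0.01)  # pretend this is an LLM round-trip
    return [text.upper()], []

def blocking_extract(text: str):
    # Stand-in for info_extract_from_str: it spins up its own event loop.
    return asyncio.run(_toy_llm_call(text))

async def process(paragraphs):
    results = []
    for pg in paragraphs:
        # In a worker thread there is no running loop, so the inner
        # asyncio.run gets a fresh one instead of colliding with ours.
        results.append(await asyncio.to_thread(blocking_extract, pg))
    return results

print(asyncio.run(process(["hello", "world"])))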

View File

@@ -395,8 +395,7 @@ class KGManager:
appear_cnt = self.ent_appear_cnt.get(ent_hash)
if not appear_cnt or appear_cnt <= 0:
logger.debug(
f"实体 {ent_hash} 在 ent_appear_cnt 中不存在或计数为 0"
f"将使用 1.0 作为默认出现次数参与权重计算"
f"实体 {ent_hash} 在 ent_appear_cnt 中不存在或计数为 0将使用 1.0 作为默认出现次数参与权重计算"
)
appear_cnt = 1.0
ent_weights[ent_hash] = float(np.sum(scores)) / float(appear_cnt)
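The weight computed here is the summed relevance scores normalized by how often the entity appears, with a floor of 1.0 when the count is missing, exactly as the log message describes. A small numeric sketch:

import numpy as np
from typing import Optional

def entity_weight(scores: np.ndarray, appear_cnt: Optional[float]) -> float:
    if not appear_cnt or appear_cnt <= 0:
        appear_cnt = 1.0  # missing/zero count falls back to 1.0
    return float(np.sum(scores)) / float(appear_cnt)

print(entity_weight(np.array([0.4, 0.3]), 2.0))   # 0.35
print(entity_weight(np.array([0.4, 0.3]), None))  # 0.7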

View File

@@ -11,31 +11,30 @@ from src.chat.knowledge import get_qa_manager, lpmm_start_up
logger = get_logger("LPMM-Plugin-API")
class LPMMOperations:
"""
Internal operations interface for LPMM.
Wraps LPMM's core operations for use by the plugin-system API and other internal components.
"""
def __init__(self):
self._initialized = False
async def _run_cancellable_executor(
self, func: Callable, *args, **kwargs
) -> Any:
async def _run_cancellable_executor(self, func: Callable, *args, **kwargs) -> Any:
"""
Run a cancellable synchronous operation in the thread pool.
When the task is cancelled (e.g. Ctrl+C), it responds immediately and raises CancelledError.
Note: the operation may still be running in the pool thread, but the coroutine returns at once and does not block the main process.
Args:
func: the synchronous function to execute
*args: positional arguments for the function
**kwargs: keyword arguments for the function
Returns:
the function's return value
Raises:
asyncio.CancelledError: when the task is cancelled
"""
@@ -51,42 +50,42 @@ class LPMMOperations:
# If not initialized globally, try to initialize
if not global_config.lpmm_knowledge.enable:
logger.warning("LPMM 知识库在全局配置中未启用,操作可能受限。")
lpmm_start_up()
qa_mgr = get_qa_manager()
if qa_mgr is None:
raise RuntimeError("无法获取 LPMM QAManager请检查 LPMM 是否已正确安装和配置。")
return qa_mgr.embed_manager, qa_mgr.kg_manager, qa_mgr
async def add_content(self, text: str, auto_split: bool = True) -> dict:
"""
Add new content to the knowledge base.
Args:
text: the raw text.
auto_split: whether to auto-split paragraphs on double newlines.
- True: split automatically (default); supports multi-paragraph text separated by blank lines
- False: do not split; treat the entire text as one paragraph
Returns:
dict: {"status": "success/error", "count": number of imported paragraphs, "message": "description"}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# 1. Paragraph splitting
if auto_split:
# Split automatically on double newlines
paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
paragraphs = [p.strip() for p in text.split("\n\n") if p.strip()]
else:
# No splitting; treat as a single paragraph
text_stripped = text.strip()
if not text_stripped:
return {"status": "error", "message": "文本内容为空"}
paragraphs = [text_stripped]
if not paragraphs:
return {"status": "error", "message": "文本内容为空"}
@@ -94,14 +93,16 @@ class LPMMOperations:
from src.chat.knowledge.ie_process import IEProcess
from src.llm_models.utils_model import LLMRequest
from src.config.config import model_config
llm_ner = LLMRequest(model_set=model_config.model_task_config.lpmm_entity_extract, request_type="lpmm.entity_extract")
llm_ner = LLMRequest(
model_set=model_config.model_task_config.lpmm_entity_extract, request_type="lpmm.entity_extract"
)
llm_rdf = LLMRequest(model_set=model_config.model_task_config.lpmm_rdf_build, request_type="lpmm.rdf_build")
ie_process = IEProcess(llm_ner, llm_rdf)
logger.info(f"[Plugin API] 正在对 {len(paragraphs)} 段文本执行信息抽取...")
extracted_docs = await ie_process.process_paragraphs(paragraphs)
# 3. Assemble and import the data
# The import logic is implemented by hand here rather than relying on external scripts
# a. Prepare the paragraphs
@@ -115,7 +116,7 @@ class LPMMOperations:
# Format expected by store_new_data_set: keys of raw_paragraphs are paragraph hashes (without prefix), values are the paragraph text
new_raw_paragraphs = {}
new_triple_list_data = {}
for pg_hash, passage in raw_paragraphs.items():
key = f"paragraph-{pg_hash}"
if key not in embed_mgr.stored_pg_hashes:
@@ -128,26 +129,22 @@ class LPMMOperations:
# 2. Use EmbeddingManager's standard method to store paragraph, entity, and relation embeddings
# store_new_data_set handles embedding generation and storage automatically
# Run the synchronous blocking operation in the thread pool to avoid blocking the event loop
await self._run_cancellable_executor(
embed_mgr.store_new_data_set,
new_raw_paragraphs,
new_triple_list_data
)
await self._run_cancellable_executor(embed_mgr.store_new_data_set, new_raw_paragraphs, new_triple_list_data)
# 3. Build the knowledge graph (only needs the triple data and the embedding_manager)
await self._run_cancellable_executor(
kg_mgr.build_kg,
new_triple_list_data,
embed_mgr
)
await self._run_cancellable_executor(kg_mgr.build_kg, new_triple_list_data, embed_mgr)
# 4. Persist
await self._run_cancellable_executor(embed_mgr.rebuild_faiss_index)
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
return {"status": "success", "count": len(new_raw_paragraphs), "message": f"成功导入 {len(new_raw_paragraphs)} 条知识"}
return {
"status": "success",
"count": len(new_raw_paragraphs),
"message": f"成功导入 {len(new_raw_paragraphs)} 条知识",
}
except asyncio.CancelledError:
logger.warning("[Plugin API] 导入操作被用户中断")
return {"status": "cancelled", "message": "导入操作已被用户中断"}
@@ -158,11 +155,11 @@ class LPMMOperations:
async def search(self, query: str, top_k: int = 3) -> List[str]:
"""
Search the knowledge base.
Args:
query: the query question.
top_k: number of most relevant entries to return.
Returns:
List[str]: list of relevant passages.
"""
@@ -179,21 +176,21 @@ class LPMMOperations:
async def delete(self, keyword: str, exact_match: bool = False) -> dict:
"""
Delete knowledge-base content by keyword or full passage.
Args:
keyword: the keyword or full passage to match.
exact_match: whether to match the full passage (True = exact match, False = fuzzy keyword match).
Returns:
dict: {"status": "success/info", "deleted_count": number of entries deleted, "message": "description"}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# 1. Find the matching paragraphs
to_delete_keys = []
to_delete_hashes = []
for key, item in embed_mgr.paragraphs_embedding_store.store.items():
if exact_match:
# Full-passage match
@@ -205,29 +202,25 @@ class LPMMOperations:
if keyword in item.str:
to_delete_keys.append(key)
to_delete_hashes.append(key.replace("paragraph-", "", 1))
if not to_delete_keys:
match_type = "完整文段" if exact_match else "关键词"
return {"status": "info", "deleted_count": 0, "message": f"未找到匹配的内容({match_type}匹配)"}
# 2. Perform the deletion
# Run the synchronous blocking operations in the thread pool to avoid blocking the event loop
# a. Delete from the vector store
deleted_count, _ = await self._run_cancellable_executor(
embed_mgr.paragraphs_embedding_store.delete_items,
to_delete_keys
embed_mgr.paragraphs_embedding_store.delete_items, to_delete_keys
)
embed_mgr.stored_pg_hashes = set(embed_mgr.paragraphs_embedding_store.store.keys())
# b. Delete from the knowledge graph
# Note: keyword arguments are required here, or True would be mistaken for the ent_hashes parameter
# Use partial to carry the keyword arguments, since run_in_executor does not support **kwargs
delete_func = partial(
kg_mgr.delete_paragraphs,
to_delete_hashes,
ent_hashes=None,
remove_orphan_entities=True
kg_mgr.delete_paragraphs, to_delete_hashes, ent_hashes=None, remove_orphan_entities=True
)
await self._run_cancellable_executor(delete_func)
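The `partial` dance exists because `loop.run_in_executor` forwards positional arguments only: passing True positionally would bind it to `ent_hashes` instead of `remove_orphan_entities`. A runnable illustration with a toy function of the same shape:

import asyncio
from functools import partial

def delete_paragraphs(pg_hashes, ent_hashes=None, remove_orphan_entities=False):
    print(pg_hashes, ent_hashes, remove_orphan_entities)

async def main():
    loop = asyncio.get_running_loop()
    # Wrong: run_in_executor(None, delete_paragraphs, ["h1"], True)
    # would set ent_hashes=True. Bake keywords in with partial instead:
    fn = partial(delete_paragraphs, ["h1"], ent_hashes=None, remove_orphan_entities=True)
    await loop.run_in_executor(None, fn)  # prints: ['h1'] None True

asyncio.run(main())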
@@ -235,9 +228,13 @@ class LPMMOperations:
await self._run_cancellable_executor(embed_mgr.rebuild_faiss_index)
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
match_type = "完整文段" if exact_match else "关键词"
return {"status": "success", "deleted_count": deleted_count, "message": f"已成功删除 {deleted_count} 条相关知识({match_type}匹配)"}
return {
"status": "success",
"deleted_count": deleted_count,
"message": f"已成功删除 {deleted_count} 条相关知识({match_type}匹配)",
}
except asyncio.CancelledError:
logger.warning("[Plugin API] 删除操作被用户中断")
@@ -249,13 +246,13 @@ class LPMMOperations:
async def clear_all(self) -> dict:
"""
Clear the entire LPMM knowledge base (delete all paragraphs, entities, relations, and knowledge-graph data)
Returns:
dict: {"status": "success/error", "message": "description", "stats": {...}}
"""
try:
embed_mgr, kg_mgr, _ = await self._get_managers()
# Record stats before clearing
before_stats = {
"paragraphs": len(embed_mgr.paragraphs_embedding_store.store),
@@ -264,40 +261,37 @@ class LPMMOperations:
"kg_nodes": len(kg_mgr.graph.get_node_list()),
"kg_edges": len(kg_mgr.graph.get_edge_list()),
}
# Run the synchronous blocking operations in the thread pool to avoid blocking the event loop
# 1. Clear all vector stores
# Collect all keys
para_keys = list(embed_mgr.paragraphs_embedding_store.store.keys())
ent_keys = list(embed_mgr.entities_embedding_store.store.keys())
rel_keys = list(embed_mgr.relation_embedding_store.store.keys())
# Delete all paragraph vectors
para_deleted, _ = await self._run_cancellable_executor(
embed_mgr.paragraphs_embedding_store.delete_items,
para_keys
embed_mgr.paragraphs_embedding_store.delete_items, para_keys
)
embed_mgr.stored_pg_hashes.clear()
# Delete all entity vectors
if ent_keys:
ent_deleted, _ = await self._run_cancellable_executor(
embed_mgr.entities_embedding_store.delete_items,
ent_keys
embed_mgr.entities_embedding_store.delete_items, ent_keys
)
else:
ent_deleted = 0
# Delete all relation vectors
if rel_keys:
rel_deleted, _ = await self._run_cancellable_executor(
embed_mgr.relation_embedding_store.delete_items,
rel_keys
embed_mgr.relation_embedding_store.delete_items, rel_keys
)
else:
rel_deleted = 0
# 2. Clear every embedding store's index and mapping
# Make sure faiss_index and idx2hash are also reset, and delete the old index files
def _clear_embedding_indices():
@@ -310,7 +304,7 @@ class LPMMOperations:
os.remove(embed_mgr.paragraphs_embedding_store.index_file_path)
if os.path.exists(embed_mgr.paragraphs_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.paragraphs_embedding_store.idx2hash_file_path)
# Clear the entity index
embed_mgr.entities_embedding_store.faiss_index = None
embed_mgr.entities_embedding_store.idx2hash = None
@@ -320,7 +314,7 @@ class LPMMOperations:
os.remove(embed_mgr.entities_embedding_store.index_file_path)
if os.path.exists(embed_mgr.entities_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.entities_embedding_store.idx2hash_file_path)
# Clear the relation index
embed_mgr.relation_embedding_store.faiss_index = None
embed_mgr.relation_embedding_store.idx2hash = None
@@ -330,9 +324,9 @@ class LPMMOperations:
os.remove(embed_mgr.relation_embedding_store.index_file_path)
if os.path.exists(embed_mgr.relation_embedding_store.idx2hash_file_path):
os.remove(embed_mgr.relation_embedding_store.idx2hash_file_path)
await self._run_cancellable_executor(_clear_embedding_indices)
# 3. Clear the knowledge graph
# Collect all paragraph hashes
all_pg_hashes = list(kg_mgr.stored_paragraph_hashes)
@@ -341,24 +335,22 @@ class LPMMOperations:
# Note: keyword arguments are required here, or True would be mistaken for the ent_hashes parameter
# Use partial to carry the keyword arguments, since run_in_executor does not support **kwargs
delete_func = partial(
kg_mgr.delete_paragraphs,
all_pg_hashes,
ent_hashes=None,
remove_orphan_entities=True
kg_mgr.delete_paragraphs, all_pg_hashes, ent_hashes=None, remove_orphan_entities=True
)
await self._run_cancellable_executor(delete_func)
# Fully reset the KG: create a new empty graph (do this whether or not any paragraph hashes existed)
from quick_algo import di_graph
kg_mgr.graph = di_graph.DiGraph()
kg_mgr.stored_paragraph_hashes.clear()
kg_mgr.ent_appear_cnt.clear()
# 4. Save everything (all stores are empty at this point and the indices are None)
# Note: even with empty stores, save_to_file writes an empty DataFrame, which is correct
await self._run_cancellable_executor(embed_mgr.save_to_file)
await self._run_cancellable_executor(kg_mgr.save_to_file)
after_stats = {
"paragraphs": len(embed_mgr.paragraphs_embedding_store.store),
"entities": len(embed_mgr.entities_embedding_store.store),
@@ -366,14 +358,14 @@ class LPMMOperations:
"kg_nodes": len(kg_mgr.graph.get_node_list()),
"kg_edges": len(kg_mgr.graph.get_edge_list()),
}
return {
"status": "success",
"message": f"已成功清空LPMM知识库删除 {para_deleted} 个段落、{ent_deleted} 个实体、{rel_deleted} 个关系)",
"stats": {
"before": before_stats,
"after": after_stats,
}
},
}
except asyncio.CancelledError:
@@ -383,6 +375,6 @@ class LPMMOperations:
logger.error(f"[Plugin API] 清空知识库失败: {e}", exc_info=True)
return {"status": "error", "message": str(e)}
# Singleton for internal use
lpmm_ops = LPMMOperations()
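A hedged usage sketch of the singleton defined above; the import path is a guess, and the return shapes follow the docstrings in this file:

import asyncio
# Hypothetical import path; adjust to wherever this module actually lives.
from lpmm_operations import lpmm_ops

async def demo():
    added = await lpmm_ops.add_content("第一段\n\n第二段", auto_split=True)
    print(added)   # e.g. {"status": "success", "count": 2, "message": "..."}
    hits = await lpmm_ops.search("第一段", top_k=3)
    print(hits)    # list of matching passages
    removed = await lpmm_ops.delete("第一段", exact_match=False)
    print(removed)

asyncio.run(demo())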

View File

@@ -136,4 +136,3 @@ class PlanReplyLogger:
return str(value)
# Fallback to string for other complex types
return str(value)

View File

@@ -85,17 +85,17 @@ class ChatBot:
async def _create_pfc_chat(self, message: MessageRecv):
"""创建或获取PFC对话实例
Args:
message: 消息对象
"""
try:
chat_id = str(message.chat_stream.stream_id)
private_name = str(message.message_info.user_info.user_nickname)
logger.debug(f"[私聊][{private_name}]创建或获取PFC对话: {chat_id}")
await self.pfc_manager.get_or_create_conversation(chat_id, private_name)
except Exception as e:
logger.error(f"创建PFC聊天失败: {e}")
logger.error(traceback.format_exc())

View File

@@ -96,7 +96,7 @@ class Message(MessageBase):
if processed_text:
return f"{global_config.bot.nickname}: {processed_text}"
return None
tasks = [process_forward_node(node_dict) for node_dict in segment.data]
results = await asyncio.gather(*tasks, return_exceptions=True)
segments_text = []

View File

@@ -189,7 +189,7 @@ async def _send_message(message: MessageSending, show_log=True) -> bool:
# If the API Server is not enabled, skip the fallback entirely
if not global_config.maim_message.enable_api_server:
logger.debug(f"[API Server Fallback] API Server未开启跳过fallback")
logger.debug("[API Server Fallback] API Server未开启跳过fallback")
if legacy_exception:
raise legacy_exception
return False
@@ -198,13 +198,13 @@ async def _send_message(message: MessageSending, show_log=True) -> bool:
extra_server = getattr(global_api, "extra_server", None)
if not extra_server:
logger.warning(f"[API Server Fallback] extra_server不存在")
logger.warning("[API Server Fallback] extra_server不存在")
if legacy_exception:
raise legacy_exception
return False
if not extra_server.is_running():
logger.warning(f"[API Server Fallback] extra_server未运行")
logger.warning("[API Server Fallback] extra_server未运行")
if legacy_exception:
raise legacy_exception
return False
@@ -253,7 +253,7 @@ async def _send_message(message: MessageSending, show_log=True) -> bool:
)
# Call the Server's send_message interface directly; it handles routing automatically
logger.debug(f"[API Server Fallback] 正在通过extra_server发送消息...")
logger.debug("[API Server Fallback] 正在通过extra_server发送消息...")
results = await extra_server.send_message(api_message)
logger.debug(f"[API Server Fallback] 发送结果: {results}")

View File

@@ -35,6 +35,7 @@ logger = get_logger("planner")
install(extra_lines=3)
class ActionPlanner:
def __init__(self, chat_id: str, action_manager: ActionManager):
self.chat_id = chat_id
@@ -48,7 +49,7 @@ class ActionPlanner:
self.last_obs_time_mark = 0.0
self.plan_log: List[Tuple[str, float, Union[List[ActionPlannerInfo], str]]] = []
# Slang cache: an LRU built on OrderedDict, capped at 10 entries
self.unknown_words_cache: OrderedDict[str, None] = OrderedDict()
self.unknown_words_cache_limit = 10
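OrderedDict gives the LRU behavior for free: move_to_end refreshes recency, and popitem(last=False) evicts the oldest entry. A self-contained sketch of the cache this field implements:

from collections import OrderedDict

class SlangCache:
    """Keeps the N most recently seen slang terms."""

    def __init__(self, limit: int = 10):
        self._cache: "OrderedDict[str, None]" = OrderedDict()
        self._limit = limit

    def add(self, word: str) -> None:
        if word in self._cache:
            self._cache.move_to_end(word)  # refresh recency
        else:
            self._cache[word] = None
            if len(self._cache) > self._limit:
                self._cache.popitem(last=False)  # evict the oldest

    def words(self) -> list:
        return list(self._cache.keys())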
@@ -111,20 +112,29 @@ class ActionPlanner:
# Replace [picid:xxx] with [图片:描述] (image: description)
pic_pattern = r"\[picid:([^\]]+)\]"
def replace_pic_id(pic_match: re.Match) -> str:
pic_id = pic_match.group(1)
description = translate_pid_to_description(pic_id)
return f"[图片:{description}]"
msg_text = re.sub(pic_pattern, replace_pic_id, msg_text)
# Replace user-reference formats: 回复<aaa:bbb> (reply) and @<aaa:bbb> (mention)
platform = getattr(message, "user_info", None) and message.user_info.platform or getattr(message, "chat_info", None) and message.chat_info.platform or "qq"
platform = (
getattr(message, "user_info", None)
and message.user_info.platform
or getattr(message, "chat_info", None)
and message.chat_info.platform
or "qq"
)
msg_text = replace_user_references(msg_text, platform, replace_bot_name=True)
# Replace standalone <username:user_id> patterns (replace_user_references already handled the 回复< and @< forms)
# Match every <aaa:bbb> pattern; since replace_user_references already substituted the 回复< and @< forms,
# anything matched here should be the standalone form
user_ref_pattern = r"<([^:<>]+):([^:<>]+)>"
def replace_user_ref(user_match: re.Match) -> str:
user_name = user_match.group(1)
user_id = user_match.group(2)
@@ -137,6 +147,7 @@ class ActionPlanner:
except Exception:
# If parsing fails, use the original nickname
return user_name
msg_text = re.sub(user_ref_pattern, replace_user_ref, msg_text)
preview = msg_text if len(msg_text) <= 100 else f"{msg_text[:97]}..."
@@ -165,7 +176,7 @@ class ActionPlanner:
else:
reasoning = "未提供原因"
action_data = {key: value for key, value in action_json.items() if key not in ["action"]}
# Actions other than no_reply need a target_message_id
target_message = None
@@ -244,7 +255,7 @@ class ActionPlanner:
def _update_unknown_words_cache(self, new_words: List[str]) -> None:
"""
Update the slang cache, adding the newly extracted slang terms
Args:
new_words: list of newly extracted slang terms
"""
@@ -254,7 +265,7 @@ class ActionPlanner:
word = word.strip()
if not word:
continue
# If already present, move it to the end (LRU refresh)
if word in self.unknown_words_cache:
self.unknown_words_cache.move_to_end(word)
@@ -269,10 +280,10 @@ class ActionPlanner:
def _merge_unknown_words_with_cache(self, new_words: Optional[List[str]]) -> List[str]:
"""
Merge newly extracted slang with the cached slang
Args:
new_words: list of newly extracted slang terms (may be None)
Returns:
the merged slang list (deduplicated)
"""
@@ -284,31 +295,29 @@ class ActionPlanner:
word = word.strip()
if word:
cleaned_new_words.append(word)
# Get the slang list from the cache
cached_words = list(self.unknown_words_cache.keys())
# Merge and deduplicate (order preserved: newly extracted first, then cached)
merged_words: List[str] = []
seen = set()
# Add the newly extracted terms first
for word in cleaned_new_words:
if word not in seen:
merged_words.append(word)
seen.add(word)
# Then add the cached ones (if not already in the new list)
for word in cached_words:
if word not in seen:
merged_words.append(word)
seen.add(word)
return merged_words
def _process_unknown_words_cache(
self, actions: List[ActionPlannerInfo]
) -> None:
def _process_unknown_words_cache(self, actions: List[ActionPlannerInfo]) -> None:
"""
Slang-cache handling logic:
1. Check whether any reply action extracted unknown_words
@@ -316,7 +325,7 @@ class ActionPlanner:
3. If the cache holds more than 5 entries, evict the 2 oldest
4. For each reply action, merge the cached and newly extracted slang
5. Update the cache
Args:
actions: the parsed action list
"""
@@ -330,7 +339,7 @@ class ActionPlanner:
removed_count += 1
if removed_count > 0:
logger.debug(f"{self.log_prefix}缓存数量大于5移除最老的{removed_count}个缓存")
# Check whether any reply action extracted unknown_words
has_extracted_unknown_words = False
for action in actions:
@@ -340,22 +349,22 @@ class ActionPlanner:
if unknown_words and isinstance(unknown_words, list) and len(unknown_words) > 0:
has_extracted_unknown_words = True
break
# If this plan's reply extracted none, evict the oldest entry
if not has_extracted_unknown_words:
if len(self.unknown_words_cache) > 0:
self.unknown_words_cache.popitem(last=False)
logger.debug(f"{self.log_prefix}当前 plan 的 reply 没有提取黑话移除最老的1个缓存")
# For each reply action, merge the cached and newly extracted slang
for action in actions:
if action.action_type == "reply":
action_data = action.action_data or {}
new_words = action_data.get("unknown_words")
# Merge the newly extracted and cached slang lists
merged_words = self._merge_unknown_words_with_cache(new_words)
# Update action_data
if merged_words:
action_data["unknown_words"] = merged_words
@@ -366,7 +375,7 @@ class ActionPlanner:
else:
# If there is no merged slang, remove the unknown_words field
action_data.pop("unknown_words", None)
# Update the cache (add the newly extracted slang)
if new_words:
self._update_unknown_words_cache(new_words)
@@ -442,15 +451,19 @@ class ActionPlanner:
# Check whether an action already replies to this message
has_reply_to_force_message = False
for action in actions:
if action.action_type == "reply" and action.action_message and action.action_message.message_id == force_reply_message.message_id:
if (
action.action_type == "reply"
and action.action_message
and action.action_message.message_id == force_reply_message.message_id
):
has_reply_to_force_message = True
break
# If nothing replies to that message, force-add a reply action
if not has_reply_to_force_message:
# Remove all no_reply actions (if any)
actions = [a for a in actions if a.action_type != "no_reply"]
# Create the forced reply action
available_actions_dict = dict(current_available_actions)
force_reply_action = ActionPlannerInfo(
@@ -577,10 +590,11 @@ class ActionPlanner:
if global_config.chat.think_mode == "classic":
reply_action_example = ""
if global_config.chat.llm_quote:
reply_action_example += "5.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
reply_action_example += (
"5.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
)
reply_action_example += (
'{{"action":"reply", "target_message_id":"消息id(m+数字)", '
'"unknown_words":["词语1","词语2"]'
'{{"action":"reply", "target_message_id":"消息id(m+数字)", "unknown_words":["词语1","词语2"]'
)
if global_config.chat.llm_quote:
reply_action_example += ', "quote":"如果需要引用该message设置为true"'
@@ -590,7 +604,9 @@ class ActionPlanner:
"5.think_level表示思考深度0表示该回复不需要思考和回忆1表示该回复需要进行回忆和思考\n"
)
if global_config.chat.llm_quote:
reply_action_example += "6.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
reply_action_example += (
"6.如果要明确回复消息使用quote如果消息不多不需要明确回复设置quote为false\n"
)
reply_action_example += (
'{{"action":"reply", "think_level":数值等级(0或1), '
'"target_message_id":"消息id(m+数字)", '
@@ -741,15 +757,21 @@ class ActionPlanner:
except Exception as req_e:
logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}")
return f"LLM 请求失败,模型出现问题: {req_e}", [
ActionPlannerInfo(
action_type="no_reply",
reasoning=f"LLM 请求失败,模型出现问题: {req_e}",
action_data={},
action_message=None,
available_actions=available_actions,
)
], llm_content, llm_reasoning, llm_duration_ms
return (
f"LLM 请求失败,模型出现问题: {req_e}",
[
ActionPlannerInfo(
action_type="no_reply",
reasoning=f"LLM 请求失败,模型出现问题: {req_e}",
action_data={},
action_message=None,
available_actions=available_actions,
)
],
llm_content,
llm_reasoning,
llm_duration_ms,
)
# Parse the LLM response
extracted_reasoning = ""

View File

@@ -1071,7 +1071,6 @@ class DefaultReplyer:
chat_target_2_prompt = prompt_manager.get_prompt("chat_target_group2")
chat_target_2 = await prompt_manager.render_prompt(chat_target_2_prompt)
# Build the final reply_style from config (multiple_reply_style supports probabilistic random substitution)
reply_style = global_config.personality.reply_style
multi_styles = global_config.personality.multiple_reply_style

View File

@@ -26,6 +26,7 @@ from src.chat.utils.chat_message_builder import (
)
from src.bw_learner.expression_selector import expression_selector
from src.plugin_system.apis.message_api import translate_pid_to_description
# from src.memory_system.memory_activator import MemoryActivator
from src.person_info.person_info import Person, is_person_known
from src.plugin_system.base.component_types import ActionInfo, EventType
@@ -807,7 +808,7 @@ class PrivateReplyer:
reply_style = global_config.personality.reply_style
# Use the unified is_bot_self helper to check whether this is the bot itself (multi-platform, including WebUI)
if is_bot_self(platform, user_id):
prompt_template = prompt_manager.get_prompt("private_replyer_self")
prompt_template.add_context("target", target)

View File

@@ -5,6 +5,7 @@ from src.common.logger import get_logger
logger = get_logger("common_utils")
class TempMethodsExpression:
"""用于临时存放一些方法的类"""