This commit is contained in:
UnCLAS-Prommer
2026-03-08 11:37:54 +08:00
committed by DrSmoothl
parent 3ea14a85c3
commit cd81f943e3
32 changed files with 4427 additions and 1917 deletions

View File

@@ -16,8 +16,8 @@ from src.chat.brain_chat.brain_planner import BrainPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.heart_flow.hfc_utils import CycleDetail
from src.bw_learner.expression_learner import expression_learner_manager
from src.bw_learner.message_recorder import extract_and_distribute_messages
from src.bw_learner.expression_learner_old import expression_learner_manager
from src.bw_learner.message_recorder_old import extract_and_distribute_messages
from src.person_info.person_info import Person
from src.core.types import ActionInfo, EventType
from src.core.event_bus import event_bus
@@ -63,7 +63,7 @@ class BrainChatting:
用于在特定聊天流中生成回复。
"""
def __init__(self, chat_id: str):
def __init__(self, session_id: str):
"""
BrainChatting 初始化函数
@@ -73,8 +73,8 @@ class BrainChatting:
performance_version: 性能记录版本号,用于区分不同启动版本
"""
# 基础属性
self.stream_id: str = chat_id # 聊天流ID
self.chat_stream: BotChatSession = _chat_manager.get_session_by_session_id(self.stream_id) # type: ignore
self.stream_id: str = session_id # 聊天流ID
self.chat_stream: ChatStream = get_chat_manager().get_stream(self.stream_id) # type: ignore
if not self.chat_stream:
raise ValueError(f"无法找到聊天流: {self.stream_id}")
self.log_prefix = f"[{_chat_manager.get_session_name(self.stream_id) or self.stream_id}]"
@@ -269,7 +269,7 @@ class BrainChatting:
# Expression Reflection Check
# 检查是否需要提问表达反思
# -------------------------------------------------------------------------
from src.bw_learner.expression_reflector import expression_reflector_manager
from src.bw_learner.expression_reflector_old import expression_reflector_manager
reflector = expression_reflector_manager.get_or_create_reflector(self.stream_id)
asyncio.create_task(reflector.check_and_ask())

View File

@@ -10,6 +10,7 @@ from src.common.logger import get_logger
from src.common.utils.utils_session import SessionUtils
from src.config.config import global_config
from src.chat.message_receive.chat_manager import chat_manager
from src.bw_learner.expression_reflector import ExpressionReflector
if TYPE_CHECKING:
from src.chat.message_receive.message import SessionMessage
@@ -52,6 +53,9 @@ class HeartFChatting:
# Asyncio Event 用于控制循环的开始和结束
self._cycle_event = asyncio.Event()
# 反思器
self.reflector = ExpressionReflector(session_id)
async def start(self):
"""启动 HeartFChatting 的主循环"""
# 先检查是否已经启动运行
@@ -160,7 +164,12 @@ class HeartFChatting:
async def _judge_and_response(self, mentioned_message: Optional["SessionMessage"] = None):
"""判定和生成回复"""
# TODO: 在expression和reflector重构完成后完成这里的逻辑
await self.reflector.check_and_ask()
if self.reflector.reflect_tracker.tracking and await self.reflector.reflect_tracker.trigger_tracker():
logger.info(f"{self.log_prefix} 追踪检查已解决,结束追踪器")
self.reflector.reflect_tracker.reset_tracker() # 结束当前追踪器
# TODO: 完成反思器之后的逻辑
def _handle_loop_completion(self, task: asyncio.Task):
"""当 _hfc_func 任务完成时执行的回调。"""

View File

@@ -0,0 +1,814 @@
import asyncio
import time
import traceback
import random
from typing import List, Optional, Dict, Any, Tuple, TYPE_CHECKING
from rich.traceback import install
from src.config.config import global_config
from src.common.logger import get_logger
from src.common.data_models.info_data_model import ActionPlannerInfo
from src.common.data_models.message_data_model import ReplyContentType
from src.chat.message_receive.chat_manager import chat_manager, BotChatSession
from src.chat.utils.prompt_builder import global_prompt_manager
from src.chat.utils.timer_calculator import Timer
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.heart_flow.hfc_utils import CycleDetail
from src.bw_learner.expression_learner_old import expression_learner_manager
from src.chat.heart_flow.frequency_control import frequency_control_manager
from src.bw_learner.reflect_tracker import reflect_tracker_manager
from src.bw_learner.expression_reflector_old import expression_reflector_manager
from src.bw_learner.message_recorder_old import extract_and_distribute_messages
from src.person_info.person_info import Person
from src.plugin_system.base.component_types import EventType, ActionInfo
from src.plugin_system.core import events_manager
from src.plugin_system.apis import generator_api, send_api, message_api, database_api
from src.chat.utils.chat_message_builder import (
build_readable_messages_with_id,
get_raw_msg_before_timestamp_with_chat,
)
from src.chat.utils.utils import record_replyer_action_temp
from src.memory_system.chat_history_summarizer import ChatHistorySummarizer
if TYPE_CHECKING:
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.data_models.message_data_model import ReplySetModel
# Fallback loop-info payload used when a think/act cycle fails entirely.
# NOTE(review): "taken_time" is evaluated once at import time, not when the
# error actually occurs — confirm whether callers re-stamp it before use.
ERROR_LOOP_INFO = {
    "loop_plan_info": {
        "action_result": {
            "action_type": "error",
            "action_data": {},
            "reasoning": "循环处理失败",
        },
    },
    "loop_action_info": {
        "action_taken": False,
        "reply_text": "",
        "command": "",
        "taken_time": time.time(),
    },
}

# Pretty tracebacks via rich; show 3 extra context lines around each frame.
install(extra_lines=3)

# NOTE: the old action-modification timeout constant was removed because
# actions are now executed sequentially.
logger = get_logger("hfc")  # logger name changed
class HeartFChatting:
    """
    Manages a continuous Focus Chat loop for a single chat session.

    Generates replies inside a specific chat stream; its lifecycle is
    controlled by the FOCUSED state of its associated SubHeartflow.
    """

    def __init__(self, session_id: str):
        """
        Initialize HeartFChatting for one chat session.

        Args:
            session_id: unique identifier of the chat session.

        Raises:
            ValueError: if no chat session exists for ``session_id``.
        """
        # --- basic attributes ------------------------------------------------
        self.session_id: str = session_id  # chat session id
        session = chat_manager.get_session_by_session_id(session_id)
        if not session:
            raise ValueError(f"未找到 session_id={session_id} 的聊天会话")
        self.chat_session: BotChatSession = session
        # BUGFIX: several methods of this class (_send_and_store_reply,
        # _observe, _send_response, _execute_action) still read
        # ``self.chat_stream``; after the session/stream rename nothing set it,
        # so every one of them raised AttributeError. Keep an alias.
        self.chat_stream: BotChatSession = session
        self.log_prefix = f"[{chat_manager.get_session_name(self.session_id) or self.session_id}]"

        self.expression_learner = expression_learner_manager.get_expression_learner(self.session_id)
        self.action_manager = ActionManager()
        self.action_planner = ActionPlanner(chat_id=self.session_id, action_manager=self.action_manager)
        self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.session_id)

        # --- loop control state ----------------------------------------------
        self.running: bool = False
        self._loop_task: Optional[asyncio.Task] = None  # main loop task

        # --- per-cycle bookkeeping --------------------------------------------
        self.history_loop: List[CycleDetail] = []
        self._cycle_counter = 0
        self._current_cycle_detail: CycleDetail = None  # type: ignore
        self.last_read_time = time.time() - 2
        self.is_mute = False
        self.last_active_time = time.time()  # last time a non-no_reply action ran
        self.question_probability_multiplier = 1
        self.questioned = False
        # Consecutive no_reply count, used to raise the reply threshold dynamically.
        self.consecutive_no_reply_count = 0
        # Chat-history summarizer (background compressor of chat logs).
        self.chat_history_summarizer = ChatHistorySummarizer(session_id=self.session_id)
async def start(self):
"""检查是否需要启动主循环,如果未激活则启动。"""
# 如果循环已经激活,直接返回
if self.running:
logger.debug(f"{self.log_prefix} HeartFChatting 已激活,无需重复启动")
return
try:
# 标记为活动状态,防止重复启动
self.running = True
self._loop_task = asyncio.create_task(self._main_chat_loop())
self._loop_task.add_done_callback(self._handle_loop_completion)
# 启动聊天内容概括器的后台定期检查循环
await self.chat_history_summarizer.start()
logger.info(f"{self.log_prefix} HeartFChatting 启动完成")
except Exception as e:
# 启动失败时重置状态
self.running = False
self._loop_task = None
logger.error(f"{self.log_prefix} HeartFChatting 启动失败: {e}")
raise
def _handle_loop_completion(self, task: asyncio.Task):
"""当 _hfc_loop 任务完成时执行的回调。"""
try:
if exception := task.exception():
logger.error(f"{self.log_prefix} HeartFChatting: 脱离了聊天(异常): {exception}")
logger.error(traceback.format_exc()) # Log full traceback for exceptions
else:
logger.info(f"{self.log_prefix} HeartFChatting: 脱离了聊天 (外部停止)")
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} HeartFChatting: 结束了聊天")
def start_cycle(self) -> Tuple[Dict[str, float], str]:
self._cycle_counter += 1
self._current_cycle_detail = CycleDetail(self._cycle_counter)
self._current_cycle_detail.thinking_id = f"tid{str(round(time.time(), 2))}"
cycle_timers = {}
return cycle_timers, self._current_cycle_detail.thinking_id
def end_cycle(self, loop_info, cycle_timers):
self._current_cycle_detail.set_loop_info(loop_info)
self.history_loop.append(self._current_cycle_detail)
self._current_cycle_detail.timers = cycle_timers
self._current_cycle_detail.end_time = time.time()
def print_cycle_info(self, cycle_timers):
# 记录循环信息和计时器结果
timer_strings = []
for name, elapsed in cycle_timers.items():
if elapsed < 0.1:
# 不显示小于0.1秒的计时器
continue
formatted_time = f"{elapsed:.2f}"
timer_strings.append(f"{name}: {formatted_time}")
logger.info(
f"{self.log_prefix}{self._current_cycle_detail.cycle_id}次思考,"
f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒;" # type: ignore
+ (f"详情: {'; '.join(timer_strings)}" if timer_strings else "")
)
    async def _loopbody(self):
        """One iteration of the main chat loop.

        Reads messages that arrived since ``last_read_time`` and, when enough
        are present, runs an observe/plan/act pass.  Always returns True so
        the caller keeps looping.
        """
        recent_messages_list = message_api.get_messages_by_time_in_chat(
            chat_id=self.session_id,
            start_time=self.last_read_time,
            end_time=time.time(),
            limit=20,
            limit_mode="latest",
            filter_mai=True,
            filter_command=False,
            filter_intercept_message_level=0,
        )
        # Raise the wake-up threshold as consecutive no_reply cycles pile up:
        # after 3 no_replies the threshold is "1.5" (50% chance of 1, 50% of 2);
        # after 5 it becomes a hard 2 messages.
        if self.consecutive_no_reply_count >= 5:
            threshold = 2
        elif self.consecutive_no_reply_count >= 3:
            # "1.5": 50% chance the threshold is 1, 50% chance it is 2.
            threshold = 2 if random.random() < 0.5 else 1
        else:
            threshold = 1
        if len(recent_messages_list) >= threshold:
            self.last_read_time = time.time()
            # A message that @-mentions the bot always forces a reply.
            mentioned_message = None
            for message in recent_messages_list:
                if (message.is_mentioned or message.is_at) and global_config.chat.mentioned_bot_reply:
                    mentioned_message = message
            # Frequency control: reply on a mention, otherwise roll against the
            # configured talk value adjusted per chat.
            if mentioned_message:
                await self._observe(recent_messages_list=recent_messages_list, force_reply_message=mentioned_message)
            elif (
                random.random()
                < TempMethodsHFC.get_talk_value(self.session_id)
                * frequency_control_manager.get_or_create_frequency_control(self.session_id).get_talk_frequency_adjust()
            ):
                await self._observe(recent_messages_list=recent_messages_list)
            else:
                # Not mentioned and the frequency roll failed: stay silent and
                # back off to avoid re-triggering too often.
                await asyncio.sleep(10)
                return True
        else:
            await asyncio.sleep(0.2)
            return True
        return True
    async def _send_and_store_reply(
        self,
        response_set: "ReplySetModel",
        action_message: "DatabaseMessages",
        cycle_timers: Dict[str, float],
        thinking_id,
        actions,
        selected_expressions: Optional[List[int]] = None,
        quote_message: Optional[bool] = None,
    ) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
        """Send a generated reply set, persist the reply action, build loop info.

        Args:
            response_set: the generated reply parts to send.
            action_message: the message being replied to.
            cycle_timers: timer dict for this cycle.
            thinking_id: id of the current think cycle.
            actions: planner action results, stored in loop info.
            selected_expressions: expression ids chosen by the generator.
            quote_message: LLM's quote decision, forwarded to _send_response.

        Returns:
            (loop_info, sent reply text, cycle_timers).
        """
        with Timer("回复发送", cycle_timers):
            reply_text = await self._send_response(
                reply_set=response_set,
                message_data=action_message,
                selected_expressions=selected_expressions,
                quote_message=quote_message,
            )
        # Resolve platform: prefer the message's chat info, fall back to the
        # stream, finally to "unknown".
        # NOTE(review): this reads ``self.chat_stream`` but __init__ only sets
        # ``self.chat_session`` — confirm the attribute exists at runtime.
        platform = action_message.chat_info.platform
        if platform is None:
            platform = getattr(self.chat_stream, "platform", "unknown")
        person = Person(platform=platform, user_id=action_message.user_info.user_id)
        person_name = person.person_name
        action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
        await database_api.store_action_info(
            chat_stream=self.chat_stream,
            action_build_into_prompt=False,
            action_prompt_display=action_prompt_display,
            action_done=True,
            thinking_id=thinking_id,
            action_data={"reply_text": reply_text},
            action_name="reply",
        )
        # Build the loop info describing this cycle's outcome.
        loop_info: Dict[str, Any] = {
            "loop_plan_info": {
                "action_result": actions,
            },
            "loop_action_info": {
                "action_taken": True,
                "reply_text": reply_text,
                "command": "",
                "taken_time": time.time(),
            },
        }
        return loop_info, reply_text, cycle_timers
async def _observe(
self, # interest_value: float = 0.0,
recent_messages_list: Optional[List["DatabaseMessages"]] = None,
force_reply_message: Optional["DatabaseMessages"] = None,
) -> bool: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
if recent_messages_list is None:
recent_messages_list = []
_reply_text = "" # 初始化reply_text变量避免UnboundLocalError
# -------------------------------------------------------------------------
# ReflectTracker Check
# 在每次回复前检查一次上下文,看是否有反思问题得到了解答
# -------------------------------------------------------------------------
reflector = expression_reflector_manager.get_or_create_reflector(self.session_id)
await reflector.check_and_ask()
tracker = reflect_tracker_manager.get_tracker(self.session_id)
if tracker:
resolved = await tracker.trigger_tracker()
if resolved:
reflect_tracker_manager.remove_tracker(self.session_id)
logger.info(f"{self.log_prefix} ReflectTracker resolved and removed.")
start_time = time.time()
async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
# 通过 MessageRecorder 统一提取消息并分发给 expression_learner 和 jargon_miner
# 在 replyer 执行时触发,统一管理时间窗口,避免重复获取消息
asyncio.create_task(extract_and_distribute_messages(self.session_id))
# 添加curious检测任务 - 检测聊天记录中的矛盾、冲突或需要提问的内容
# asyncio.create_task(check_and_make_question(self.stream_id))
# 添加聊天内容概括任务 - 累积、打包和压缩聊天记录
# 注意后台循环已在start()中启动,这里作为额外触发点,在有思考时立即处理
# asyncio.create_task(self.chat_history_summarizer.process())
cycle_timers, thinking_id = self.start_cycle()
logger.info(
f"{self.log_prefix} 开始第{self._cycle_counter}次思考(频率: {TempMethodsHFC.get_talk_value(self.session_id)})"
)
# 第一步:动作检查
available_actions: Dict[str, ActionInfo] = {}
try:
await self.action_modifier.modify_actions()
available_actions = self.action_manager.get_using_actions()
except Exception as e:
logger.error(f"{self.log_prefix} 动作修改失败: {e}")
# 执行planner
is_group_chat, chat_target_info, _ = self.action_planner.get_necessary_info()
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.session_id,
timestamp=time.time(),
limit=int(global_config.chat.max_context_size * 0.6),
filter_intercept_message_level=1,
)
chat_content_block, message_id_list = build_readable_messages_with_id(
messages=message_list_before_now,
timestamp_mode="normal_no_YMD",
read_mark=self.action_planner.last_obs_time_mark,
truncate=True,
show_actions=True,
)
prompt_info = await self.action_planner.build_planner_prompt(
is_group_chat=is_group_chat,
chat_target_info=chat_target_info,
current_available_actions=available_actions,
chat_content_block=chat_content_block,
message_id_list=message_id_list,
)
continue_flag, modified_message = await events_manager.handle_mai_events(
EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id
)
if not continue_flag:
return False
if modified_message and modified_message._modify_flags.modify_llm_prompt:
prompt_info = (modified_message.llm_prompt, prompt_info[1])
with Timer("规划器", cycle_timers):
action_to_use_info = await self.action_planner.plan(
loop_start_time=self.last_read_time,
available_actions=available_actions,
force_reply_message=force_reply_message,
)
logger.info(
f"{self.log_prefix} 决定执行{len(action_to_use_info)}个动作: {' '.join([a.action_type for a in action_to_use_info])}"
)
# 3. 并行执行所有动作
action_tasks = [
asyncio.create_task(
self._execute_action(action, action_to_use_info, thinking_id, available_actions, cycle_timers)
)
for action in action_to_use_info
]
# 并行执行所有任务
results = await asyncio.gather(*action_tasks, return_exceptions=True)
# 处理执行结果
reply_loop_info = None
reply_text_from_reply = ""
action_success = False
action_reply_text = ""
excute_result_str = ""
for result in results:
excute_result_str += f"{result['action_type']} 执行结果:{result['result']}\n"
if isinstance(result, BaseException):
logger.error(f"{self.log_prefix} 动作执行异常: {result}")
continue
if result["action_type"] != "reply":
action_success = result["success"]
action_reply_text = result["result"]
elif result["action_type"] == "reply":
if result["success"]:
reply_loop_info = result["loop_info"]
reply_text_from_reply = result["result"]
else:
logger.warning(f"{self.log_prefix} 回复动作执行失败")
self.action_planner.add_plan_excute_log(result=excute_result_str)
# 构建最终的循环信息
if reply_loop_info:
# 如果有回复信息使用回复的loop_info作为基础
loop_info = reply_loop_info
# 更新动作执行信息
loop_info["loop_action_info"].update(
{
"action_taken": action_success,
"taken_time": time.time(),
}
)
_reply_text = reply_text_from_reply
else:
# 没有回复信息构建纯动作的loop_info
loop_info = {
"loop_plan_info": {
"action_result": action_to_use_info,
},
"loop_action_info": {
"action_taken": action_success,
"reply_text": action_reply_text,
"taken_time": time.time(),
},
}
_reply_text = action_reply_text
self.end_cycle(loop_info, cycle_timers)
self.print_cycle_info(cycle_timers)
end_time = time.time()
if end_time - start_time < global_config.chat.planner_smooth:
wait_time = global_config.chat.planner_smooth - (end_time - start_time)
await asyncio.sleep(wait_time)
else:
await asyncio.sleep(0.1)
return True
# async def _main_chat_loop(self):
# """主循环,持续进行计划并可能回复消息,直到被外部取消。"""
# try:
# while self.running:
# # 主循环
# success = await self._loopbody()
# await asyncio.sleep(0.1)
# if not success:
# break
# except asyncio.CancelledError:
# # 设置了关闭标志位后被取消是正常流程
# logger.info(f"{self.log_prefix} 麦麦已关闭聊天")
# except Exception:
# logger.error(f"{self.log_prefix} 麦麦聊天意外错误将于3s后尝试重新启动")
# print(traceback.format_exc())
# await asyncio.sleep(3)
# self._loop_task = asyncio.create_task(self._main_chat_loop())
# logger.error(f"{self.log_prefix} 结束了当前聊天循环")
async def _handle_action(
self,
action: str,
action_reasoning: str,
action_data: dict,
cycle_timers: Dict[str, float],
thinking_id: str,
action_message: Optional["DatabaseMessages"] = None,
) -> tuple[bool, str, str]:
"""
处理规划动作,使用动作工厂创建相应的动作处理器
参数:
action: 动作类型
action_reasoning: 决策理由
action_data: 动作数据,包含不同动作需要的参数
cycle_timers: 计时器字典
thinking_id: 思考ID
action_message: 消息数据
返回:
tuple[bool, str, str]: (是否执行了动作, 思考消息ID, 命令)
"""
try:
# 使用工厂创建动作处理器实例
try:
action_handler = self.action_manager.create_action(
action_name=action,
action_data=action_data,
cycle_timers=cycle_timers,
thinking_id=thinking_id,
chat_stream=self.chat_stream,
log_prefix=self.log_prefix,
action_reasoning=action_reasoning,
action_message=action_message,
)
except Exception as e:
logger.error(f"{self.log_prefix} 创建动作处理器时出错: {e}")
traceback.print_exc()
return False, ""
# 处理动作并获取结果(固定记录一次动作信息)
result = await action_handler.execute()
success, action_text = result
return success, action_text
except Exception as e:
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
traceback.print_exc()
return False, ""
    async def _send_response(
        self,
        reply_set: "ReplySetModel",
        message_data: "DatabaseMessages",
        selected_expressions: Optional[List[int]] = None,
        quote_message: Optional[bool] = None,
    ) -> str:
        """Send each text part of *reply_set* to the stream; return joined text.

        Whether the first part quotes the original message depends on
        ``global_config.chat.llm_quote``: if true, the LLM's quote decision
        (``quote_message``) is honored; otherwise a heuristic based on how
        many new messages arrived since the last read is used.
        """
        if global_config.chat.llm_quote:
            # Config on: the LLM decides whether to quote.
            if quote_message is None:
                logger.warning(f"{self.log_prefix} quote_message 参数为空,不引用")
                need_reply = False
            else:
                need_reply = quote_message
            if need_reply:
                logger.info(f"{self.log_prefix} LLM 决定使用引用回复")
        else:
            # Config off: quote when several new messages arrived since the
            # last read, or the last read was more than 90 seconds ago.
            # NOTE(review): reads ``self.chat_stream``, which __init__ does not
            # set (only ``chat_session``) — confirm the attribute exists.
            new_message_count = message_api.count_new_messages(
                chat_id=self.chat_stream.stream_id, start_time=self.last_read_time, end_time=time.time()
            )
            need_reply = new_message_count >= random.randint(2, 3) or time.time() - self.last_read_time > 90
            if need_reply:
                logger.info(
                    f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息使用引用回复或者上次回复时间超过90秒"
                )
        reply_text = ""
        first_replied = False
        for reply_content in reply_set.reply_data:
            # Only text parts are sent here; other content types are skipped.
            if reply_content.content_type != ReplyContentType.TEXT:
                continue
            data: str = reply_content.content  # type: ignore
            if not first_replied:
                # First part: may quote the original; no typing indicator.
                await send_api.text_to_stream(
                    text=data,
                    stream_id=self.chat_stream.stream_id,
                    reply_message=message_data,
                    set_reply=need_reply,
                    typing=False,
                    selected_expressions=selected_expressions,
                )
                first_replied = True
            else:
                # Follow-up parts: never quote, show a typing indicator.
                await send_api.text_to_stream(
                    text=data,
                    stream_id=self.chat_stream.stream_id,
                    reply_message=message_data,
                    set_reply=False,
                    typing=True,
                    selected_expressions=selected_expressions,
                )
            reply_text += data
        return reply_text
    async def _execute_action(
        self,
        action_planner_info: ActionPlannerInfo,
        chosen_action_plan_infos: List[ActionPlannerInfo],
        thinking_id: str,
        available_actions: Dict[str, ActionInfo],
        cycle_timers: Dict[str, float],
    ):
        """Execute a single planned action and return a result dict.

        The dict always carries "action_type", "success" and "result";
        reply actions additionally carry "loop_info", and failures "error".
        ``no_reply`` and ``reply`` are handled inline here; everything else
        goes through the action system via ``_handle_action``.
        """
        try:
            with Timer(f"动作{action_planner_info.action_type}", cycle_timers):
                # no_reply is handled inline, bypassing the action system.
                if action_planner_info.action_type == "no_reply":
                    reason = action_planner_info.reasoning or "选择不回复"
                    # Count consecutive no_replies (raises the reply threshold).
                    self.consecutive_no_reply_count += 1
                    await database_api.store_action_info(
                        chat_stream=self.chat_stream,
                        action_build_into_prompt=False,
                        action_prompt_display=reason,
                        action_done=True,
                        thinking_id=thinking_id,
                        action_data={},
                        action_name="no_reply",
                        action_reasoning=reason,
                    )
                    return {"action_type": "no_reply", "success": True, "result": "选择不回复", "command": ""}
                elif action_planner_info.action_type == "reply":
                    # reply is also handled inline.
                    self.questioned = False
                    # Reset the consecutive no_reply counter.
                    self.consecutive_no_reply_count = 0
                    reason = action_planner_info.reasoning or ""
                    # Map the think_mode config to a think_level value.
                    think_mode = global_config.chat.think_mode
                    if think_mode == "default":
                        think_level = 0
                    elif think_mode == "deep":
                        think_level = 1
                    elif think_mode == "dynamic":
                        # dynamic: take the level the planner returned.
                        think_level = action_planner_info.action_data.get("think_level", 1)
                    else:
                        # Unknown value: fall back to default mode.
                        think_level = 0
                    # Use the planner's overall reasoning as the reply reason.
                    planner_reasoning = action_planner_info.action_reasoning or reason
                    record_replyer_action_temp(
                        chat_id=self.session_id,
                        reason=reason,
                        think_level=think_level,
                    )
                    await database_api.store_action_info(
                        chat_stream=self.chat_stream,
                        action_build_into_prompt=False,
                        action_prompt_display=reason,
                        action_done=True,
                        thinking_id=thinking_id,
                        action_data={},
                        action_name="reply",
                        action_reasoning=reason,
                    )
                    # Extract the planner's unknown-words list (reply only).
                    unknown_words = None
                    quote_message = None
                    if isinstance(action_planner_info.action_data, dict):
                        uw = action_planner_info.action_data.get("unknown_words")
                        if isinstance(uw, list):
                            cleaned_uw: List[str] = []
                            for item in uw:
                                if isinstance(item, str):
                                    s = item.strip()
                                    if s:
                                        cleaned_uw.append(s)
                            if cleaned_uw:
                                unknown_words = cleaned_uw
                        # Extract the quote flag from the planner's action_data.
                        qm = action_planner_info.action_data.get("quote")
                        if qm is not None:
                            # Accept several encodings: true/false, "true"/"false", 1/0.
                            if isinstance(qm, bool):
                                quote_message = qm
                            elif isinstance(qm, str):
                                quote_message = qm.lower() in ("true", "1", "yes")
                            elif isinstance(qm, (int, float)):
                                quote_message = bool(qm)
                            logger.info(f"{self.log_prefix} {qm}引用回复设置: {quote_message}")
                    success, llm_response = await generator_api.generate_reply(
                        chat_stream=self.chat_stream,
                        reply_message=action_planner_info.action_message,
                        available_actions=available_actions,
                        chosen_actions=chosen_action_plan_infos,
                        reply_reason=planner_reasoning,
                        unknown_words=unknown_words,
                        enable_tool=global_config.tool.enable_tool,
                        request_type="replyer",
                        from_plugin=False,
                        reply_time_point=action_planner_info.action_data.get("loop_start_time", time.time()),
                        think_level=think_level,
                    )
                    if not success or not llm_response or not llm_response.reply_set:
                        if action_planner_info.action_message:
                            logger.info(f"{action_planner_info.action_message.processed_plain_text} 的回复生成失败")
                        else:
                            logger.info("回复生成失败")
                        return {"action_type": "reply", "success": False, "result": "回复生成失败", "loop_info": None}
                    response_set = llm_response.reply_set
                    selected_expressions = llm_response.selected_expressions
                    loop_info, reply_text, _ = await self._send_and_store_reply(
                        response_set=response_set,
                        action_message=action_planner_info.action_message,  # type: ignore
                        cycle_timers=cycle_timers,
                        thinking_id=thinking_id,
                        actions=chosen_action_plan_infos,
                        selected_expressions=selected_expressions,
                        quote_message=quote_message,
                    )
                    self.last_active_time = time.time()
                    # NOTE(review): action_message may be None on some planner
                    # outputs — .processed_plain_text would then raise; confirm.
                    return {
                        "action_type": "reply",
                        "success": True,
                        "result": f"你使用reply动作' {action_planner_info.action_message.processed_plain_text} '这句话进行了回复,回复内容为: '{reply_text}'",
                        "loop_info": loop_info,
                    }
                else:
                    # Any other action goes through the action system.
                    with Timer("动作执行", cycle_timers):
                        success, result = await self._handle_action(
                            action=action_planner_info.action_type,
                            action_reasoning=action_planner_info.action_reasoning or "",
                            action_data=action_planner_info.action_data or {},
                            cycle_timers=cycle_timers,
                            thinking_id=thinking_id,
                            action_message=action_planner_info.action_message,
                        )
                    self.last_active_time = time.time()
                    return {
                        "action_type": action_planner_info.action_type,
                        "success": success,
                        "result": result,
                    }
        except Exception as e:
            logger.error(f"{self.log_prefix} 执行动作时出错: {e}")
            logger.error(f"{self.log_prefix} 错误信息: {traceback.format_exc()}")
            return {
                "action_type": action_planner_info.action_type,
                "success": False,
                "result": "",
                "loop_info": None,
                "error": str(e),
            }
class TempMethodsHFC:
    """Temporary holder for talk-value helpers (pending refactor)."""

    @staticmethod
    def get_talk_value(chat_id: Optional[str]) -> float:
        """Return the talk probability for *chat_id*, honoring time rules.

        Chat-specific rules (platform/item set) are checked first, then
        global rules (platform and item both empty); otherwise the configured
        default ``talk_value`` is returned.
        """
        result = global_config.chat.talk_value or 0.0000001
        if not global_config.chat.enable_talk_value_rules or not global_config.chat.talk_value_rules:
            return result
        # Redundant local ``import time`` removed: the module already imports
        # time at top level.
        local_time = time.localtime()
        now_min = local_time.tm_hour * 60 + local_time.tm_min
        # Chat-specific rules first.
        if chat_id:
            # Hoisted out of the loop (was re-imported once per rule).
            from src.chat.message_receive.chat_stream import get_chat_manager

            for rule in global_config.chat.talk_value_rules:
                if not rule.platform and not rule.item_id:
                    continue  # both empty means "global" — handled below
                is_group = rule.rule_type == "group"
                stream_id = get_chat_manager().get_stream_id(rule.platform, str(rule.item_id), is_group)
                if stream_id != chat_id:
                    continue
                parsed_range = TempMethodsHFC._parse_range(rule.time)
                if not parsed_range:
                    continue
                if TempMethodsHFC._in_range(now_min, *parsed_range):
                    return rule.value or 0.0
        # Then global rules.
        for rule in global_config.chat.talk_value_rules:
            if rule.platform or rule.item_id:
                continue  # has a target → chat-specific rule, skip here
            parsed_range = TempMethodsHFC._parse_range(rule.time)
            if not parsed_range:
                continue
            if TempMethodsHFC._in_range(now_min, *parsed_range):
                return rule.value or 0.0000001
        return result

    @staticmethod
    def _in_range(now_min: int, start_min: int, end_min: int) -> bool:
        """True if *now_min* lies in [start_min, end_min], wrapping midnight."""
        if start_min <= end_min:
            return start_min <= now_min <= end_min
        return now_min >= start_min or now_min <= end_min

    @staticmethod
    def _parse_range(range_str: str) -> Optional[tuple[int, int]]:
        """Parse "HH:MM-HH:MM" into (start_min, end_min); None if malformed."""
        try:
            start_str, end_str = [s.strip() for s in range_str.split("-")]
            sh, sm = [int(x) for x in start_str.split(":")]
            eh, em = [int(x) for x in end_str.split(":")]
            return sh * 60 + sm, eh * 60 + em
        except Exception:
            return None

View File

@@ -12,13 +12,14 @@ from src.common.message_repository import count_messages
logger = get_logger(__name__)
@dataclass
class CyclePlanInfo:
...
class CyclePlanInfo: ...
@dataclass
class CycleActionInfo:
...
class CycleActionInfo: ...
class CycleDetail:
"""循环信息记录类"""

View File

@@ -0,0 +1,561 @@
import time
import asyncio
import urllib3
from abc import abstractmethod
from dataclasses import dataclass
from rich.traceback import install
from typing import Optional, Any, List
from maim_message import Seg, UserInfo, BaseMessageInfo, MessageBase
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.utils.utils_image import get_image_manager
from src.common.utils.utils_voice import get_voice_text
from .chat_stream import ChatStream
# Pretty tracebacks via rich; show 3 extra context lines around each frame.
install(extra_lines=3)

logger = get_logger("chat_message")

# Suppress urllib3's insecure-request SSL warnings.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Concurrency cap for VLM processing (avoid stalls from too many images at once).
_vlm_semaphore = asyncio.Semaphore(3)

# Message is the message data class: it stores group id, user id, message id,
# raw content, processed plain text and timestamp for one chat message.
@dataclass
class Message(MessageBase):
    """A chat message bound to its ChatStream.

    NOTE(review): @dataclass does not generate __init__ here because one is
    defined explicitly below; confirm the decorator is still wanted.
    """

    # Stream the message belongs to (set in __init__ / update_chat_stream).
    chat_stream: "ChatStream" = None  # type: ignore
    # The message this one replies to, if any.
    reply: Optional["Message"] = None
    # Plain-text rendering of the message content.
    processed_plain_text: str = ""

    def __init__(
        self,
        message_id: str,
        chat_stream: "ChatStream",
        user_info: UserInfo,
        message_segment: Optional[Seg] = None,
        timestamp: Optional[float] = None,
        reply: Optional["MessageRecv"] = None,
        processed_plain_text: str = "",
    ):
        """Build a Message and its BaseMessageInfo from the chat stream.

        Args:
            message_id: unique message id.
            chat_stream: stream this message belongs to.
            user_info: sender information.
            message_segment: structured content segment, if any.
            timestamp: message time; defaults to now (3-decimal precision).
            reply: the message being replied to, if any.
            processed_plain_text: pre-rendered plain text.
        """
        # Use the given timestamp or the current time.
        current_timestamp = timestamp if timestamp is not None else round(time.time(), 3)
        # Assemble the base message info from the stream's identity.
        message_info = BaseMessageInfo(
            platform=chat_stream.platform,
            message_id=message_id,
            time=current_timestamp,
            group_info=chat_stream.group_info,
            user_info=user_info,
        )
        # Parent init; raw_message is unknown for locally constructed messages.
        super().__init__(message_info=message_info, message_segment=message_segment, raw_message=None)  # type: ignore
        self.chat_stream = chat_stream
        # Text-processing related attribute.
        self.processed_plain_text = processed_plain_text
        # Message being replied to.
        self.reply = reply
# async def _process_message_segments(self, segment: Seg) -> str:
# # sourcery skip: remove-unnecessary-else, swap-if-else-branches
# """递归处理消息段,转换为文字描述
# Args:
# segment: 要处理的消息段
# Returns:
# str: 处理后的文本
# """
# if segment.type == "seglist":
# # 处理消息段列表 - 使用并行处理提升性能
# tasks = [self._process_message_segments(seg) for seg in segment.data] # type: ignore
# results = await asyncio.gather(*tasks, return_exceptions=True)
# segments_text = []
# for result in results:
# if isinstance(result, Exception):
# logger.error(f"处理消息段时出错: {result}")
# continue
# if result:
# segments_text.append(result)
# return " ".join(segments_text)
# elif segment.type == "forward":
# # 处理转发消息 - 使用并行处理
# async def process_forward_node(node_dict):
# message = MessageBase.from_dict(node_dict) # type: ignore
# processed_text = await self._process_message_segments(message.message_segment)
# if processed_text:
# return f"{global_config.bot.nickname}: {processed_text}"
# return None
# tasks = [process_forward_node(node_dict) for node_dict in segment.data]
# results = await asyncio.gather(*tasks, return_exceptions=True)
# segments_text = []
# for result in results:
# if isinstance(result, Exception):
# logger.error(f"处理转发节点时出错: {result}")
# continue
# if result:
# segments_text.append(result)
# return "[合并消息]: " + "\n-- ".join(segments_text)
# else:
# # 处理单个消息段
# return await self._process_single_segment(segment) # type: ignore
# @abstractmethod
# async def _process_single_segment(self, segment) -> str:
# pass
@dataclass
class MessageRecv(Message):
    """Received message, deserialized from a MessageCQ dict."""

    def __init__(self, message_dict: dict[str, Any]):
        """Initialize from a MessageCQ-serialized dict.

        NOTE(review): does not call ``super().__init__`` — all base attributes
        are set directly from the dict; confirm that is intentional.

        Args:
            message_dict: dict produced by MessageCQ serialization.
        """
        self.message_info = BaseMessageInfo.from_dict(message_dict.get("message_info", {}))
        self.message_segment = Seg.from_dict(message_dict.get("message_segment", {}))
        self.raw_message = message_dict.get("raw_message")
        self.processed_plain_text = message_dict.get("processed_plain_text", "")
        # Content-type flags (filled during segment processing).
        self.is_emoji = False
        self.has_emoji = False
        self.is_picid = False
        self.has_picid = False
        self.is_voice = False
        # NOTE(review): is_mentioned starts as None, is set to True below and
        # to a float in the (commented) segment processor — mixed typing.
        self.is_mentioned = None
        self.is_at = False
        self.reply_probability_boost = 0.0
        self.is_notify = False
        self.is_command = False
        self.intercept_message_level = 0
        self.priority_mode = "interest"
        self.priority_info = None
        self.interest_value: float = None  # type: ignore
        self.key_words = []
        self.key_words_lite = []
        # Honor the adapter's @-flag passed via additional_config.
        try:
            msg_info_dict = message_dict.get("message_info", {})
            add_cfg = msg_info_dict.get("additional_config") or {}
            if isinstance(add_cfg, dict) and add_cfg.get("at_bot"):
                # Mark as mentioned to raise reply priority downstream.
                self.is_mentioned = True  # type: ignore
        except Exception:
            pass
def update_chat_stream(self, chat_stream: "ChatStream"):
self.chat_stream = chat_stream
# async def process(self) -> None:
# """处理消息内容,生成纯文本和详细文本
# 这个方法必须在创建实例后显式调用,因为它包含异步操作。
# """
# # print(f"self.message_segment: {self.message_segment}")
# self.processed_plain_text = await self._process_message_segments(self.message_segment)
# async def _process_single_segment(self, segment: Seg) -> str:
# """处理单个消息段
# Args:
# segment: 消息段
# Returns:
# str: 处理后的文本
# """
# try:
# if segment.type == "text":
# self.is_picid = False
# self.is_emoji = False
# return segment.data # type: ignore
# elif segment.type == "image":
# # 如果是base64图片数据
# if isinstance(segment.data, str):
# self.has_picid = True
# self.is_picid = True
# self.is_emoji = False
# image_manager = get_image_manager()
# # 使用 semaphore 限制 VLM 并发,避免同时处理太多图片
# async with _vlm_semaphore:
# _, processed_text = await image_manager.process_image(segment.data)
# return processed_text
# return "[发了一张图片,网卡了加载不出来]"
# elif segment.type == "emoji":
# self.has_emoji = True
# self.is_emoji = True
# self.is_picid = False
# self.is_voice = False
# if isinstance(segment.data, str):
# # 使用 semaphore 限制 VLM 并发
# async with _vlm_semaphore:
# return await get_image_manager().get_emoji_description(segment.data)
# return "[发了一个表情包,网卡了加载不出来]"
# elif segment.type == "voice":
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = True
# if isinstance(segment.data, str):
# return await get_voice_text(segment.data)
# return "[发了一段语音,网卡了加载不出来]"
# elif segment.type == "mention_bot":
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = False
# self.is_mentioned = float(segment.data) # type: ignore
# return ""
# elif segment.type == "priority_info":
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = False
# if isinstance(segment.data, dict):
# # 处理优先级信息
# self.priority_mode = "priority"
# self.priority_info = segment.data
# """
# {
# 'message_type': 'vip', # vip or normal
# 'message_priority': 1.0, # 优先级大为优先float
# }
# """
# return ""
# elif segment.type == "video_card":
# # 处理视频卡片消息
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = False
# if isinstance(segment.data, dict):
# file_name = segment.data.get("file", "未知视频")
# file_size = segment.data.get("file_size", "")
# url = segment.data.get("url", "")
# text = f"[视频: {file_name}"
# if file_size:
# text += f", 大小: {file_size}字节"
# text += "]"
# if url:
# text += f" 链接: {url}"
# return text
# return "[视频]"
# elif segment.type == "music_card":
# # 处理音乐卡片消息
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = False
# if isinstance(segment.data, dict):
# title = segment.data.get("title", "未知歌曲")
# singer = segment.data.get("singer", "")
# tag = segment.data.get("tag", "") # 音乐来源,如"网易云音乐"
# jump_url = segment.data.get("jump_url", "")
# music_url = segment.data.get("music_url", "")
# text = f"[音乐: {title}"
# if singer:
# text += f" - {singer}"
# if tag:
# text += f" ({tag})"
# text += "]"
# if jump_url:
# text += f" 跳转链接: {jump_url}"
# if music_url:
# text += f" 音乐链接: {music_url}"
# return text
# return "[音乐]"
# elif segment.type == "miniapp_card":
# # 处理小程序分享卡片如B站视频分享
# self.is_picid = False
# self.is_emoji = False
# self.is_voice = False
# if isinstance(segment.data, dict):
# title = segment.data.get("title", "") # 小程序名称
# desc = segment.data.get("desc", "") # 内容描述
# source_url = segment.data.get("source_url", "") # 原始链接
# url = segment.data.get("url", "") # 小程序链接
# text = "[小程序分享"
# if title:
# text += f" - {title}"
# text += "]"
# if desc:
# text += f" {desc}"
# if source_url:
# text += f" 链接: {source_url}"
# elif url:
# text += f" 链接: {url}"
# return text
# return "[小程序分享]"
# else:
# return ""
# except Exception as e:
# logger.error(f"处理消息段失败: {str(e)}, 类型: {segment.type}, 数据: {segment.data}")
# return f"[处理失败的{segment.type}消息]"
@dataclass
class MessageProcessBase(Message):
    """Base class for messages that are mid-processing or about to be sent."""

    def __init__(
        self,
        message_id: str,
        chat_stream: "ChatStream",
        bot_user_info: UserInfo,
        message_segment: Optional[Seg] = None,
        reply: Optional["MessageRecv"] = None,
        thinking_start_time: float = 0,
        timestamp: Optional[float] = None,
    ):
        """Build an outgoing/processing message owned by the bot.

        Args:
            message_id: unique id for this message.
            chat_stream: stream the message belongs to.
            bot_user_info: the bot's own user info (used as the sender).
            message_segment: optional content segment.
            reply: optional original message being replied to.
            thinking_start_time: epoch time when the bot started "thinking".
            timestamp: optional explicit message timestamp, forwarded to the base.
        """
        # Shared message fields are handled by the parent class; the bot's
        # identity is passed as the message's user_info.
        super().__init__(
            message_id=message_id,
            timestamp=timestamp,
            chat_stream=chat_stream,
            user_info=bot_user_info,
            message_segment=message_segment,
            reply=reply,
        )
        # Processing-state bookkeeping: thinking_time is filled in later
        # (see the commented-out update_thinking_time helper).
        self.thinking_start_time = thinking_start_time
        self.thinking_time = 0
# def update_thinking_time(self) -> float:
# """更新思考时间"""
# self.thinking_time = round(time.time() - self.thinking_start_time, 2)
# return self.thinking_time
# async def _process_single_segment(self, segment: Seg) -> str:
# """处理单个消息段
# Args:
# segment: 要处理的消息段
# Returns:
# str: 处理后的文本
# """
# try:
# if segment.type == "text":
# return segment.data # type: ignore
# elif segment.type == "image":
# # 如果是base64图片数据
# if isinstance(segment.data, str):
# return await get_image_manager().get_image_description(segment.data)
# return "[图片,网卡了加载不出来]"
# elif segment.type == "emoji":
# if isinstance(segment.data, str):
# return await get_image_manager().get_emoji_tag(segment.data)
# return "[表情,网卡了加载不出来]"
# elif segment.type == "voice":
# if isinstance(segment.data, str):
# return await get_voice_text(segment.data)
# return "[发了一段语音,网卡了加载不出来]"
# elif segment.type == "at":
# return f"[@{segment.data}]"
# elif segment.type == "reply":
# if self.reply and hasattr(self.reply, "processed_plain_text"):
# # print(f"self.reply.processed_plain_text: {self.reply.processed_plain_text}")
# # print(f"reply: {self.reply}")
# return f"[回复<{self.reply.message_info.user_info.user_nickname}:{self.reply.message_info.user_info.user_id}> 的消息:{self.reply.processed_plain_text}]" # type: ignore
# return ""
# else:
# return f"[{segment.type}:{str(segment.data)}]"
# except Exception as e:
# logger.error(f"处理消息段失败: {str(e)}, 类型: {segment.type}, 数据: {segment.data}")
# return f"[处理失败的{segment.type}消息]"
# def _generate_detailed_text(self) -> str:
# """生成详细文本,包含时间和用户信息"""
# # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
# timestamp = self.message_info.time
# user_info = self.message_info.user_info
# name = f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>" # type: ignore
# return f"[{timestamp}]{name} 说:{self.processed_plain_text}\n"
@dataclass
class MessageSending(MessageProcessBase):
    """A message in the sending state."""

    def __init__(
        self,
        message_id: str,
        chat_stream: "ChatStream",
        bot_user_info: UserInfo,
        sender_info: UserInfo | None,  # records who triggered this send
        message_segment: Seg,
        display_message: str = "",
        reply: Optional["MessageRecv"] = None,
        is_head: bool = False,
        is_emoji: bool = False,
        thinking_start_time: float = 0,
        apply_set_reply_logic: bool = False,
        reply_to: Optional[str] = None,
        selected_expressions: Optional[List[int]] = None,
    ):
        """Create an outgoing message.

        Args:
            message_id: unique id for this message.
            chat_stream: stream the message will be sent to.
            bot_user_info: the bot's identity, used as the sender.
            sender_info: info of the user this send is addressed to / triggered by.
            message_segment: content to send.
            display_message: alternative text when the displayed content
                differs from what is actually sent.
            reply: anchor message being replied to, if any.
            is_head: whether this is the head of a multi-part reply.
            is_emoji: whether the payload is an emoji.
            thinking_start_time: when the bot started composing, for latency stats.
            apply_set_reply_logic: whether set-reply handling applies.
            reply_to: textual reply target, if any.
            selected_expressions: ids of expressions chosen for this reply.
        """
        super().__init__(
            message_id=message_id,
            chat_stream=chat_stream,
            bot_user_info=bot_user_info,
            message_segment=message_segment,
            reply=reply,
            thinking_start_time=thinking_start_time,
        )
        # Sending-specific attributes
        self.sender_info = sender_info
        if reply is not None:
            self.reply_to_message_id = reply.message_info.message_id
        else:
            self.reply_to_message_id = None
        self.is_head = is_head
        self.is_emoji = is_emoji
        self.apply_set_reply_logic = apply_set_reply_logic
        self.reply_to = reply_to
        # Shown instead of the raw payload when they differ
        self.display_message = display_message
        self.interest_value = 0.0
        self.selected_expressions = selected_expressions

    def build_reply(self):
        """Prefix the outgoing segment with a reply reference to the anchor."""
        if not self.reply:
            return
        anchor_id = self.reply.message_info.message_id
        self.reply_to_message_id = anchor_id
        self.message_segment = Seg(
            type="seglist",
            data=[
                Seg(type="reply", data=anchor_id),  # type: ignore
                self.message_segment,
            ],
        )

    async def process(self) -> None:
        """Render the message segment into processed plain text."""
        if not self.message_segment:
            return
        self.processed_plain_text = await self._process_message_segments(self.message_segment)
# def to_dict(self):
# ret = super().to_dict()
# ret["message_info"]["user_info"] = self.chat_stream.user_info.to_dict()
# return ret
# def is_private_message(self) -> bool:
# """判断是否为私聊消息"""
# return self.message_info.group_info is None or self.message_info.group_info.group_id is None
# @dataclass
# class MessageSet:
# """消息集合类,可以存储多个发送消息"""
# def __init__(self, chat_stream: "ChatStream", message_id: str):
# self.chat_stream = chat_stream
# self.message_id = message_id
# self.messages: list[MessageSending] = []
# self.time = round(time.time(), 3) # 保留3位小数
# def add_message(self, message: MessageSending) -> None:
# """添加消息到集合"""
# if not isinstance(message, MessageSending):
# raise TypeError("MessageSet只能添加MessageSending类型的消息")
# self.messages.append(message)
# self.messages.sort(key=lambda x: x.message_info.time) # type: ignore
# def get_message_by_index(self, index: int) -> Optional[MessageSending]:
# """通过索引获取消息"""
# return self.messages[index] if 0 <= index < len(self.messages) else None
# def get_message_by_time(self, target_time: float) -> Optional[MessageSending]:
# """获取最接近指定时间的消息"""
# if not self.messages:
# return None
# left, right = 0, len(self.messages) - 1
# while left < right:
# mid = (left + right) // 2
# if self.messages[mid].message_info.time < target_time: # type: ignore
# left = mid + 1
# else:
# right = mid
# return self.messages[left]
# def clear_messages(self) -> None:
# """清空所有消息"""
# self.messages.clear()
# def remove_message(self, message: MessageSending) -> bool:
# """移除指定消息"""
# if message in self.messages:
# self.messages.remove(message)
# return True
# return False
# def __str__(self) -> str:
# return f"MessageSet(id={self.message_id}, count={len(self.messages)})"
# def __len__(self) -> int:
# return len(self.messages)
# def message_recv_from_dict(message_dict: dict) -> MessageRecv:
# return MessageRecv(message_dict)
# def message_from_db_dict(db_dict: dict) -> MessageRecv:
# """从数据库字典创建MessageRecv实例"""
# # 转换扁平的数据库字典为嵌套结构
# message_info_dict = {
# "platform": db_dict.get("chat_info_platform"),
# "message_id": db_dict.get("message_id"),
# "time": db_dict.get("time"),
# "group_info": {
# "platform": db_dict.get("chat_info_group_platform"),
# "group_id": db_dict.get("chat_info_group_id"),
# "group_name": db_dict.get("chat_info_group_name"),
# },
# "user_info": {
# "platform": db_dict.get("user_platform"),
# "user_id": db_dict.get("user_id"),
# "user_nickname": db_dict.get("user_nickname"),
# "user_cardname": db_dict.get("user_cardname"),
# },
# }
# processed_text = db_dict.get("processed_plain_text", "")
# # 构建 MessageRecv 需要的字典
# recv_dict = {
# "message_info": message_info_dict,
# "message_segment": {"type": "text", "data": processed_text}, # 从纯文本重建消息段
# "raw_message": None, # 数据库中未存储原始消息
# "processed_plain_text": processed_text,
# }
# # 创建 MessageRecv 实例
# msg = MessageRecv(recv_dict)
# # 从数据库字典中填充其他可选字段
# msg.interest_value = db_dict.get("interest_value", 0.0)
# msg.is_mentioned = db_dict.get("is_mentioned")
# msg.priority_mode = db_dict.get("priority_mode", "interest")
# msg.priority_info = db_dict.get("priority_info")
# msg.is_emoji = db_dict.get("is_emoji", False)
# msg.is_picid = db_dict.get("is_picid", False)
# return msg

View File

@@ -7,7 +7,7 @@ from maim_message import Seg
from src.common.message_server.api import get_global_api
from src.common.logger import get_logger
from src.common.database.database import get_db_session
from src.chat.message_receive.message import MessageSending
from src.chat.message_receive.message_old import MessageSending
from src.chat.utils.utils import truncate_message
from src.chat.utils.utils import calculate_typing_time

View File

@@ -12,11 +12,8 @@ from src.common.data_models.info_data_model import ActionPlannerInfo
from src.common.data_models.llm_data_model import LLMGenerationDataModel
from src.config.config import global_config, model_config
from src.llm_models.utils_model import LLMRequest
from maim_message import Seg
from src.common.data_models.mai_message_data_model import MaiMessage, UserInfo
from src.chat.message_receive.message import MessageSending
from src.chat.message_receive.chat_manager import BotChatSession
from src.chat.message_receive.message_old import UserInfo, Seg, MessageRecv, MessageSending
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.uni_message_sender import UniversalMessageSender
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.utils.utils import get_chat_type_and_target_info, is_bot_self
@@ -27,16 +24,16 @@ from src.chat.utils.chat_message_builder import (
replace_user_references,
)
from src.bw_learner.expression_selector import expression_selector
from src.services.message_service import translate_pid_to_description
from src.plugin_system.apis.message_api import translate_pid_to_description
# from src.memory_system.memory_activator import MemoryActivator
from src.person_info.person_info import Person
from src.core.types import ActionInfo, EventType
from src.services import llm_service as llm_api
from src.plugin_system.base.component_types import ActionInfo, EventType
from src.plugin_system.apis import llm_api
from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
from src.bw_learner.jargon_explainer import explain_jargon_in_context, retrieve_concepts_with_jargon
from src.bw_learner.jargon_explainer_old import explain_jargon_in_context, retrieve_concepts_with_jargon
from src.chat.utils.common_utils import TempMethodsExpression
init_memory_retrieval_sys()
@@ -48,17 +45,17 @@ logger = get_logger("replyer")
class DefaultReplyer:
def __init__(
self,
chat_stream: BotChatSession,
chat_stream: ChatStream,
request_type: str = "replyer",
):
self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
self.chat_stream = chat_stream
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.session_id)
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
self.heart_fc_sender = UniversalMessageSender()
from src.chat.tool_executor import ToolExecutor
from src.plugin_system.core.tool_use import ToolExecutor # 延迟导入ToolExecutor不然会循环依赖
self.tool_executor = ToolExecutor(chat_id=self.chat_stream.session_id, enable_cache=True, cache_ttl=3)
self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)
async def generate_reply_with_context(
self,
@@ -135,7 +132,7 @@ class DefaultReplyer:
if log_reply:
try:
PlanReplyLogger.log_reply(
chat_id=self.chat_stream.session_id,
chat_id=self.chat_stream.stream_id,
prompt="",
output=None,
processed_output=None,
@@ -149,13 +146,11 @@ class DefaultReplyer:
except Exception:
logger.exception("记录reply日志失败")
return False, llm_response
from src.core.event_bus import event_bus
from src.chat.event_helpers import build_event_message
from src.plugin_system.core.events_manager import events_manager
if not from_plugin:
_event_msg = build_event_message(EventType.POST_LLM, llm_prompt=prompt, stream_id=stream_id)
continue_flag, modified_message = await event_bus.emit(
EventType.POST_LLM, _event_msg
continue_flag, modified_message = await events_manager.handle_mai_events(
EventType.POST_LLM, None, prompt, None, stream_id=stream_id
)
if not continue_flag:
raise UserWarning("插件于请求前中断了内容生成")
@@ -207,7 +202,7 @@ class DefaultReplyer:
try:
if log_reply:
PlanReplyLogger.log_reply(
chat_id=self.chat_stream.session_id,
chat_id=self.chat_stream.stream_id,
prompt=prompt,
output=content,
processed_output=None,
@@ -219,9 +214,8 @@ class DefaultReplyer:
)
except Exception:
logger.exception("记录reply日志失败")
_event_msg = build_event_message(EventType.AFTER_LLM, llm_prompt=prompt, llm_response=llm_response, stream_id=stream_id)
continue_flag, modified_message = await event_bus.emit(
EventType.AFTER_LLM, _event_msg
continue_flag, modified_message = await events_manager.handle_mai_events(
EventType.AFTER_LLM, None, prompt, llm_response, stream_id=stream_id
)
if not from_plugin and not continue_flag:
raise UserWarning("插件于请求后取消了内容生成")
@@ -265,7 +259,7 @@ class DefaultReplyer:
if log_reply:
try:
PlanReplyLogger.log_reply(
chat_id=self.chat_stream.session_id,
chat_id=self.chat_stream.stream_id,
prompt=prompt or "",
output=None,
processed_output=None,
@@ -359,14 +353,14 @@ class DefaultReplyer:
str: 表达习惯信息字符串
"""
# 检查是否允许在此聊天流中使用表达
use_expression, _, _ = TempMethodsExpression.get_expression_config_for_chat(self.chat_stream.session_id)
use_expression, _, _ = TempMethodsExpression.get_expression_config_for_chat(self.chat_stream.stream_id)
if not use_expression:
return "", []
style_habits = []
# 使用从处理器传来的选中表达方式
# 使用模型预测选择表达方式
selected_expressions, selected_ids = await expression_selector.select_suitable_expressions(
self.chat_stream.session_id,
self.chat_stream.stream_id,
chat_history,
max_num=8,
target_message=target,
@@ -708,11 +702,10 @@ class DefaultReplyer:
# 判断是否为群聊
is_group = stream_type == "group"
from src.common.utils.utils_session import SessionUtils
# 使用 ChatManager 提供的接口生成 chat_id避免在此重复实现逻辑
from src.chat.message_receive.chat_stream import get_chat_manager
chat_id = SessionUtils.calculate_session_id(
platform, group_id=str(id_str) if is_group else None, user_id=str(id_str) if not is_group else None
)
chat_id = get_chat_manager().get_stream_id(platform, str(id_str), is_group=is_group)
return chat_id, prompt_content
except (ValueError, IndexError):
@@ -785,7 +778,7 @@ class DefaultReplyer:
if available_actions is None:
available_actions = {}
chat_stream = self.chat_stream
chat_id = chat_stream.session_id
chat_id = chat_stream.stream_id
_is_group_chat = bool(chat_stream.group_info)
platform = chat_stream.platform
@@ -1012,7 +1005,7 @@ class DefaultReplyer:
reply_to: str,
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
chat_stream = self.chat_stream
chat_id = chat_stream.session_id
chat_id = chat_stream.stream_id
sender, target = self._parse_reply_target(reply_to)
target = replace_user_references(target, chat_stream.platform, replace_bot_name=True)
@@ -1112,27 +1105,29 @@ class DefaultReplyer:
is_emoji: bool,
thinking_start_time: float,
display_message: str,
anchor_message: Optional[MaiMessage] = None,
anchor_message: Optional[MessageRecv] = None,
) -> MessageSending:
"""构建单个发送消息"""
bot_user_info = UserInfo(
user_id=str(global_config.bot.qq_account),
user_nickname=global_config.bot.nickname,
platform=self.chat_stream.platform,
)
# await anchor_message.process()
sender_info = anchor_message.message_info.user_info if anchor_message else None
return MessageSending(
message_id=message_id,
session=self.chat_stream,
message_id=message_id, # 使用片段的唯一ID
chat_stream=self.chat_stream,
bot_user_info=bot_user_info,
sender_info=sender_info,
message_segment=message_segment,
reply=anchor_message,
reply=anchor_message, # 回复原始锚点
is_head=reply_to,
is_emoji=is_emoji,
thinking_start_time=thinking_start_time,
thinking_start_time=thinking_start_time, # 传递原始思考开始时间
display_message=display_message,
)

View File

@@ -15,7 +15,7 @@ from src.llm_models.utils_model import LLMRequest
from maim_message import Seg
from src.common.data_models.mai_message_data_model import MaiMessage, UserInfo
from src.chat.message_receive.message import MessageSending
from src.chat.message_receive.message_old import MessageSending
from src.chat.message_receive.chat_manager import BotChatSession
from src.chat.message_receive.uni_message_sender import UniversalMessageSender
from src.chat.utils.timer_calculator import Timer
@@ -35,7 +35,7 @@ from src.person_info.person_info import Person, is_person_known
from src.core.types import ActionInfo, EventType
from src.services import llm_service as llm_api
from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
from src.bw_learner.jargon_explainer import explain_jargon_in_context
from src.bw_learner.jargon_explainer_old import explain_jargon_in_context
init_memory_retrieval_sys()