This commit is contained in:
SengokuCola
2025-09-13 14:01:22 +08:00
11 changed files with 365 additions and 236 deletions

View File

@@ -108,9 +108,9 @@ class HeartFChatting:
self._current_cycle_detail: CycleDetail = None # type: ignore
self.last_read_time = time.time() - 10
self.talk_threshold = global_config.chat.talk_value
self.no_reply_until_call = False
async def start(self):
@@ -172,7 +172,7 @@ class HeartFChatting:
f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}" # type: ignore
+ (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
)
def get_talk_threshold(self):
talk_value = global_config.chat.talk_value
# 处理talk_value取整数部分和小数部分
@@ -183,7 +183,7 @@ class HeartFChatting:
self.talk_threshold = think_len
logger.info(f"{self.log_prefix} 思考频率阈值: {self.talk_threshold}")
async def _loopbody(self):
async def _loopbody(self): # sourcery skip: hoist-if-from-if
recent_messages_list = message_api.get_messages_by_time_in_chat(
chat_id=self.stream_id,
start_time=self.last_read_time,
@@ -195,11 +195,15 @@ class HeartFChatting:
)
if len(recent_messages_list) >= self.talk_threshold:
# !处理no_reply_until_call逻辑
if self.no_reply_until_call:
for message in recent_messages_list:
if message.is_mentioned or message.is_at or len(recent_messages_list) >= 8 or time.time() - self.last_read_time > 600:
if (
message.is_mentioned
or message.is_at
or len(recent_messages_list) >= 8
or time.time() - self.last_read_time > 600
):
self.no_reply_until_call = False
break
# 没有提到,继续保持沉默
@@ -207,8 +211,7 @@ class HeartFChatting:
# logger.info(f"{self.log_prefix} 没有提到,继续保持沉默")
await asyncio.sleep(1)
return True
self.last_read_time = time.time()
await self._observe(
recent_messages_list=recent_messages_list,
@@ -271,9 +274,9 @@ class HeartFChatting:
return loop_info, reply_text, cycle_timers
async def _observe(
self, # interest_value: float = 0.0,
recent_messages_list: Optional[List["DatabaseMessages"]] = None
) -> bool:
self, # interest_value: float = 0.0,
recent_messages_list: Optional[List["DatabaseMessages"]] = None,
) -> bool: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
if recent_messages_list is None:
recent_messages_list = []
reply_text = "" # 初始化reply_text变量避免UnboundLocalError
@@ -283,7 +286,7 @@ class HeartFChatting:
async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
await self.expression_learner.trigger_learning_for_chat()
cycle_timers, thinking_id = self.start_cycle()
logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考")
@@ -326,27 +329,25 @@ class HeartFChatting:
return False
if modified_message and modified_message._modify_flags.modify_llm_prompt:
prompt_info = (modified_message.llm_prompt, prompt_info[1])
with Timer("规划器", cycle_timers):
action_to_use_info, _ = await self.action_planner.plan(
loop_start_time=self.last_read_time,
available_actions=available_actions,
)
# !此处使at或者提及必定回复
metioned_message = None
for message in recent_messages_list:
if (message.is_mentioned or message.is_at) and global_config.chat.mentioned_bot_reply:
metioned_message = message
has_reply = False
for action in action_to_use_info:
if action.action_type == "reply":
has_reply =True
has_reply = True
break
if not has_reply and metioned_message:
action_to_use_info.append(
ActionPlannerInfo(
@@ -357,7 +358,6 @@ class HeartFChatting:
available_actions=available_actions,
)
)
# 3. 并行执行所有动作
action_tasks = [
@@ -521,10 +521,9 @@ class HeartFChatting:
reply_text = ""
first_replied = False
for reply_content in reply_set.reply_data:
if reply_content.content_type != ReplyContentType.TEXT:
continue
data: str = reply_content.content # type: ignore
data: str = reply_content.content # type: ignore
if not first_replied:
await send_api.text_to_stream(
text=data,
@@ -574,17 +573,18 @@ class HeartFChatting:
action_name="no_action",
)
return {"action_type": "no_action", "success": True, "reply_text": "", "command": ""}
elif action_planner_info.action_type == "wait_time":
action_planner_info.action_data = action_planner_info.action_data or {}
logger.info(f"{self.log_prefix} 等待{action_planner_info.action_data['time']}秒后回复")
await asyncio.sleep(action_planner_info.action_data["time"])
return {"action_type": "wait_time", "success": True, "reply_text": "", "command": ""}
elif action_planner_info.action_type == "no_reply_until_call":
logger.info(f"{self.log_prefix} 保持沉默,直到有人直接叫的名字")
self.no_reply_until_call = True
return {"action_type": "no_reply_until_call", "success": True, "reply_text": "", "command": ""}
elif action_planner_info.action_type == "reply":
try:
success, llm_response = await generator_api.generate_reply(
@@ -624,7 +624,7 @@ class HeartFChatting:
"reply_text": reply_text,
"loop_info": loop_info,
}
# 其他动作
else:
# 执行普通动作
@@ -643,7 +643,7 @@ class HeartFChatting:
"reply_text": reply_text,
"command": command,
}
except Exception as e:
logger.error(f"{self.log_prefix} 执行动作时出错: {e}")
logger.error(f"{self.log_prefix} 错误信息: {traceback.format_exc()}")

View File

@@ -10,7 +10,6 @@ from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.storage import MessageStorage
from src.chat.heart_flow.heartflow import heartflow
from src.chat.utils.utils import is_mentioned_bot_in_message
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.chat_message_builder import replace_user_references
from src.common.logger import get_logger
from src.mood.mood_manager import mood_manager
@@ -36,7 +35,7 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, list[str]]:
return 0.0, []
is_mentioned, is_at, reply_probability_boost = is_mentioned_bot_in_message(message)
interested_rate = 0.0
# interested_rate = 0.0
keywords = []
message.interest_value = 1
@@ -113,10 +112,10 @@ class HeartFCMessageReceiver:
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}[{interested_rate:.2f}]") # type: ignore
_ = Person.register_person(
platform=message.message_info.platform,
user_id=message.message_info.user_info.user_id,
nickname=userinfo.user_nickname,
) # type: ignore
platform=message.message_info.platform, # type: ignore
user_id=message.message_info.user_info.user_id, # type: ignore
nickname=userinfo.user_nickname, # type: ignore
)
except Exception as e:
logger.error(f"消息处理失败: {e}")

View File

@@ -22,12 +22,12 @@ from src.chat.utils.chat_message_builder import (
from src.chat.utils.utils import get_chat_type_and_target_info
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.message_receive.chat_stream import get_chat_manager
from src.plugin_system.base.component_types import ActionInfo, ChatMode, ComponentType, ActionActivationType
from src.plugin_system.base.component_types import ActionInfo, ComponentType, ActionActivationType
from src.plugin_system.core.component_registry import component_registry
if TYPE_CHECKING:
from src.common.data_models.info_data_model import TargetPersonInfo
from src.common.data_models.database_data_model import DatabaseMessages, DatabaseActionRecords
from src.common.data_models.database_data_model import DatabaseMessages
logger = get_logger("planner")
@@ -121,7 +121,6 @@ no_reply_until_call
)
class ActionPlanner:
def __init__(self, chat_id: str, action_manager: ActionManager):
self.chat_id = chat_id
@@ -168,7 +167,7 @@ class ActionPlanner:
action_data = {key: value for key, value in action_json.items() if key not in ["action", "reason"]}
# 非no_action动作需要target_message_id
target_message = None
if target_message_id := action_json.get("target_message_id"):
# 根据target_message_id查找原始消息
target_message = self.find_message_by_id(target_message_id, message_id_list)
@@ -179,12 +178,11 @@ class ActionPlanner:
else:
target_message = message_id_list[-1][1]
logger.info(f"{self.log_prefix}动作'{action}'缺少target_message_id使用最新消息作为target_message")
# 验证action是否可用
available_action_names = [action_name for action_name, _ in current_available_actions]
internal_action_names = ["no_reply", "reply", "wait_time", "no_reply_until_call"]
if action not in internal_action_names and action not in available_action_names:
logger.warning(
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {available_action_names}),将强制使用 'no_reply'"
@@ -223,18 +221,17 @@ class ActionPlanner:
return action_planner_infos
async def plan(
self,
available_actions: Dict[str, ActionInfo],
loop_start_time: float = 0.0,
) -> Tuple[List[ActionPlannerInfo], Optional["DatabaseMessages"]]:
# sourcery skip: use-named-expression
"""
规划器 (Planner): 使用LLM根据上下文决定做出什么动作。
"""
target_message: Optional["DatabaseMessages"] = None
# 获取聊天上下文
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=self.chat_id,
@@ -249,7 +246,7 @@ class ActionPlanner:
truncate=True,
show_actions=True,
)
message_list_before_now_short = message_list_before_now[-int(global_config.chat.max_context_size * 0.3) :]
chat_content_block_short, message_id_list_short = build_readable_messages_with_id(
messages=message_list_before_now_short,
@@ -257,17 +254,15 @@ class ActionPlanner:
truncate=False,
show_actions=False,
)
self.last_obs_time_mark = time.time()
# 获取必要信息
is_group_chat, chat_target_info, current_available_actions = self.get_necessary_info()
# 应用激活类型过滤
filtered_actions = self._filter_actions_by_activation_type(
available_actions, chat_content_block_short
)
filtered_actions = self._filter_actions_by_activation_type(available_actions, chat_content_block_short)
logger.info(f"{self.log_prefix}过滤后有{len(filtered_actions)}个可用动作")
# 构建包含所有动作的提示词
@@ -279,21 +274,21 @@ class ActionPlanner:
message_id_list=message_id_list,
interest=global_config.personality.interest,
)
# 调用LLM获取决策
actions = await self._execute_main_planner(
prompt=prompt,
message_id_list=message_id_list,
filtered_actions=filtered_actions,
available_actions=available_actions,
loop_start_time=loop_start_time
loop_start_time=loop_start_time,
)
# 获取target_message如果有非no_action的动作
non_no_actions = [a for a in actions if a.action_type != "no_reply"]
if non_no_actions:
target_message = non_no_actions[0].action_message
return actions, target_message
async def build_planner_prompt(
@@ -333,7 +328,9 @@ class ActionPlanner:
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
bot_name = global_config.bot.nickname
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}" if global_config.bot.alias_names else ""
bot_nickname = (
f",也有人叫你{','.join(global_config.bot.alias_names)}" if global_config.bot.alias_names else ""
)
name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。"
# 获取主规划器模板并填充
@@ -379,15 +376,12 @@ class ActionPlanner:
return is_group_chat, chat_target_info, current_available_actions
def _filter_actions_by_activation_type(
self,
available_actions: Dict[str, ActionInfo],
chat_content_block: str
self, available_actions: Dict[str, ActionInfo], chat_content_block: str
) -> Dict[str, ActionInfo]:
"""根据激活类型过滤动作"""
filtered_actions = {}
for action_name, action_info in available_actions.items():
if action_info.activation_type == ActionActivationType.NEVER:
logger.debug(f"{self.log_prefix}动作 {action_name} 设置为 NEVER 激活类型,跳过")
@@ -405,14 +399,15 @@ class ActionPlanner:
break
else:
logger.warning(f"{self.log_prefix}未知的激活类型: {action_info.activation_type},跳过处理")
return filtered_actions
async def _build_action_options_block(self, current_available_actions: Dict[str, ActionInfo]) -> str:
# sourcery skip: use-join
"""构建动作选项块"""
if not current_available_actions:
return ""
action_options_block = ""
for action_name, action_info in current_available_actions.items():
# 构建参数文本
@@ -422,13 +417,13 @@ class ActionPlanner:
for param_name, param_description in action_info.action_parameters.items():
param_text += f' "{param_name}":"{param_description}"\n'
param_text = param_text.rstrip("\n")
# 构建要求文本
require_text = ""
for require_item in action_info.action_require:
require_text += f"- {require_item}\n"
require_text = require_text.rstrip("\n")
# 获取动作提示模板并填充
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
using_action_prompt = using_action_prompt.format(
@@ -437,30 +432,30 @@ class ActionPlanner:
action_parameters=param_text,
action_require=require_text,
)
action_options_block += using_action_prompt
return action_options_block
async def _execute_main_planner(
self,
prompt: str,
message_id_list: List[Tuple[str, "DatabaseMessages"]],
filtered_actions: Dict[str, ActionInfo],
available_actions: Dict[str, ActionInfo],
loop_start_time: float
loop_start_time: float,
) -> List[ActionPlannerInfo]:
"""执行主规划器"""
llm_content = None
actions: List[ActionPlannerInfo] = []
try:
# 调用LLM
llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
@@ -471,7 +466,7 @@ class ActionPlanner:
logger.debug(f"{self.log_prefix}规划器原始响应: {llm_content}")
if reasoning_content:
logger.debug(f"{self.log_prefix}规划器推理: {reasoning_content}")
except Exception as req_e:
logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}")
return [
@@ -483,41 +478,38 @@ class ActionPlanner:
available_actions=available_actions,
)
]
# 解析LLM响应
if llm_content:
try:
# 处理新的格式:多个```json包裹的JSON对象
json_objects = self._extract_json_from_markdown(llm_content)
if json_objects:
if json_objects := self._extract_json_from_markdown(llm_content):
logger.info(f"{self.log_prefix}从响应中提取到{len(json_objects)}个JSON对象")
filtered_actions_list = list(filtered_actions.items())
for json_obj in json_objects:
actions.extend(
self._parse_single_action(json_obj, message_id_list, filtered_actions_list)
)
actions.extend(self._parse_single_action(json_obj, message_id_list, filtered_actions_list))
else:
# 尝试解析为直接的JSON
logger.warning(f"{self.log_prefix}LLM没有返回可用动作: {llm_content}")
actions = self._create_no_reply("LLM没有返回可用动作", available_actions)
except Exception as json_e:
logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'")
actions = self._create_no_reply(f"解析LLM响应JSON失败: {json_e}", available_actions)
traceback.print_exc()
else:
actions = self._create_no_reply("规划器没有获得LLM响应", available_actions)
# 添加循环开始时间到所有非no_action动作
for action in actions:
action.action_data = action.action_data or {}
action.action_data["loop_start_time"] = loop_start_time
logger.info(f"{self.log_prefix}规划器决定执行{len(actions)}个动作: {' '.join([a.action_type for a in actions])}")
logger.info(
f"{self.log_prefix}规划器决定执行{len(actions)}个动作: {' '.join([a.action_type for a in actions])}"
)
return actions
def _create_no_reply(self, reasoning: str, available_actions: Dict[str, ActionInfo]) -> List[ActionPlannerInfo]:
"""创建no_action"""
return [
@@ -529,23 +521,22 @@ class ActionPlanner:
available_actions=available_actions,
)
]
def _extract_json_from_markdown(self, content: str) -> List[dict]:
# sourcery skip: for-append-to-extend
"""从Markdown格式的内容中提取JSON对象"""
json_objects = []
# 使用正则表达式查找```json包裹的JSON内容
json_pattern = r'```json\s*(.*?)\s*```'
json_pattern = r"```json\s*(.*?)\s*```"
matches = re.findall(json_pattern, content, re.DOTALL)
for match in matches:
try:
# 清理可能的注释和格式问题
json_str = re.sub(r'//.*?\n', '\n', match) # 移除单行注释
json_str = re.sub(r'/\*.*?\*/', '', json_str, flags=re.DOTALL) # 移除多行注释
json_str = json_str.strip()
if json_str:
json_str = re.sub(r"//.*?\n", "\n", match) # 移除单行注释
json_str = re.sub(r"/\*.*?\*/", "", json_str, flags=re.DOTALL) # 移除多行注释
if json_str := json_str.strip():
json_obj = json.loads(repair_json(json_str))
if isinstance(json_obj, dict):
json_objects.append(json_obj)
@@ -556,7 +547,7 @@ class ActionPlanner:
except Exception as e:
logger.warning(f"解析JSON块失败: {e}, 块内容: {match[:100]}...")
continue
return json_objects

View File

@@ -368,37 +368,37 @@ class DefaultReplyer:
return f"{expression_habits_title}\n{expression_habits_block}", selected_ids
async def build_memory_block(self, chat_history: List[DatabaseMessages], target: str) -> str:
"""构建记忆块
# async def build_memory_block(self, chat_history: List[DatabaseMessages], target: str) -> str:
# """构建记忆块
Args:
chat_history: 聊天历史记录
target: 目标消息内容
# Args:
# chat_history: 聊天历史记录
# target: 目标消息内容
Returns:
str: 记忆信息字符串
"""
# Returns:
# str: 记忆信息字符串
# """
if not global_config.memory.enable_memory:
return ""
# if not global_config.memory.enable_memory:
# return ""
instant_memory = None
# instant_memory = None
running_memories = await self.memory_activator.activate_memory_with_chat_history(
target_message=target, chat_history=chat_history
)
if not running_memories:
return ""
# running_memories = await self.memory_activator.activate_memory_with_chat_history(
# target_message=target, chat_history=chat_history
# )
# if not running_memories:
# return ""
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memories:
keywords, content = running_memory
memory_str += f"- {keywords}{content}\n"
# memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
# for running_memory in running_memories:
# keywords, content = running_memory
# memory_str += f"- {keywords}{content}\n"
if instant_memory:
memory_str += f"- {instant_memory}\n"
# if instant_memory:
# memory_str += f"- {instant_memory}\n"
return memory_str
# return memory_str
async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
"""构建工具信息块

View File

@@ -23,3 +23,4 @@ class ActionPlannerInfo(BaseDataModel):
action_data: Optional[Dict] = None
action_message: Optional["DatabaseMessages"] = None
available_actions: Optional[Dict[str, "ActionInfo"]] = None
loop_start_time: Optional[float] = None

View File

@@ -1,4 +1,4 @@
from typing import Optional, TYPE_CHECKING, List, Tuple, Union, Dict
from typing import Optional, TYPE_CHECKING, List, Tuple, Union, Dict, Any
from dataclasses import dataclass, field
from enum import Enum
@@ -50,10 +50,65 @@ class ReplyContentType(Enum):
return self.value
@dataclass
class ForwardNode(BaseDataModel):
    # One node inside a forward-message bundle. Two mutually exclusive
    # construction modes are supported (see the classmethods below):
    #   1. reference mode: `content` holds an existing message_id string
    #   2. created mode:   `content` holds a list of ReplyContent items
    user_id: Optional[str] = None  # sender id shown on the node (empty string in reference mode)
    user_nickname: Optional[str] = None  # sender nickname shown on the node (empty string in reference mode)
    content: Union[List["ReplyContent"], str] = field(default_factory=list)

    @classmethod
    def construct_as_id_reference(cls, message_id: str) -> "ForwardNode":
        """Build a node that forwards an already-existing message by its id."""
        return cls(user_id="", user_nickname="", content=message_id)

    @classmethod
    def construct_as_created_node(
        cls, user_id: str, user_nickname: str, content: List["ReplyContent"]
    ) -> "ForwardNode":
        """Build a node whose content is a freshly composed list of ReplyContent items."""
        return cls(user_id=user_id, user_nickname=user_nickname, content=content)
@dataclass
class ReplyContent(BaseDataModel):
content_type: ReplyContentType | str
content: Union[str, Dict, List["ReplyContent"]] # 支持嵌套的 ReplyContent
content: Union[str, Dict, List[ForwardNode], List["ReplyContent"]] # 支持嵌套的 ReplyContent
@classmethod
def construct_as_text(cls, text: str):
return cls(content_type=ReplyContentType.TEXT, content=text)
@classmethod
def construct_as_image(cls, image_base64: str):
return cls(content_type=ReplyContentType.IMAGE, content=image_base64)
@classmethod
def construct_as_voice(cls, voice_base64: str):
return cls(content_type=ReplyContentType.VOICE, content=voice_base64)
@classmethod
def construct_as_emoji(cls, emoji_str: str):
return cls(content_type=ReplyContentType.EMOJI, content=emoji_str)
@classmethod
def construct_as_command(cls, command_arg: Dict):
return cls(content_type=ReplyContentType.COMMAND, content=command_arg)
@classmethod
def construct_as_hybrid(cls, hybrid_content: List[Tuple[ReplyContentType | str, str]]):
hybrid_content_list: List[ReplyContent] = []
for content_type, content in hybrid_content:
assert content_type not in [
ReplyContentType.HYBRID,
ReplyContentType.FORWARD,
ReplyContentType.VOICE,
ReplyContentType.COMMAND,
], "混合内容的每个项不能是混合、转发、语音或命令类型"
assert isinstance(content, str), "混合内容的每个项必须是字符串"
hybrid_content_list.append(ReplyContent(content_type=content_type, content=content))
return cls(content_type=ReplyContentType.HYBRID, content=hybrid_content_list)
@classmethod
def construct_as_forward(cls, forward_nodes: List[ForwardNode]):
return cls(content_type=ReplyContentType.FORWARD, content=forward_nodes)
def __post_init__(self):
if isinstance(self.content_type, ReplyContentType):
@@ -82,36 +137,70 @@ class ReplySetModel(BaseDataModel):
return len(self.reply_data)
def add_text_content(self, text: str):
"""添加文本内容"""
"""
添加文本内容
Args:
text: 文本内容
"""
self.reply_data.append(ReplyContent(content_type=ReplyContentType.TEXT, content=text))
def add_image_content(self, image_base64: str):
"""添加图片内容base64编码的图片数据"""
"""
添加图片内容base64编码的图片数据
Args:
image_base64: base64编码的图片数据
"""
self.reply_data.append(ReplyContent(content_type=ReplyContentType.IMAGE, content=image_base64))
def add_voice_content(self, voice_base64: str):
"""添加语音内容base64编码的音频数据"""
"""
添加语音内容base64编码的音频数据
Args:
voice_base64: base64编码的音频数据
"""
self.reply_data.append(ReplyContent(content_type=ReplyContentType.VOICE, content=voice_base64))
def add_hybrid_content(self, hybrid_content: List[Tuple[ReplyContentType, str]]):
def add_hybrid_content_by_raw(self, hybrid_content: List[Tuple[ReplyContentType | str, str]]):
"""
添加混合型内容,可以包含多种类型的内容
实际解析时只关注最外层,没有递归嵌套处理
添加混合型内容,可以包含text, image, emoji的任意组合
Args:
hybrid_content: 元组 (类型, 消息内容) 构成的列表,如[(ReplyContentType.TEXT, "Hello"), (ReplyContentType.IMAGE, "<base64")]
"""
hybrid_content_list: List[ReplyContent] = []
for content_type, content in hybrid_content:
assert content_type not in [
ReplyContentType.HYBRID,
ReplyContentType.FORWARD,
ReplyContentType.VOICE,
ReplyContentType.COMMAND,
], "混合内容的每个项不能是混合、转发、语音或命令类型"
assert isinstance(content, str), "混合内容的每个项必须是字符串"
self.reply_data.append(ReplyContent(content_type=content_type, content=content))
hybrid_content_list.append(ReplyContent(content_type=content_type, content=content))
def add_custom_content(self, content_type: str, content: str):
"""添加自定义类型的内容"""
self.reply_data.append(ReplyContent(content_type=ReplyContentType.HYBRID, content=hybrid_content_list))
def add_hybrid_content(self, hybrid_content: List[ReplyContent]):
    """
    Append one hybrid (multi-part) reply built from pre-constructed ReplyContent items.

    Args:
        hybrid_content: list of ReplyContent, e.g.
            [ReplyContent(ReplyContentType.TEXT, "Hello"),
             ReplyContent(ReplyContentType.IMAGE, "<base64")]

    Raises:
        AssertionError: if any item is a HYBRID/FORWARD/VOICE/COMMAND type,
            or its content is not a plain string.
    """
    for content in hybrid_content:
        # Only simple string-bodied parts (text / image / emoji) may be nested
        # inside a hybrid reply — no recursive hybrid/forward, no voice/command.
        assert content.content_type not in [
            ReplyContentType.HYBRID,
            ReplyContentType.FORWARD,
            ReplyContentType.VOICE,
            ReplyContentType.COMMAND,
        ], "混合内容的每个项不能是混合、转发、语音或命令类型"
        assert isinstance(content.content, str), "混合内容的每个项必须是字符串"
    # The validated items are wrapped in a single HYBRID ReplyContent entry.
    self.reply_data.append(ReplyContent(content_type=ReplyContentType.HYBRID, content=hybrid_content))
def add_custom_content(self, content_type: str, content: Any):
    """
    Append a custom-typed reply item.

    Args:
        content_type: free-form type string understood by the downstream sender
        content: payload of any shape — forwarded as-is, no validation here
    """
    self.reply_data.append(ReplyContent(content_type=content_type, content=content))
def add_forward_content(self, forward_content: List[Tuple[ReplyContentType, Union[str, ReplyContent]]]):
def add_forward_content(self, forward_content: List[ForwardNode]):
"""添加转发内容可以是字符串或ReplyContent嵌套的转发内容需要自己构造放入"""
for content_type, content in forward_content:
if isinstance(content, ReplyContent):
self.reply_data.append(content)
else:
assert isinstance(content, str), "转发内容的每个data必须是字符串或ReplyContent"
self.reply_data.append(ReplyContent(content_type=content_type, content=content))
self.reply_data.append(ReplyContent(content_type=ReplyContentType.FORWARD, content=forward_content))

View File

@@ -0,0 +1,57 @@
# 有关转发消息和其他消息的构建类型说明
```mermaid
graph LR;
direction TB;
A[ReplySet] --- B[ReplyContent];
A --- C["ReplyContent"];
A --- K["ReplyContent"];
A --- L["ReplyContent"];
A --- N["ReplyContent"];
A --- D[...];
B --- E["Text (in str)"];
B --- F["Image (in base64)"];
C --- G["Voice (in base64)"];
B --- I["Emoji (in base64)"];
subgraph "可行内容(以下的任意组合)";
subgraph "转发消息(Forward)"
M["List[ForwardNode]"]
end
subgraph "混合消息(Hybrid)"
J["List[ReplyContent] (要求只能包含普通消息)"]
end
subgraph "命令消息(Command)"
H["Command (in Dict)"]
end
subgraph "语音消息"
G
end
subgraph "普通消息"
E
F
I
end
end
N --- H
K --- J
L --- M
subgraph ForwardNodes
O["ForwardNode"]
P["ForwardNode"]
Q["ForwardNode"]
end
M --- O
M --- P
M --- Q
subgraph "内容 (message_id引用法)"
P --- U["content: str, 引用已有消息的有效ID"];
end
subgraph "内容 (生成法)"
O --- R["user_id: str"];
O --- S["user_nickname: str"];
O --- T["content: List[ReplyContent], 为这个转发节点的消息内容"];
end
```
另外,自定义消息类型我们在这里不做讨论。
以上列出了所有可能的ReplySet构建方式下面我们来解释一下各个类型的含义。

View File

@@ -72,9 +72,6 @@ class ChatConfig(ConfigBase):
interest_rate_mode: Literal["fast", "accurate"] = "fast"
"""兴趣值计算模式fast为快速计算accurate为精确计算"""
mentioned_bot_reply: float = 1
"""提及 bot 必然回复1为100%回复0为不额外增幅"""
planner_size: float = 1.5
"""副规划器大小越小麦麦的动作执行能力越精细但是消耗更多token调大可以缓解429类错误"""

View File

@@ -132,13 +132,13 @@ class MainSystem:
await asyncio.gather(*tasks)
async def forget_memory_task(self):
"""记忆遗忘任务"""
while True:
await asyncio.sleep(global_config.memory.forget_memory_interval)
logger.info("[记忆遗忘] 开始遗忘记忆...")
await self.hippocampus_manager.forget_memory(percentage=global_config.memory.memory_forget_percentage) # type: ignore
logger.info("[记忆遗忘] 记忆遗忘完成")
# async def forget_memory_task(self):
# """记忆遗忘任务"""
# while True:
# await asyncio.sleep(global_config.memory.forget_memory_interval)
# logger.info("[记忆遗忘] 开始遗忘记忆...")
# await self.hippocampus_manager.forget_memory(percentage=global_config.memory.memory_forget_percentage) # type: ignore
# logger.info("[记忆遗忘] 记忆遗忘完成")
async def main():

View File

@@ -21,7 +21,7 @@
import traceback
import time
from typing import Optional, Union, Dict, List, TYPE_CHECKING
from typing import Optional, Union, Dict, List, TYPE_CHECKING, Tuple
from src.common.logger import get_logger
from src.common.data_models.message_data_model import ReplyContentType
@@ -29,11 +29,11 @@ from src.config.config import global_config
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.message_receive.uni_message_sender import UniversalMessageSender
from src.chat.message_receive.message import MessageSending, MessageRecv
from maim_message import Seg, UserInfo
from maim_message import Seg, UserInfo, MessageBase, BaseMessageInfo
if TYPE_CHECKING:
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.data_models.message_data_model import ReplySetModel
from src.common.data_models.message_data_model import ReplySetModel, ReplyContent, ForwardNode
logger = get_logger("send_api")
@@ -367,89 +367,84 @@ async def custom_reply_set_to_stream(
flag: bool = True
for reply_content in reply_set.reply_data:
status: bool = False
content_type = reply_content.content_type
message_data = reply_content.content
if content_type == ReplyContentType.TEXT:
status = await _send_to_target(
message_segment=Seg(type="text", data=message_data), # type: ignore
stream_id=stream_id,
display_message=display_message,
typing=typing,
reply_message=reply_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
)
elif content_type in [
ReplyContentType.IMAGE,
ReplyContentType.EMOJI,
ReplyContentType.COMMAND,
ReplyContentType.VOICE,
]:
message_segment: Seg
if ReplyContentType == ReplyContentType.IMAGE:
message_segment = Seg(type="image", data=message_data) # type: ignore
elif ReplyContentType == ReplyContentType.EMOJI:
message_segment = Seg(type="emoji", data=message_data) # type: ignore
elif ReplyContentType == ReplyContentType.COMMAND:
message_segment = Seg(type="command", data=message_data) # type: ignore
elif ReplyContentType == ReplyContentType.VOICE:
message_segment = Seg(type="voice", data=message_data) # type: ignore
status = await _send_to_target(
message_segment=message_segment,
stream_id=stream_id,
display_message=display_message,
typing=False,
reply_message=reply_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
)
elif content_type == ReplyContentType.HYBRID:
assert isinstance(message_data, list), "混合类型内容必须是列表"
sub_seg_list: List[Seg] = []
for sub_content in message_data:
sub_content_type = sub_content.content_type
sub_content_data = sub_content.content
if sub_content_type == ReplyContentType.TEXT:
sub_seg_list.append(Seg(type="text", data=sub_content_data)) # type: ignore
elif sub_content_type == ReplyContentType.IMAGE:
sub_seg_list.append(Seg(type="image", data=sub_content_data)) # type: ignore
elif sub_content_type == ReplyContentType.EMOJI:
sub_seg_list.append(Seg(type="emoji", data=sub_content_data)) # type: ignore
else:
logger.warning(f"[SendAPI] 混合类型中不支持的子内容类型: {repr(sub_content_type)}")
continue
status = await _send_to_target(
message_segment=Seg(type="seglist", data=sub_seg_list), # type: ignore
stream_id=stream_id,
display_message=display_message,
typing=typing,
reply_message=reply_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
)
elif content_type == ReplyContentType.FORWARD:
assert isinstance(message_data, list), "转发类型内容必须是列表"
# TODO: 完成转发消息的发送机制
else:
message_type_in_str = (
content_type.value if isinstance(content_type, ReplyContentType) else str(content_type)
)
return await _send_to_target(
message_segment=Seg(type=message_type_in_str, data=message_data), # type: ignore
stream_id=stream_id,
display_message=display_message,
typing=typing,
reply_message=reply_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
)
message_seg, need_typing = _parse_content_to_seg(reply_content)
status = await _send_to_target(
message_segment=message_seg,
stream_id=stream_id,
display_message=display_message,
typing=bool(need_typing and typing),
reply_message=reply_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
)
if not status:
flag = False
logger.error(f"[SendAPI] 发送{repr(content_type)}消息失败,消息内容:{str(message_data)[:100]}")
logger.error(
f"[SendAPI] 发送{repr(reply_content.content_type)}消息失败,消息内容:{str(reply_content.content)[:100]}"
)
return flag
def _parse_content_to_seg(reply_content: "ReplyContent") -> Tuple[Seg, bool]:
    """
    Convert a ReplyContent into a Seg structure (FORWARD content recurses only one level).

    Args:
        reply_content: the ReplyContent object to convert

    Returns:
        Tuple[Seg, bool]: the converted Seg, and whether the send should simulate
        typing (True for text-like content, False for media/command/forward).
    """
    content_type = reply_content.content_type
    if content_type == ReplyContentType.TEXT:
        text_data: str = reply_content.content  # type: ignore
        return Seg(type="text", data=text_data), True
    elif content_type == ReplyContentType.IMAGE:
        return Seg(type="image", data=reply_content.content), False  # type: ignore
    elif content_type == ReplyContentType.EMOJI:
        return Seg(type="emoji", data=reply_content.content), False  # type: ignore
    elif content_type == ReplyContentType.COMMAND:
        return Seg(type="command", data=reply_content.content), False  # type: ignore
    elif content_type == ReplyContentType.VOICE:
        return Seg(type="voice", data=reply_content.content), False  # type: ignore
    elif content_type == ReplyContentType.HYBRID:
        # Hybrid: flatten the simple sub-parts (text / image / emoji) into one seglist.
        hybrid_message_list_data: List[ReplyContent] = reply_content.content  # type: ignore
        assert isinstance(hybrid_message_list_data, list), "混合类型内容必须是列表"
        sub_seg_list: List[Seg] = []
        for sub_content in hybrid_message_list_data:
            sub_content_type = sub_content.content_type
            sub_content_data = sub_content.content
            if sub_content_type == ReplyContentType.TEXT:
                sub_seg_list.append(Seg(type="text", data=sub_content_data))  # type: ignore
            elif sub_content_type == ReplyContentType.IMAGE:
                sub_seg_list.append(Seg(type="image", data=sub_content_data))  # type: ignore
            elif sub_content_type == ReplyContentType.EMOJI:
                sub_seg_list.append(Seg(type="emoji", data=sub_content_data))  # type: ignore
            else:
                # Unsupported nested types are dropped with a warning instead of failing the send.
                logger.warning(f"[SendAPI] 混合类型中不支持的子内容类型: {repr(sub_content_type)}")
                continue
        return Seg(type="seglist", data=sub_seg_list), True
    elif content_type == ReplyContentType.FORWARD:
        forward_message_list_data: List["ForwardNode"] = reply_content.content  # type: ignore
        assert isinstance(forward_message_list_data, list), "转发类型内容必须是列表"
        forward_message_list: List[MessageBase] = []
        for forward_node in forward_message_list_data:
            # Default: reference mode — node content is an existing message id string.
            message_segment = Seg(type="id", data=forward_node.content)  # type: ignore
            user_info: Optional[UserInfo] = None
            if forward_node.user_id and forward_node.user_nickname:
                # Created mode: node carries its own list of sub-contents.
                assert isinstance(forward_node.content, list), "转发节点内容必须是列表"
                user_info = UserInfo(user_id=forward_node.user_id, user_nickname=forward_node.user_nickname)
                single_node_content: List[Seg] = []
                for sub_content in forward_node.content:
                    # Nested FORWARD sub-contents are skipped to cap recursion at one level.
                    if sub_content.content_type != ReplyContentType.FORWARD:
                        sub_seg, _ = _parse_content_to_seg(sub_content)
                        single_node_content.append(sub_seg)
                message_segment = Seg(type="seglist", data=single_node_content)
            forward_message_list.append(
                MessageBase(message_segment=message_segment, message_info=BaseMessageInfo(user_info=user_info))
            )
        return Seg(type="forward", data=forward_message_list), False  # type: ignore
    else:
        # Custom/unknown types: pass the raw type string straight through as the Seg type.
        message_type_in_str = content_type.value if isinstance(content_type, ReplyContentType) else str(content_type)
        return Seg(type=message_type_in_str, data=reply_content.content), True  # type: ignore

View File

@@ -179,7 +179,7 @@ class BuildRelationAction(BaseAction):
chat_model_config = models.get("utils")
success, update_memory, _, _ = await llm_api.generate_with_model(
prompt,
model_config=chat_model_config,
model_config=chat_model_config, # type: ignore
request_type="relation.category.update", # type: ignore
)