Merge branch 'dev' of github.com:MaiM-with-u/MaiBot into dev

This commit is contained in:
UnCLAS-Prommer
2025-09-17 21:21:21 +08:00
18 changed files with 117 additions and 91 deletions

View File

@@ -502,12 +502,6 @@ class BrainChatting:
)
return {"action_type": "no_action", "success": True, "reply_text": "", "command": ""}
elif action_planner_info.action_type == "wait_time":
action_planner_info.action_data = action_planner_info.action_data or {}
logger.info(f"{self.log_prefix} 等待{action_planner_info.action_data['time']}秒后回复")
await asyncio.sleep(action_planner_info.action_data["time"])
return {"action_type": "wait_time", "success": True, "reply_text": "", "command": ""}
elif action_planner_info.action_type == "reply":
try:
success, llm_response = await generator_api.generate_reply(

View File

@@ -59,7 +59,7 @@ reply
no_reply
动作描述:
保持沉默,等待对方发言
等待,保持沉默,等待对方发言
{{
"action": "no_reply",
}}
@@ -332,7 +332,7 @@ class BrainPlanner:
moderation_prompt=moderation_prompt_block,
name_block=name_block,
interest=interest,
plan_style=global_config.personality.plan_style,
plan_style=global_config.personality.private_plan_style,
)
return prompt, message_id_list

View File

@@ -1,37 +0,0 @@
from typing import Optional
import hashlib
def parse_stream_config_to_chat_id(stream_config_str: str) -> Optional[str]:
    """Derive a chat_id from a stream-config string.

    Args:
        stream_config_str: A string of the form "platform:id:type",
            where type is "group" for group chats.

    Returns:
        The md5-hex chat_id (same scheme as ChatStream.get_stream_id),
        or None when the string does not have exactly three fields.
    """
    try:
        fields = stream_config_str.split(":")
        if len(fields) != 3:
            return None
        platform, id_str, stream_type = fields
        # Private chats carry an extra "private" component in the hash key;
        # group chats hash only platform + id.
        if stream_type == "group":
            key_parts = [platform, str(id_str)]
        else:
            key_parts = [platform, str(id_str), "private"]
        return hashlib.md5("_".join(key_parts).encode()).hexdigest()
    except (ValueError, IndexError):
        return None

View File

@@ -18,6 +18,7 @@ from src.chat.planner_actions.action_manager import ActionManager
from src.chat.heart_flow.hfc_utils import CycleDetail
from src.chat.heart_flow.hfc_utils import send_typing, stop_typing
from src.chat.express.expression_learner import expression_learner_manager
from src.chat.frequency_control.frequency_control import frequency_control_manager
from src.person_info.person_info import Person
from src.plugin_system.base.component_types import EventType, ActionInfo
from src.plugin_system.core import events_manager
@@ -202,7 +203,7 @@ class HeartFChatting:
# *控制频率用
if mentioned_message:
await self._observe(recent_messages_list=recent_messages_list, force_reply_message=mentioned_message)
elif random.random() < global_config.chat.talk_value:
elif random.random() < global_config.chat.talk_value * frequency_control_manager.get_or_create_frequency_control(self.stream_id).get_talk_frequency_adjust():
await self._observe(recent_messages_list=recent_messages_list)
else:
return True

View File

@@ -106,8 +106,8 @@ class HeartFCMessageReceiver:
message.message_info.platform, # type: ignore
replace_bot_name=True,
)
if not processed_plain_text:
print(message)
# if not processed_plain_text:
# print(message)
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}") # type: ignore

View File

@@ -343,6 +343,7 @@ class ActionPlanner:
interest=interest,
plan_style=global_config.personality.plan_style,
)
return prompt, message_id_list
except Exception as e:
@@ -450,8 +451,8 @@ class ActionPlanner:
# 调用LLM
llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
# logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
# logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")

View File

@@ -702,9 +702,9 @@ class DefaultReplyer:
self._time_and_run_task(
self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
),
self._time_and_run_task(
self.build_relation_info(chat_talking_prompt_short, sender, person_list_short), "relation_info"
),
# self._time_and_run_task(
# self.build_relation_info(chat_talking_prompt_short, sender, person_list_short), "relation_info"
# ),
# self._time_and_run_task(self.build_memory_block(message_list_before_short, target), "memory_block"),
self._time_and_run_task(
self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
@@ -745,7 +745,7 @@ class DefaultReplyer:
expression_habits_block, selected_expressions = results_dict["expression_habits"]
expression_habits_block: str
selected_expressions: List[int]
relation_info: str = results_dict["relation_info"]
# relation_info: str = results_dict["relation_info"]
# memory_block: str = results_dict["memory_block"]
tool_info: str = results_dict["tool_info"]
prompt_info: str = results_dict["prompt_info"] # 直接使用格式化后的结果
@@ -786,7 +786,7 @@ class DefaultReplyer:
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
# memory_block=memory_block,
relation_info_block=relation_info,
# relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=personality_prompt,
action_descriptions=actions_info,
@@ -806,7 +806,7 @@ class DefaultReplyer:
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
# memory_block=memory_block,
relation_info_block=relation_info,
# relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=personality_prompt,
action_descriptions=actions_info,
@@ -856,9 +856,9 @@ class DefaultReplyer:
)
# 并行执行2个构建任务
(expression_habits_block, _), relation_info, personality_prompt = await asyncio.gather(
(expression_habits_block, _), personality_prompt = await asyncio.gather(
self.build_expression_habits(chat_talking_prompt_half, target),
self.build_relation_info(chat_talking_prompt_half, sender, []),
# self.build_relation_info(chat_talking_prompt_half, sender, []),
self.build_personality_prompt(),
)
@@ -909,7 +909,7 @@ class DefaultReplyer:
return await global_prompt_manager.format_prompt(
template_name,
expression_habits_block=expression_habits_block,
relation_info_block=relation_info,
# relation_info_block=relation_info,
chat_target=chat_target_1,
time_block=time_block,
chat_info=chat_talking_prompt_half,

View File

@@ -216,25 +216,25 @@ class PrivateReplyer:
traceback.print_exc()
return False, llm_response
async def build_relation_info(self, chat_content: str, sender: str):
if not global_config.relationship.enable_relationship:
return ""
# async def build_relation_info(self, chat_content: str, sender: str):
# if not global_config.relationship.enable_relationship:
# return ""
if not sender:
return ""
# if not sender:
# return ""
if sender == global_config.bot.nickname:
return ""
# if sender == global_config.bot.nickname:
# return ""
# 获取用户ID
person = Person(person_name=sender)
if not is_person_known(person_name=sender):
logger.warning(f"未找到用户 {sender} 的ID跳过信息提取")
return f"你完全不认识{sender}不理解ta的相关信息。"
# # 获取用户ID
# person = Person(person_name=sender)
# if not is_person_known(person_name=sender):
# logger.warning(f"未找到用户 {sender} 的ID跳过信息提取")
# return f"你完全不认识{sender}不理解ta的相关信息。"
sender_relation = await person.build_relationship(chat_content)
# sender_relation = await person.build_relationship(chat_content)
return f"{sender_relation}"
# return f"{sender_relation}"
async def build_expression_habits(self, chat_history: str, target: str) -> Tuple[str, List[int]]:
# sourcery skip: for-append-to-extend
@@ -724,9 +724,9 @@ class PrivateReplyer:
)
# 并行执行2个构建任务
(expression_habits_block, _), relation_info, personality_prompt = await asyncio.gather(
(expression_habits_block, _), personality_prompt = await asyncio.gather(
self.build_expression_habits(chat_talking_prompt_half, target),
self.build_relation_info(chat_talking_prompt_half, sender),
# self.build_relation_info(chat_talking_prompt_half, sender),
self.build_personality_prompt(),
)
@@ -777,7 +777,7 @@ class PrivateReplyer:
return await global_prompt_manager.format_prompt(
template_name,
expression_habits_block=expression_habits_block,
relation_info_block=relation_info,
# relation_info_block=relation_info,
chat_target=chat_target_1,
time_block=time_block,
chat_info=chat_talking_prompt_half,

View File

@@ -12,7 +12,7 @@ def init_replyer_prompt():
Prompt(
"""{knowledge_prompt}{relation_info_block}{tool_info_block}{extra_info_block}
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}
你正在qq群里聊天下面是群里正在聊的内容:
@@ -33,7 +33,7 @@ def init_replyer_prompt():
Prompt(
"""{knowledge_prompt}{relation_info_block}{tool_info_block}{extra_info_block}
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}
你正在qq群里聊天下面是群里正在聊的内容:
@@ -54,7 +54,7 @@ def init_replyer_prompt():
Prompt(
"""{knowledge_prompt}{relation_info_block}{tool_info_block}{extra_info_block}
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}
你正在和{sender_name}聊天,这是你们之前聊的内容:
@@ -73,7 +73,7 @@ def init_replyer_prompt():
Prompt(
"""{knowledge_prompt}{relation_info_block}{tool_info_block}{extra_info_block}
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}
你正在和{sender_name}聊天,这是你们之前聊的内容:

View File

@@ -13,8 +13,6 @@ def init_rewrite_prompt():
Prompt(
"""
{expression_habits_block}
{relation_info_block}
{chat_target}
{time_block}
{chat_info}