全部prompt独立

This commit is contained in:
UnCLAS-Prommer
2026-01-21 22:24:31 +08:00
parent 1a1edde750
commit f44598a331
34 changed files with 690 additions and 1037 deletions

View File

@@ -13,7 +13,7 @@ from src.config.config import global_config, model_config
from src.common.logger import get_logger
from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.common.data_models.info_data_model import ActionPlannerInfo
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.prompt.prompt_manager import prompt_manager
from src.chat.utils.chat_message_builder import (
build_readable_actions,
get_actions_by_timestamp_with_chat,
@@ -35,108 +35,6 @@ logger = get_logger("planner")
install(extra_lines=3)
# Registers the two prompt templates used by BrainPlanner's planning loop.
# NOTE(review): this block is taken from rendered diff output — original
# leading indentation appears stripped by the diff viewer; all code lines
# (including the template string bytes) are kept exactly as shown.
def init_prompt():
# ReAct-style planner prompt (translated from the original Chinese comment).
# Main planner template. It embeds three built-in actions (reply / wait /
# complete_talk) as literal text and appends dynamically-registered actions
# via the {action_options_text} placeholder. The model is asked to emit one
# or more ```json``` blocks, each naming an action, a target_message_id and
# a reason. Registered under the key "brain_planner_prompt_react".
Prompt(
"""
{time_block}
{name_block}
{chat_context_description},以下是具体的聊天内容
**聊天内容**
{chat_content_block}
**动作记录**
{actions_before_now_block}
**可用的action**
reply
动作描述:
进行回复,你可以自然的顺着正在进行的聊天内容进行回复或自然的提出一个问题
{{
"action": "reply",
"target_message_id":"想要回复的消息id",
"reason":"回复的原因"
}}
wait
动作描述:
暂时不再发言,等待指定时间。适用于以下情况:
- 你已经表达清楚一轮,想给对方留出空间
- 你感觉对方的话还没说完,或者自己刚刚发了好几条连续消息
- 你想要等待一定时间来让对方把话说完,或者等待对方反应
- 你想保持安静,专注""而不是马上回复
请你根据上下文来判断要等待多久,请你灵活判断:
- 如果你们交流间隔时间很短,聊的很频繁,不宜等待太久
- 如果你们交流间隔时间很长,聊的很少,可以等待较长时间
{{
"action": "wait",
"target_message_id":"想要作为这次等待依据的消息id通常是对方的最新消息",
"wait_seconds": 等待的秒数必填例如5 表示等待5秒,
"reason":"选择等待的原因"
}}
complete_talk
动作描述:
当前聊天暂时结束了,对方离开,没有更多话题了
你可以使用该动作来暂时休息,等待对方有新发言再继续:
- 多次wait之后对方迟迟不回复消息才用
- 如果对方只是短暂不回复应该使用wait而不是complete_talk
- 聊天内容显示当前聊天已经结束或者没有新内容时候选择complete_talk
选择此动作后,将不再继续循环思考,直到收到对方的新消息
{{
"action": "complete_talk",
"target_message_id":"触发完成对话的消息id通常是对方的最新消息",
"reason":"选择完成对话的原因"
}}
{action_options_text}
请选择合适的action并说明触发action的消息id和选择该action的原因。消息id格式:m+数字
先输出你的选择思考理由再输出你选择的action理由是一段平文本不要分点精简。
**动作选择要求**
请你根据聊天内容,用户的最新消息和以下标准选择合适的动作:
{plan_style}
{moderation_prompt}
请选择所有符合使用要求的action动作用json格式输出如果输出多个json每个json都要单独用```json包裹你可以重复使用同一个动作或不同动作:
**示例**
// 理由文本
```json
{{
"action":"动作名",
"target_message_id":"触发动作的消息id",
//对应参数
}}
```
```json
{{
"action":"动作名",
"target_message_id":"触发动作的消息id",
//对应参数
}}
```
""",
"brain_planner_prompt_react",
)
# Per-action sub-template: filled once for each dynamically available action
# (name / description / requirements / parameter list) and concatenated by
# the caller into the {action_options_text} slot of the template above.
# Registered under the key "brain_action_prompt".
# NOTE(review): "专注\"\"" in the wait description above looks like a garbled
# quote (a word seems to be missing between the quotes) — confirm upstream;
# left untouched here because it is runtime prompt text.
Prompt(
"""
{action_name}
动作描述:{action_description}
使用条件:
{action_require}
{{
"action": "{action_name}",{action_parameters},
"target_message_id":"触发action的消息id",
"reason":"触发action的原因"
}}
""",
"brain_action_prompt",
)
class BrainPlanner:
def __init__(self, chat_id: str, action_manager: ActionManager):
self.chat_id = chat_id
@@ -373,6 +271,7 @@ class BrainPlanner:
else:
actions_before_now_block = ""
chat_context_description: str = ""
if chat_target_info:
# 构建聊天上下文描述
chat_context_description = (
@@ -392,18 +291,17 @@ class BrainPlanner:
name_block = f"你的名字是{bot_name}{bot_nickname},请注意哪些是你自己的发言。"
# 获取主规划器模板并填充
planner_prompt_template = await global_prompt_manager.get_prompt_async(prompt_key)
prompt = planner_prompt_template.format(
time_block=time_block,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
action_options_text=action_options_block,
moderation_prompt=moderation_prompt_block,
name_block=name_block,
interest=interest,
plan_style=global_config.experimental.private_plan_style,
)
planner_prompt_template = prompt_manager.get_prompt(prompt_key)
planner_prompt_template.add_context("time_block", time_block)
planner_prompt_template.add_context("chat_context_description", chat_context_description)
planner_prompt_template.add_context("chat_content_block", chat_content_block)
planner_prompt_template.add_context("actions_before_now_block", actions_before_now_block)
planner_prompt_template.add_context("action_options_text", action_options_block)
planner_prompt_template.add_context("moderation_prompt", moderation_prompt_block)
planner_prompt_template.add_context("name_block", name_block)
planner_prompt_template.add_context("interest", interest)
planner_prompt_template.add_context("plan_style", global_config.experimental.private_plan_style)
prompt = await prompt_manager.render_prompt(planner_prompt_template)
return prompt, message_id_list
except Exception as e:
@@ -483,13 +381,12 @@ class BrainPlanner:
require_text = require_text.rstrip("\n")
# 获取动作提示模板并填充
using_action_prompt = await global_prompt_manager.get_prompt_async("brain_action_prompt")
using_action_prompt = using_action_prompt.format(
action_name=action_name,
action_description=action_info.description,
action_parameters=param_text,
action_require=require_text,
)
using_action_prompt_template = prompt_manager.get_prompt("brain_action_prompt")
using_action_prompt_template.add_context("action_name", action_name)
using_action_prompt_template.add_context("action_description", action_info.description)
using_action_prompt_template.add_context("action_parameters", param_text)
using_action_prompt_template.add_context("action_require", require_text)
using_action_prompt = await prompt_manager.render_prompt(using_action_prompt_template)
action_options_block += using_action_prompt
@@ -713,6 +610,3 @@ class BrainPlanner:
logger.debug(f"处理不完整的JSON代码块时出错: {e}")
return json_objects, reasoning_content
init_prompt()

View File

@@ -13,7 +13,6 @@ from src.config.config import global_config, model_config
from src.common.logger import get_logger
from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.common.data_models.info_data_model import ActionPlannerInfo
# from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.prompt.prompt_manager import prompt_manager
from src.chat.utils.chat_message_builder import (
build_readable_messages_with_id,
@@ -601,19 +600,6 @@ class ActionPlanner:
reply_action_example += ', "quote":"如果需要引用该message设置为true"'
reply_action_example += "}"
# planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
# prompt = planner_prompt_template.format(
# time_block=time_block,
# chat_context_description=chat_context_description,
# chat_content_block=chat_content_block,
# actions_before_now_block=actions_before_now_block,
# action_options_text=action_options_block,
# moderation_prompt=moderation_prompt_block,
# name_block=name_block,
# interest=interest,
# plan_style=global_config.personality.plan_style,
# reply_action_example=reply_action_example,
# )
planner_prompt_template = prompt_manager.get_prompt("planner_prompt")
planner_prompt_template.add_context("time_block", time_block)
planner_prompt_template.add_context("chat_context_description", chat_context_description)
@@ -709,14 +695,6 @@ class ActionPlanner:
parallel_text = ""
# 获取动作提示模板并填充
# using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
# using_action_prompt = using_action_prompt.format(
# action_name=action_name,
# action_description=action_info.description,
# action_parameters=param_text,
# action_require=require_text,
# parallel_text=parallel_text,
# )
using_action_prompt = prompt_manager.get_prompt("action_prompt")
using_action_prompt.add_context("action_name", action_name)
using_action_prompt.add_context("action_description", action_info.description)

View File

@@ -32,13 +32,11 @@ from src.plugin_system.base.component_types import ActionInfo, EventType
from src.plugin_system.apis import llm_api
from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.chat.replyer.prompt.lpmm_prompt import init_lpmm_prompt
from src.memory_system.memory_retrieval import init_memory_retrieval_prompt, build_memory_retrieval_prompt
from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
from src.bw_learner.jargon_explainer import explain_jargon_in_context, retrieve_concepts_with_jargon
from src.chat.utils.common_utils import TempMethodsExpression
init_lpmm_prompt()
init_memory_retrieval_prompt()
init_memory_retrieval_sys()
logger = get_logger("replyer")
@@ -977,33 +975,6 @@ class DefaultReplyer:
# 兜底:即使 multiple_reply_style 配置异常也不影响正常回复
reply_style = global_config.personality.reply_style
# return (
# await global_prompt_manager.format_prompt(
# prompt_name,
# expression_habits_block=expression_habits_block,
# tool_info_block=tool_info,
# bot_name=global_config.bot.nickname,
# knowledge_prompt=prompt_info,
# # relation_info_block=relation_info,
# extra_info_block=extra_info_block,
# jargon_explanation=jargon_explanation,
# identity=personality_prompt,
# action_descriptions=actions_info,
# sender_name=sender,
# dialogue_prompt=dialogue_prompt,
# time_block=time_block,
# reply_target_block=reply_target_block,
# reply_style=reply_style,
# keywords_reaction_prompt=keywords_reaction_prompt,
# moderation_prompt=moderation_prompt_block,
# memory_retrieval=memory_retrieval,
# chat_prompt=chat_prompt_block,
# planner_reasoning=planner_reasoning,
# ),
# selected_expressions,
# timing_logs,
# almost_zero_str,
# )
prompt = prompt_manager.get_prompt(prompt_name)
prompt.add_context("expression_habits_block", expression_habits_block)
prompt.add_context("tool_info_block", tool_info)
@@ -1111,22 +1082,6 @@ class DefaultReplyer:
except Exception:
reply_style = global_config.personality.reply_style
# return await global_prompt_manager.format_prompt(
# template_name,
# expression_habits_block=expression_habits_block,
# # relation_info_block=relation_info,
# chat_target=chat_target_1,
# time_block=time_block,
# chat_info=chat_talking_prompt_half,
# identity=personality_prompt,
# chat_target_2=chat_target_2,
# reply_target_block=reply_target_block,
# raw_reply=raw_reply,
# reason=reason,
# reply_style=reply_style,
# keywords_reaction_prompt=keywords_reaction_prompt,
# moderation_prompt=moderation_prompt_block,
# )
prompt_template = prompt_manager.get_prompt("default_expressor_prompt")
prompt_template.add_context("expression_habits_block", expression_habits_block)
# prompt_template.add_context("relation_info_block", relation_info)
@@ -1220,14 +1175,6 @@ class DefaultReplyer:
template_prompt.add_context("chat_history", message)
template_prompt.add_context("sender", sender)
template_prompt.add_context("target_message", target)
# prompt = await global_prompt_manager.format_prompt(
# "lpmm_get_knowledge_prompt",
# bot_name=bot_name,
# time_now=time_now,
# chat_history=message,
# sender=sender,
# target_message=target,
# )
prompt = await prompt_manager.render_prompt(template_prompt)
_, _, _, _, tool_calls = await llm_api.generate_with_model_with_tools(
prompt,

View File

@@ -15,9 +15,8 @@ from src.llm_models.utils_model import LLMRequest
from src.chat.message_receive.message import UserInfo, Seg, MessageRecv, MessageSending
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.uni_message_sender import UniversalMessageSender
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.utils import get_chat_type_and_target_info, is_bot_self
from src.chat.utils.prompt_builder import global_prompt_manager
from src.prompt.prompt_manager import prompt_manager
from src.chat.utils.common_utils import TempMethodsExpression
from src.chat.utils.chat_message_builder import (
@@ -27,21 +26,14 @@ from src.chat.utils.chat_message_builder import (
)
from src.bw_learner.expression_selector import expression_selector
from src.plugin_system.apis.message_api import translate_pid_to_description
# from src.memory_system.memory_activator import MemoryActivator
from src.person_info.person_info import Person, is_person_known
from src.plugin_system.base.component_types import ActionInfo, EventType
from src.plugin_system.apis import llm_api
from src.chat.replyer.prompt.lpmm_prompt import init_lpmm_prompt
from src.chat.replyer.prompt.replyer_private_prompt import init_replyer_private_prompt
from src.memory_system.memory_retrieval import init_memory_retrieval_prompt, build_memory_retrieval_prompt
from src.memory_system.memory_retrieval import init_memory_retrieval_sys, build_memory_retrieval_prompt
from src.bw_learner.jargon_explainer import explain_jargon_in_context
init_lpmm_prompt()
init_replyer_private_prompt()
init_memory_retrieval_prompt()
init_memory_retrieval_sys()
logger = get_logger("replyer")
@@ -667,7 +659,7 @@ class PrivateReplyer:
timestamp_mode="relative",
read_mark=0.0,
show_actions=True,
long_time_notice=True
long_time_notice=True,
)
message_list_before_short = get_raw_msg_before_timestamp_with_chat(
@@ -724,7 +716,12 @@ class PrivateReplyer:
self._time_and_run_task(self.build_personality_prompt(), "personality_prompt"),
self._time_and_run_task(
build_memory_retrieval_prompt(
chat_talking_prompt_short, sender, target, self.chat_stream, think_level=1, unknown_words=unknown_words
chat_talking_prompt_short,
sender,
target,
self.chat_stream,
think_level=1,
unknown_words=unknown_words,
),
"memory_retrieval",
),
@@ -800,7 +797,7 @@ class PrivateReplyer:
# 根据配置构建最终的 reply_style支持 multiple_reply_style 按概率随机替换
reply_style = global_config.personality.reply_style
multi_styles =global_config.personality.multiple_reply_style
multi_styles = global_config.personality.multiple_reply_style
multi_prob = global_config.personality.multiple_probability or 0.0
if multi_styles and multi_prob > 0 and random.random() < multi_prob:
try:
@@ -810,50 +807,33 @@ class PrivateReplyer:
reply_style = global_config.personality.reply_style
# 使用统一的 is_bot_self 函数判断是否是机器人自己(支持多平台,包括 WebUI
if is_bot_self(platform, user_id):
return await global_prompt_manager.format_prompt(
"private_replyer_self_prompt",
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=personality_prompt,
action_descriptions=actions_info,
dialogue_prompt=dialogue_prompt,
jargon_explanation=jargon_explanation,
time_block=time_block,
target=target,
reason=reply_reason,
sender_name=sender,
reply_style=reply_style,
keywords_reaction_prompt=keywords_reaction_prompt,
moderation_prompt=moderation_prompt_block,
memory_retrieval=memory_retrieval,
chat_prompt=chat_prompt_block,
), selected_expressions
prompt_template = prompt_manager.get_prompt("private_replyer_self_prompt")
prompt_template.add_context("target", target)
prompt_template.add_context("reason", reply_reason)
else:
return await global_prompt_manager.format_prompt(
"private_replyer_prompt",
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=personality_prompt,
action_descriptions=actions_info,
dialogue_prompt=dialogue_prompt,
jargon_explanation=jargon_explanation,
time_block=time_block,
reply_target_block=reply_target_block,
reply_style=reply_style,
keywords_reaction_prompt=keywords_reaction_prompt,
moderation_prompt=moderation_prompt_block,
sender_name=sender,
memory_retrieval=memory_retrieval,
chat_prompt=chat_prompt_block,
planner_reasoning=planner_reasoning,
), selected_expressions
prompt_template = prompt_manager.get_prompt("private_replyer_prompt")
prompt_template.add_context("reply_target_block", reply_target_block)
prompt_template.add_context("planner_reasoning", planner_reasoning)
prompt_template.add_context("expression_habits_block", expression_habits_block)
prompt_template.add_context("tool_info_block", tool_info)
prompt_template.add_context("knowledge_prompt", prompt_info)
prompt_template.add_context("relation_info_block", relation_info)
prompt_template.add_context("extra_info_block", extra_info_block)
prompt_template.add_context("identity", personality_prompt)
prompt_template.add_context("action_descriptions", actions_info)
prompt_template.add_context("dialogue_prompt", dialogue_prompt)
prompt_template.add_context("jargon_explanation", jargon_explanation)
prompt_template.add_context("time_block", time_block)
prompt_template.add_context("sender_name", sender)
prompt_template.add_context("keywords_reaction_prompt", keywords_reaction_prompt)
prompt_template.add_context("reply_style", reply_style)
prompt_template.add_context("memory_retrieval", memory_retrieval)
prompt_template.add_context("chat_prompt", chat_prompt_block)
prompt_template.add_context("moderation_prompt", moderation_prompt_block)
prompt = await prompt_manager.render_prompt(prompt_template)
return prompt, selected_expressions
async def build_prompt_rewrite_context(
self,
@@ -943,22 +923,6 @@ class PrivateReplyer:
# 兜底:即使 multiple_reply_style 配置异常也不影响正常回复
reply_style = global_config.personality.reply_style
# return await global_prompt_manager.format_prompt(
# template_name,
# expression_habits_block=expression_habits_block,
# # relation_info_block=relation_info,
# chat_target=chat_target_1,
# time_block=time_block,
# chat_info=chat_talking_prompt_half,
# identity=personality_prompt,
# chat_target_2=chat_target_2,
# reply_target_block=reply_target_block,
# raw_reply=raw_reply,
# reason=reason,
# reply_style=reply_style,
# keywords_reaction_prompt=keywords_reaction_prompt,
# moderation_prompt=moderation_prompt_block,
# )
prompt_template = prompt_manager.get_prompt("default_expressor_prompt")
prompt_template.add_context("expression_habits_block", expression_habits_block)
# prompt_template.add_context("relation_info_block", relation_info)
@@ -1046,18 +1010,14 @@ class PrivateReplyer:
if global_config.lpmm_knowledge.lpmm_mode == "agent":
return ""
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
prompt_template = prompt_manager.get_prompt("lpmm_get_knowledge_prompt")
prompt_template.add_context("bot_name", global_config.bot.nickname)
prompt_template.add_context("time_now", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
prompt_template.add_context("chat_history", message)
prompt_template.add_context("sender", sender)
prompt_template.add_context("target_message", target)
prompt = await prompt_manager.render_prompt(prompt_template)
bot_name = global_config.bot.nickname
prompt = await global_prompt_manager.format_prompt(
"lpmm_get_knowledge_prompt",
bot_name=bot_name,
time_now=time_now,
chat_history=message,
sender=sender,
target_message=target,
)
_, _, _, _, tool_calls = await llm_api.generate_with_model_with_tools(
prompt,
model_config=model_config.model_task_config.tool_use,

View File

@@ -1,20 +0,0 @@
from src.chat.utils.prompt_builder import Prompt
# from src.memory_system.memory_activator import MemoryActivator
# Registers the "lpmm_get_knowledge_prompt" template.
# The rendered prompt shows the bot the current chat history plus the latest
# message from {sender}, and asks the model to either call the
# "lpmm_search_knowledge" tool (when the chat needs a knowledge lookup) or
# output the literal "No tool needed".
# NOTE(review): taken from a diff of a deleted file — indentation as rendered;
# code lines kept byte-identical.
def init_lpmm_prompt():
Prompt(
"""
你是一个专门获取知识的助手。你的名字是{bot_name}。现在是{time_now}
群里正在进行的聊天内容:
{chat_history}
现在,{sender}发送了内容:{target_message},你想要回复ta。
请仔细分析聊天内容,考虑以下几点:
1. 内容中是否包含需要查询信息的问题
2. 是否有明确的知识获取指令
If you need to use the search tool, please directly call the function "lpmm_search_knowledge". If you do not need to use any tool, simply output "No tool needed".
""",
name="lpmm_get_knowledge_prompt",
)

View File

@@ -1,41 +0,0 @@
from src.chat.utils.prompt_builder import Prompt
# Registers the two private-chat reply templates used by PrivateReplyer.
# - "private_replyer_prompt": normal reply to another user's message; fills
#   knowledge/tool/memory context blocks plus {reply_target_block} and
#   {planner_reasoning}.
# - "private_replyer_self_prompt": follow-up to the bot's OWN previous message
#   ({target}), continuing it for the stated {reason}.
# NOTE(review): taken from a diff of a deleted file — indentation as rendered;
# code lines (runtime prompt text) kept byte-identical.
def init_replyer_private_prompt():
# Normal private reply template.
Prompt(
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}{memory_retrieval}{jargon_explanation}
你正在和{sender_name}聊天,这是你们之前聊的内容:
{time_block}
{dialogue_prompt}
{reply_target_block}
{planner_reasoning}
{identity}
{chat_prompt}你正在和{sender_name}聊天,现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理。
{reply_style}
请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
{moderation_prompt}不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。""",
"private_replyer_prompt",
)
# Self-continuation template: the bot elaborates on its own last message.
Prompt(
"""{knowledge_prompt}{tool_info_block}{extra_info_block}
{expression_habits_block}{memory_retrieval}{jargon_explanation}
你正在和{sender_name}聊天,这是你们之前聊的内容:
{time_block}
{dialogue_prompt}
你现在想补充说明你刚刚自己的发言内容:{target},原因是{reason}
请你根据聊天内容,组织一条新回复。注意,{target} 是刚刚你自己的发言,你要在这基础上进一步发言,请按照你自己的角度来继续进行回复。注意保持上下文的连贯性。
{identity}
{chat_prompt}尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。
{reply_style}
请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
{moderation_prompt}不要输出多余内容(包括冒号和引号括号表情包at或 @等 )。
""",
"private_replyer_self_prompt",
)