feat:添加回复log

This commit is contained in:
SengokuCola
2025-12-23 21:42:48 +08:00
parent 0f6ec45821
commit 839a42578c
6 changed files with 298 additions and 10 deletions

View File

@@ -10,6 +10,7 @@ from json_repair import repair_json
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config, model_config
from src.common.logger import get_logger
from src.chat.logger.plan_reply_logger import PlanReplyLogger
from src.common.data_models.info_data_model import ActionPlannerInfo
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import (
@@ -310,6 +311,7 @@ class ActionPlanner:
"""
规划器 (Planner): 使用LLM根据上下文决定做出什么动作。
"""
plan_start = time.perf_counter()
# 获取聊天上下文
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
@@ -345,6 +347,7 @@ class ActionPlanner:
logger.debug(f"{self.log_prefix}过滤后有{len(filtered_actions)}个可用动作")
prompt_build_start = time.perf_counter()
# 构建包含所有动作的提示词
prompt, message_id_list = await self.build_planner_prompt(
is_group_chat=is_group_chat,
@@ -353,9 +356,10 @@ class ActionPlanner:
chat_content_block=chat_content_block,
message_id_list=message_id_list,
)
prompt_build_ms = (time.perf_counter() - prompt_build_start) * 1000
# 调用LLM获取决策
reasoning, actions = await self._execute_main_planner(
reasoning, actions, llm_raw_output, llm_reasoning, llm_duration_ms = await self._execute_main_planner(
prompt=prompt,
message_id_list=message_id_list,
filtered_actions=filtered_actions,
@@ -397,6 +401,25 @@ class ActionPlanner:
self.add_plan_log(reasoning, actions)
try:
PlanReplyLogger.log_plan(
chat_id=self.chat_id,
prompt=prompt,
reasoning=reasoning,
raw_output=llm_raw_output,
raw_reasoning=llm_reasoning,
actions=actions,
timing={
"prompt_build_ms": round(prompt_build_ms, 2),
"llm_duration_ms": round(llm_duration_ms, 2) if llm_duration_ms is not None else None,
"total_plan_ms": round((time.perf_counter() - plan_start) * 1000, 2),
"loop_start_time": loop_start_time,
},
extra=None,
)
except Exception:
logger.exception(f"{self.log_prefix}记录plan日志失败")
return actions
def add_plan_log(self, reasoning: str, actions: List[ActionPlannerInfo]):
@@ -610,14 +633,19 @@ class ActionPlanner:
filtered_actions: Dict[str, ActionInfo],
available_actions: Dict[str, ActionInfo],
loop_start_time: float,
) -> Tuple[str, List[ActionPlannerInfo]]:
) -> Tuple[str, List[ActionPlannerInfo], Optional[str], Optional[str], Optional[float]]:
"""执行主规划器"""
llm_content = None
actions: List[ActionPlannerInfo] = []
llm_reasoning = None
llm_duration_ms = None
try:
# 调用LLM
llm_start = time.perf_counter()
llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
llm_duration_ms = (time.perf_counter() - llm_start) * 1000
llm_reasoning = reasoning_content
if global_config.debug.show_planner_prompt:
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
@@ -640,7 +668,7 @@ class ActionPlanner:
action_message=None,
available_actions=available_actions,
)
]
], llm_content, llm_reasoning, llm_duration_ms
# 解析LLM响应
extracted_reasoning = ""
@@ -685,7 +713,7 @@ class ActionPlanner:
logger.debug(f"{self.log_prefix}规划器选择了{len(actions)}个动作: {' '.join([a.action_type for a in actions])}")
return extracted_reasoning, actions
return extracted_reasoning, actions, llm_content, llm_reasoning, llm_duration_ms
def _create_no_reply(self, reasoning: str, available_actions: Dict[str, ActionInfo]) -> List[ActionPlannerInfo]:
"""创建no_reply"""