fix: 修复合并问题

This commit is contained in:
SengokuCola
2025-05-16 23:16:47 +08:00
parent b4f6db0767
commit 61e0dbe372
13 changed files with 25 additions and 23 deletions

View File

@@ -27,7 +27,7 @@ class ChattingInfoProcessor(BaseProcessor):
"""初始化观察处理器"""
super().__init__()
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest(
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
@@ -94,7 +94,7 @@ class ChattingInfoProcessor(BaseProcessor):
async def chat_compress(self, obs: ChattingObservation):
if obs.compressor_prompt:
try:
summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt)
summary_result, _, _ = await self.model_summary.generate_response(obs.compressor_prompt)
summary = "没有主题的闲聊" # 默认值
if summary_result: # 确保结果不为空
summary = summary_result

View File

@@ -49,8 +49,8 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
model=global_config.model.sub_heartflow,
temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="self_identify",
)

View File

@@ -61,8 +61,8 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
model=global_config.model.sub_heartflow,
temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="working_memory",
)
@@ -118,7 +118,7 @@ class WorkingMemoryProcessor(BaseProcessor):
# 使用提示模板进行处理
prompt = (await global_prompt_manager.get_prompt_async("prompt_memory_proces")).format(
bot_name=global_config.BOT_NICKNAME,
bot_name=global_config.bot.nickname,
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_info,
memory_str=memory_choose_str,