汉化日志

This commit is contained in:
DrSmoothl
2026-03-30 21:56:18 +08:00
parent 0e14cb5de9
commit e7dbaa1a70
7 changed files with 230 additions and 219 deletions

View File

@@ -80,7 +80,7 @@ class MaisakaReplyGenerator:
return f"你的名字是{bot_name}{bot_aliases},你{prompt_personality};"
except Exception as exc:
logger.warning(f"Failed to build Maisaka personality prompt: {exc}")
logger.warning(f"构建 Maisaka 人设提示词失败: {exc}")
return "你的名字是麦麦,你是一个活泼可爱的 AI 助手。"
@staticmethod
@@ -217,7 +217,7 @@ class MaisakaReplyGenerator:
"""在 replyer 内部构建表达习惯和黑话解释。"""
session_id = self._resolve_session_id(stream_id)
if not session_id:
logger.warning("Failed to build Maisaka reply context: session_id is missing")
logger.warning("构建 Maisaka 回复上下文失败:缺少会话标识")
return MaisakaReplyContext()
expression_habits, selected_expression_ids = self._build_expression_habits(
@@ -256,8 +256,8 @@ class MaisakaReplyGenerator:
block = "【表达习惯参考】\n" + "\n".join(lines)
logger.info(
f"Built Maisaka expression habits: session_id={session_id} "
f"count={len(selected_ids)} ids={selected_ids!r}"
f"已构建 Maisaka 表达习惯: 会话标识={session_id} "
f"数量={len(selected_ids)} 表达编号={selected_ids!r}"
)
return block, selected_ids
@@ -313,12 +313,12 @@ class MaisakaReplyGenerator:
result = ReplyGenerationResult()
if chat_history is None:
result.error_message = "chat_history is empty"
result.error_message = "聊天历史为空"
return False, result
logger.info(
f"Maisaka replyer start: stream_id={stream_id} reply_reason={reply_reason!r} "
f"history_size={len(chat_history)} target_message_id="
f"Maisaka 回复器开始生成: 会话流标识={stream_id} 回复原因={reply_reason!r} "
f"历史消息数={len(chat_history)} 目标消息编号="
f"{reply_message.message_id if reply_message else None}"
)
@@ -328,12 +328,12 @@ class MaisakaReplyGenerator:
if not isinstance(message, (ReferenceMessage, ToolResultMessage))
]
logger.debug(f"Maisaka replyer: filtered_history size={len(filtered_history)}")
logger.debug(f"Maisaka 回复器过滤后历史消息数={len(filtered_history)}")
# Validate that express_model is properly initialized
if self.express_model is None:
logger.error("Maisaka replyer: express_model is None!")
result.error_message = "express_model is not initialized"
logger.error("Maisaka 回复器的回复模型未初始化")
result.error_message = "回复模型尚未初始化"
return False, result
try:
@@ -345,8 +345,8 @@ class MaisakaReplyGenerator:
)
except Exception as exc:
import traceback
logger.error(f"Maisaka replyer: _build_reply_context failed: {exc}\n{traceback.format_exc()}")
result.error_message = f"_build_reply_context failed: {exc}"
logger.error(f"Maisaka 回复器构建回复上下文失败: {exc}\n{traceback.format_exc()}")
result.error_message = f"构建回复上下文失败: {exc}"
return False, result
merged_expression_habits = expression_habits.strip() or reply_context.expression_habits
@@ -357,8 +357,8 @@ class MaisakaReplyGenerator:
)
logger.info(
f"Maisaka reply context built: stream_id={stream_id} "
f"selected_expression_ids={result.selected_expression_ids!r}"
f"Maisaka 回复上下文构建完成: 会话流标识={stream_id} "
f"已选表达编号={result.selected_expression_ids!r}"
)
try:
@@ -369,20 +369,20 @@ class MaisakaReplyGenerator:
)
except Exception as exc:
import traceback
logger.error(f"Maisaka replyer: _build_prompt failed: {exc}\n{traceback.format_exc()}")
result.error_message = f"_build_prompt failed: {exc}"
logger.error(f"Maisaka 回复器构建提示词失败: {exc}\n{traceback.format_exc()}")
result.error_message = f"构建提示词失败: {exc}"
return False, result
result.completion.request_prompt = prompt
if global_config.debug.show_replyer_prompt:
logger.info(f"\nMaisaka replyer prompt:\n{prompt}\n")
logger.info(f"\nMaisaka 回复器提示词:\n{prompt}\n")
started_at = time.perf_counter()
try:
generation_result = await self.express_model.generate_response(prompt)
except Exception as exc:
logger.exception("Maisaka replyer call failed")
logger.exception("Maisaka 回复器调用失败")
result.error_message = str(exc)
result.metrics = GenerationMetrics(
overall_ms=round((time.perf_counter() - started_at) * 1000, 2),
@@ -403,17 +403,17 @@ class MaisakaReplyGenerator:
)
if global_config.debug.show_replyer_reasoning and result.completion.reasoning_text:
logger.info(f"Maisaka replyer reasoning:\n{result.completion.reasoning_text}")
logger.info(f"Maisaka 回复器思考内容:\n{result.completion.reasoning_text}")
if not result.success:
result.error_message = "replyer returned empty content"
logger.warning("Maisaka replyer returned empty content")
result.error_message = "回复器返回了空内容"
logger.warning("Maisaka 回复器返回了空内容")
return False, result
logger.info(
f"Maisaka replyer success: response_text={response_text!r} "
f"overall_ms={result.metrics.overall_ms} "
f"selected_expression_ids={result.selected_expression_ids!r}"
f"Maisaka 回复器生成成功: 回复文本={response_text!r} "
f"总耗时毫秒={result.metrics.overall_ms} "
f"已选表达编号={result.selected_expression_ids!r}"
)
result.text_fragments = [response_text]
return True, result

View File

@@ -58,9 +58,9 @@ class BufferCLI:
knowledge_stats = self._knowledge_store.get_stats()
if knowledge_stats["total_items"] > 0:
console.print(f"[success][OK] Knowledge store: {knowledge_stats['total_items']} item(s)[/success]")
console.print(f"[success]知识库中已有 {knowledge_stats['total_items']} 条数据[/success]")
else:
console.print("[muted][OK] Knowledge store: initialized with no data[/muted]")
console.print("[muted]知识库已初始化,当前没有数据[/muted]")
self._chat_start_time: Optional[datetime] = None
self._last_user_input_time: Optional[datetime] = None
@@ -78,7 +78,7 @@ class BufferCLI:
self._chat_loop_service = MaisakaChatLoopService()
model_name = self._get_current_model_name()
console.print(f"[success][OK] LLM service initialized[/success] [muted](model: {model_name})[/muted]")
console.print(f"[success]大模型服务已初始化[/success] [muted](模型: {model_name})[/muted]")
@staticmethod
def _get_current_model_name() -> str:
@@ -89,7 +89,7 @@ class BufferCLI:
return model_task_config.planner.model_list[0]
except Exception:
pass
return "unconfigured"
return "未配置"
def _build_tool_context(self) -> ToolHandlerContext:
"""构建工具处理的共享上下文。"""
@@ -105,7 +105,7 @@ class BufferCLI:
banner = Text()
banner.append("MaiSaka", style="bold cyan")
banner.append(" v2.0\n", style="muted")
banner.append("Type to chat | Ctrl+C to exit", style="muted")
banner.append("输入内容开始对话 | Ctrl+C 退出", style="muted")
console.print(Panel(banner, box=box.DOUBLE_EDGE, border_style="cyan", padding=(1, 2)))
console.print()
@@ -113,7 +113,7 @@ class BufferCLI:
async def _start_chat(self, user_text: str) -> None:
"""追加用户输入并继续内部循环。"""
if self._chat_loop_service is None:
console.print("[warning]LLM service is not initialized; skipping chat.[/warning]")
console.print("[warning]大模型服务尚未初始化,已跳过本次对话。[/warning]")
return
now = datetime.now()
@@ -145,7 +145,7 @@ class BufferCLI:
speaker_name: Optional[str] = None,
) -> SessionBackedMessage:
"""为 CLI 构造新的上下文消息。"""
resolved_speaker_name = speaker_name or global_config.maisaka.user_name.strip() or "User"
resolved_speaker_name = speaker_name or global_config.maisaka.user_name.strip() or "用户"
visible_text = format_speaker_content(
resolved_speaker_name,
user_text,
@@ -177,7 +177,7 @@ class BufferCLI:
message.message_info = MessageInfo(
user_info=UserInfo(
user_id="maisaka_user",
user_nickname=global_config.maisaka.user_name.strip() or "User",
user_nickname=global_config.maisaka.user_name.strip() or "用户",
user_cardname=None,
),
group_info=None,
@@ -186,7 +186,7 @@ class BufferCLI:
message.session_id = "maisaka_cli"
message.raw_message = MessageSequence([])
visible_text = format_speaker_content(
global_config.maisaka.user_name.strip() or "User",
global_config.maisaka.user_name.strip() or "用户",
user_text,
timestamp,
)
@@ -219,9 +219,9 @@ class BufferCLI:
try:
added_count = await self._knowledge_learner.learn()
if added_count > 0 and global_config.maisaka.show_thinking:
console.print(f"[muted]Knowledge learning added {added_count} item(s).[/muted]")
console.print(f"[muted]知识学习已完成,新增 {added_count} 条数据。[/muted]")
except Exception as exc:
console.print(f"[warning]Knowledge learning failed: {exc}[/warning]")
console.print(f"[warning]知识学习失败:{exc}[/warning]")
async def _run_llm_loop(self, chat_history: list[LLMContextMessage]) -> None:
"""
@@ -246,10 +246,10 @@ class BufferCLI:
if global_config.maisaka.enable_knowledge_module:
tasks.append(("knowledge", retrieve_relevant_knowledge(self._chat_loop_service, chat_history)))
status_text_parts.append("knowledge")
status_text_parts.append("知识库")
with console.status(
f"[info]{' + '.join(status_text_parts)} analyzing...[/info]",
f"[info]{' + '.join(status_text_parts)} 分析中...[/info]",
spinner="dots",
):
results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True)
@@ -258,14 +258,14 @@ class BufferCLI:
if global_config.maisaka.enable_knowledge_module:
knowledge_result = results[0] if results else None
if isinstance(knowledge_result, Exception):
console.print(f"[warning]Knowledge analysis failed: {knowledge_result}[/warning]")
console.print(f"[warning]知识分析失败:{knowledge_result}[/warning]")
elif knowledge_result:
knowledge_analysis = knowledge_result
if global_config.maisaka.show_thinking:
console.print(
Panel(
Markdown(knowledge_analysis),
title="Knowledge",
title="知识",
border_style="bright_magenta",
padding=(0, 1),
style="dim",
@@ -277,7 +277,7 @@ class BufferCLI:
perception_parts = []
if knowledge_analysis:
perception_parts.append(f"Knowledge\n{knowledge_analysis}")
perception_parts.append(f"知识库\n{knowledge_analysis}")
if perception_parts:
chat_history.append(
@@ -288,17 +288,17 @@ class BufferCLI:
)
)
elif global_config.maisaka.show_thinking:
console.print("[muted]Skipping module analysis because the last round used no tools.[/muted]")
console.print("[muted]上一轮没有使用工具,本轮跳过模块分析。[/muted]")
with console.status("[info]AI is thinking...[/info]", spinner="dots"):
with console.status("[info]正在思考...[/info]", spinner="dots"):
try:
response = await self._chat_loop_service.chat_loop_step(chat_history)
consecutive_errors = 0
except Exception as exc:
consecutive_errors += 1
console.print(f"[error]LLM call failed: {exc}[/error]")
console.print(f"[error]大模型调用失败:{exc}[/error]")
if consecutive_errors >= 3:
console.print("[error]Too many consecutive errors. Exiting chat.[/error]\n")
console.print("[error]连续失败次数过多,结束对话。[/error]\n")
break
continue
@@ -309,7 +309,7 @@ class BufferCLI:
console.print(
Panel(
Markdown(response.content),
title="Thought",
title="思考",
border_style="dim",
padding=(1, 2),
style="dim",
@@ -336,7 +336,7 @@ class BufferCLI:
reply = await self._generate_visible_reply(chat_history, response.content)
chat_history.append(
ToolResultMessage(
content="Visible reply generated and recorded.",
content="已生成并记录可见回复。",
timestamp=datetime.now(),
tool_call_id=tool_call.call_id,
tool_name=tool_call.func_name,
@@ -353,10 +353,10 @@ class BufferCLI:
elif tool_call.func_name == "no_reply":
if global_config.maisaka.show_thinking:
console.print("[muted]No visible reply this round.[/muted]")
console.print("[muted]本轮未发送可见回复。[/muted]")
chat_history.append(
ToolResultMessage(
content="No visible reply was sent for this round.",
content="本轮未发送可见回复。",
timestamp=datetime.now(),
tool_call_id=tool_call.call_id,
tool_name=tool_call.func_name,
@@ -377,7 +377,7 @@ class BufferCLI:
await handle_unknown_tool(tool_call, chat_history)
if should_stop:
console.print("[muted]Conversation paused. Waiting for new input...[/muted]\n")
console.print("[muted]对话已暂停,等待新的输入...[/muted]\n")
break
last_had_tool_calls = True
@@ -394,8 +394,8 @@ class BufferCLI:
summary = self._mcp_manager.get_tool_summary()
console.print(
Panel(
f"Loaded {len(mcp_tools)} MCP tool(s):\n{summary}",
title="MCP Tools",
f"已加载 {len(mcp_tools)} 个 MCP 工具:\n{summary}",
title="MCP 工具",
border_style="green",
padding=(0, 1),
)
@@ -406,7 +406,7 @@ class BufferCLI:
if not latest_thought:
return ""
with console.status("[info]Generating visible reply...[/info]", spinner="dots"):
with console.status("[info]正在生成可见回复...[/info]", spinner="dots"):
success, result = await self._reply_generator.generate_reply_with_context(
reply_reason=latest_thought,
chat_history=chat_history,
@@ -432,7 +432,7 @@ class BufferCLI:
if global_config.maisaka.enable_mcp:
await self._init_mcp()
else:
console.print("[muted]MCP is disabled (ENABLE_MCP=false)[/muted]")
console.print("[muted]MCP 已禁用(ENABLE_MCP=false)[/muted]")
self._reader.start(asyncio.get_event_loop())
self._show_banner()
@@ -443,7 +443,7 @@ class BufferCLI:
raw_input = await self._reader.get_line()
if raw_input is None:
console.print("\n[muted]Goodbye![/muted]")
console.print("\n[muted]再见![/muted]")
break
raw_input = raw_input.strip()

View File

@@ -66,7 +66,7 @@ async def retrieve_relevant_knowledge(
return ""
return store.get_formatted_knowledge(category_ids)
except Exception:
logger.exception("Failed to retrieve relevant knowledge")
logger.exception("检索相关知识失败")
return ""
@@ -115,12 +115,12 @@ class KnowledgeLearner:
),
)
except Exception:
logger.exception("Knowledge learning model call failed")
logger.exception("知识学习模型调用失败")
return 0
knowledge_items = self._parse_learning_result(result.response or "")
if not knowledge_items:
logger.debug("Knowledge learning finished without extracted entries")
logger.debug("知识学习已完成,但未提取到有效条目")
return 0
added_count = 0
@@ -148,11 +148,11 @@ class KnowledgeLearner:
if added_count > 0:
logger.info(
f"Maisaka knowledge learning finished: session_id={self._session_id} added={added_count}"
f"Maisaka 知识学习已完成: 会话标识={self._session_id} 新增条数={added_count}"
)
else:
logger.debug(
f"Maisaka knowledge learning finished without new entries: session_id={self._session_id}"
f"Maisaka 知识学习已完成,但没有新增条目: 会话标识={self._session_id}"
)
return added_count

View File

@@ -165,6 +165,18 @@ class MaisakaChatLoopService:
return "bold white on magenta"
return "bold white on bright_black"
@staticmethod
def _get_role_badge_label(role: str) -> str:
if role == "system":
return "系统"
if role == "user":
return "用户"
if role == "assistant":
return "助手"
if role == "tool":
return "工具"
return "未知"
@staticmethod
def _build_terminal_image_preview(image_base64: str) -> Optional[str]:
ascii_chars = " .:-=+*#%@"
@@ -209,7 +221,7 @@ class MaisakaChatLoopService:
approx_size = max(0, len(image_base64) * 3 // 4)
size_text = f"{approx_size / 1024:.1f} KB" if approx_size >= 1024 else f"{approx_size} B"
preview_parts: List[object] = [
Text(f"image/{image_format} {size_text}\nbase64 omitted", style="magenta")
Text(f"图片格式 image/{image_format} {size_text}\nbase64 内容已省略", style="magenta")
]
if global_config.maisaka.terminal_image_preview:
preview_text = cls._build_terminal_image_preview(image_base64)
@@ -252,7 +264,7 @@ class MaisakaChatLoopService:
def _render_tool_call_panel(self, tool_call: Any, index: int, parent_index: int) -> Panel:
title = Text.assemble(
Text(" TOOL CALL ", style="bold white on magenta"),
Text(" 工具调用 ", style="bold white on magenta"),
Text(f" #{parent_index}.{index}", style="muted"),
)
return Panel(
@@ -274,26 +286,26 @@ class MaisakaChatLoopService:
role = raw_role.value if hasattr(raw_role, "value") else str(raw_role)
title = Text.assemble(
Text(f" {role.upper()} ", style=self._get_role_badge_style(role)),
Text(f" {self._get_role_badge_label(role)} ", style=self._get_role_badge_style(role)),
Text(f" #{index}", style="muted"),
)
parts: List[object] = []
if content not in (None, "", []):
parts.append(Text(" message ", style="bold cyan"))
parts.append(Text(" 消息 ", style="bold cyan"))
parts.append(self._render_message_content(content))
if tool_call_id:
parts.append(
Text.assemble(
Text(" tool_call_id ", style="bold magenta"),
Text(" 工具调用编号 ", style="bold magenta"),
Text(" "),
Text(str(tool_call_id), style="magenta"),
)
)
if not parts:
parts.append(Text("[empty message]", style="muted"))
parts.append(Text("[空消息]", style="muted"))
return Panel(
Group(*parts),
@@ -334,7 +346,7 @@ class MaisakaChatLoopService:
console.print(
Panel(
Group(*ordered_panels),
title="MaiSaka LLM Request - chat_loop_step",
title="MaiSaka 大模型请求 - 对话单步",
subtitle=selection_reason,
border_style="cyan",
padding=(0, 1),
@@ -343,11 +355,11 @@ class MaisakaChatLoopService:
request_started_at = perf_counter()
logger.info(
"planner 请求开始: "
f"selected_history={len(selected_history)} "
f"llm_messages={len(built_messages)} "
f"tool_count={len(all_tools)} "
f"interrupt_enabled={self._interrupt_flag is not None}"
"规划器请求开始: "
f"已选上下文消息数={len(selected_history)} "
f"大模型消息数={len(built_messages)} "
f"工具数={len(all_tools)} "
f"启用打断={self._interrupt_flag is not None}"
)
generation_result = await self._llm_chat.generate_response_with_messages(
message_factory=message_factory,
@@ -359,19 +371,19 @@ class MaisakaChatLoopService:
),
)
request_elapsed = perf_counter() - request_started_at
logger.info(f"planner 请求完成elapsed={request_elapsed:.3f}s")
logger.info(f"规划器请求完成,耗时={request_elapsed:.3f}")
tool_call_summaries = [
{
"id": getattr(tool_call, "call_id", getattr(tool_call, "id", None)),
"name": getattr(tool_call, "func_name", getattr(tool_call, "name", None)),
"args": getattr(tool_call, "args", getattr(tool_call, "arguments", None)),
"调用编号": getattr(tool_call, "call_id", getattr(tool_call, "id", None)),
"工具名": getattr(tool_call, "func_name", getattr(tool_call, "name", None)),
"参数": getattr(tool_call, "args", getattr(tool_call, "arguments", None)),
}
for tool_call in (generation_result.tool_calls or [])
]
logger.info(
f"Maisaka planner returned content={generation_result.response or ''!r} "
f"tool_calls={tool_call_summaries}"
f"Maisaka 规划器返回结果: 内容={generation_result.response or ''!r} "
f"工具调用={tool_call_summaries}"
)
raw_message = AssistantMessage(

View File

@@ -74,11 +74,11 @@ class MaisakaReasoningEngine:
anchor_message = self._get_timeout_anchor_message()
if anchor_message is None:
logger.warning(
f"{self._runtime.log_prefix} wait 超时后缺少可复用的锚点消息,跳过本轮继续思考"
f"{self._runtime.log_prefix} 等待超时后缺少可复用的锚点消息,跳过本轮继续思考"
)
self._runtime._internal_turn_queue.task_done()
continue
logger.info(f"{self._runtime.log_prefix} wait 超时后开始新一轮思考")
logger.info(f"{self._runtime.log_prefix} 等待超时后开始新一轮思考")
self._runtime._chat_history.append(self._build_wait_timeout_message())
self._trim_chat_history()
try:
@@ -88,10 +88,10 @@ class MaisakaReasoningEngine:
try:
planner_started_at = time.time()
logger.info(
f"{self._runtime.log_prefix} planner 开始: "
f"round={round_index + 1} "
f"history_size={len(self._runtime._chat_history)} "
f"started_at={planner_started_at:.3f}"
f"{self._runtime.log_prefix} 规划器开始执行: "
f"回合={round_index + 1} "
f"历史消息数={len(self._runtime._chat_history)} "
f"开始时间={planner_started_at:.3f}"
)
interrupt_flag = asyncio.Event()
self._runtime._planner_interrupt_flag = interrupt_flag
@@ -104,16 +104,16 @@ class MaisakaReasoningEngine:
self._runtime._chat_loop_service.set_interrupt_flag(None)
cycle_detail.time_records["planner"] = time.time() - planner_started_at
logger.info(
f"{self._runtime.log_prefix} planner 完成: "
f"round={round_index + 1} "
f"elapsed={cycle_detail.time_records['planner']:.3f}s"
f"{self._runtime.log_prefix} 规划器执行完成: "
f"回合={round_index + 1} "
f"耗时={cycle_detail.time_records['planner']:.3f}"
)
reasoning_content = response.content or ""
if self._should_replace_reasoning(reasoning_content):
response.content = "让我根据新情况重新思考:"
response.raw_message.content = "让我根据新情况重新思考:"
logger.info(f"{self._runtime.log_prefix} reasoning content replaced due to high similarity")
logger.info(f"{self._runtime.log_prefix} 当前思考与上一轮过于相似,已替换为重新思考提示")
self._last_reasoning_content = reasoning_content
self._runtime._chat_history.append(response.raw_message)
@@ -137,11 +137,11 @@ class MaisakaReasoningEngine:
except ReqAbortException:
interrupted_at = time.time()
logger.info(
f"{self._runtime.log_prefix} planner 打断成功: "
f"round={round_index + 1} "
f"started_at={planner_started_at:.3f} "
f"interrupted_at={interrupted_at:.3f} "
f"elapsed={interrupted_at - planner_started_at:.3f}s"
f"{self._runtime.log_prefix} 规划器打断成功: "
f"回合={round_index + 1} "
f"开始时间={planner_started_at:.3f} "
f"打断时间={interrupted_at:.3f} "
f"耗时={interrupted_at - planner_started_at:.3f}"
)
break
finally:
@@ -154,7 +154,7 @@ class MaisakaReasoningEngine:
self._runtime._log_internal_loop_cancelled()
raise
except Exception:
logger.exception("%s Maisaka internal loop crashed", self._runtime.log_prefix)
logger.exception(f"{self._runtime.log_prefix} Maisaka 内部循环发生异常")
logger.error(traceback.format_exc())
raise
@@ -169,7 +169,7 @@ class MaisakaReasoningEngine:
tool_call_id = self._runtime._pending_wait_tool_call_id or "wait_timeout"
self._runtime._pending_wait_tool_call_id = None
return ToolResultMessage(
content="wait 已超时,期间没有收到新的用户输入。请基于现有上下文继续下一轮思考。",
content="等待已超时,期间没有收到新的用户输入。请基于现有上下文继续下一轮思考。",
timestamp=datetime.now(),
tool_call_id=tool_call_id,
tool_name="wait",
@@ -184,7 +184,7 @@ class MaisakaReasoningEngine:
self._runtime._pending_wait_tool_call_id = None
self._runtime._chat_history.append(
ToolResultMessage(
content="wait 被新的用户输入打断,已继续处理最新消息。",
content="等待过程被新的用户输入打断,已继续处理最新消息。",
timestamp=datetime.now(),
tool_call_id=tool_call_id,
tool_name="wait",
@@ -337,14 +337,14 @@ class MaisakaReasoningEngine:
"""
if not self._last_reasoning_content or not current_content:
logger.info(
f"{self._runtime.log_prefix} reasoning similarity skipped: "
f"last_empty={not bool(self._last_reasoning_content)} "
f"current_empty={not bool(current_content)} similarity=0.00"
f"{self._runtime.log_prefix} 跳过思考相似度判定: "
f"上一轮为空={not bool(self._last_reasoning_content)} "
f"当前为空={not bool(current_content)} 相似度=0.00"
)
return False
similarity = self._calculate_similarity(current_content, self._last_reasoning_content)
logger.info(f"{self._runtime.log_prefix} reasoning similarity: {similarity:.2f}")
logger.info(f"{self._runtime.log_prefix} 思考内容相似度: {similarity:.2f}")
return similarity > 0.9
@staticmethod
@@ -371,7 +371,7 @@ class MaisakaReasoningEngine:
reply_sent = await self._handle_reply(tool_call, latest_thought, anchor_message)
if not reply_sent:
logger.warning(
f"{self._runtime.log_prefix} reply tool did not produce a visible message, continuing loop"
f"{self._runtime.log_prefix} 回复工具未生成可见消息,将继续下一轮循环"
)
continue
@@ -379,7 +379,7 @@ class MaisakaReasoningEngine:
self._runtime._chat_history.append(
self._build_tool_message(
tool_call,
"No visible reply was sent for this round.",
"本轮未发送可见回复。",
)
)
continue
@@ -406,7 +406,7 @@ class MaisakaReasoningEngine:
self._runtime._chat_history.append(
self._build_tool_message(
tool_call,
"Conversation loop paused until a new message arrives.",
"当前对话循环已暂停,等待新消息到来。",
)
)
self._runtime._enter_stop_state()
@@ -430,7 +430,7 @@ class MaisakaReasoningEngine:
if not isinstance(raw_words, list):
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "query_jargon requires a words array.")
self._build_tool_message(tool_call, "查询黑话工具需要提供 `words` 数组参数。")
)
return
@@ -447,11 +447,11 @@ class MaisakaReasoningEngine:
if not words:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "query_jargon requires at least one non-empty word.")
self._build_tool_message(tool_call, "查询黑话工具至少需要一个非空词条。")
)
return
logger.info(f"{self._runtime.log_prefix} query_jargon triggered: words={words!r}")
logger.info(f"{self._runtime.log_prefix} 已触发黑话查询: 词条={words!r}")
results: list[dict[str, object]] = []
for word in words:
@@ -478,7 +478,7 @@ class MaisakaReasoningEngine:
}
)
logger.info(f"{self._runtime.log_prefix} query_jargon finished: results={results!r}")
logger.info(f"{self._runtime.log_prefix} 黑话查询完成: 结果={results!r}")
self._runtime._chat_history.append(
self._build_tool_message(
tool_call,
@@ -494,14 +494,14 @@ class MaisakaReasoningEngine:
if not isinstance(raw_person_name, str):
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "query_person_info requires a person_name string.")
self._build_tool_message(tool_call, "查询人物信息工具需要提供字符串类型的 `person_name` 参数。")
)
return
person_name = raw_person_name.strip()
if not person_name:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "query_person_info requires a non-empty person_name.")
self._build_tool_message(tool_call, "查询人物信息工具需要提供非空的 `person_name` 参数。")
)
return
@@ -511,8 +511,8 @@ class MaisakaReasoningEngine:
limit = 3
logger.info(
f"{self._runtime.log_prefix} query_person_info triggered: "
f"person_name={person_name!r} limit={limit}"
f"{self._runtime.log_prefix} 已触发人物信息查询: "
f"人物名={person_name!r} 限制条数={limit}"
)
persons = self._query_person_records(person_name, limit)
@@ -523,8 +523,8 @@ class MaisakaReasoningEngine:
}
logger.info(
f"{self._runtime.log_prefix} query_person_info finished: "
f"persons={len(result['persons'])} related_knowledge={len(result['related_knowledge'])}"
f"{self._runtime.log_prefix} 人物信息查询完成: "
f"人物记录数={len(result['persons'])} 相关知识数={len(result['related_knowledge'])}"
)
self._runtime._chat_history.append(
self._build_tool_message(
@@ -641,22 +641,22 @@ class MaisakaReasoningEngine:
unknown_words = raw_unknown_words if isinstance(raw_unknown_words, list) else None
if not target_message_id:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "reply requires a valid msg_id argument.")
self._build_tool_message(tool_call, "回复工具需要提供有效的 `msg_id` 参数。")
)
return False
target_message = self._runtime._source_messages_by_id.get(target_message_id)
if target_message is None:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, f"reply target msg_id not found: {target_message_id}")
self._build_tool_message(tool_call, f"未找到要回复的目标消息msg_id={target_message_id}")
)
return False
logger.info(
f"{self._runtime.log_prefix} reply tool triggered: "
f"target_msg_id={target_message_id} quote={quote_reply} latest_thought={latest_thought!r}"
f"{self._runtime.log_prefix} 已触发回复工具: "
f"目标消息编号={target_message_id} 引用回复={quote_reply} 最新思考={latest_thought!r}"
)
logger.info(f"{self._runtime.log_prefix} acquiring Maisaka reply generator")
logger.info(f"{self._runtime.log_prefix} 正在获取 Maisaka 回复生成器")
try:
replyer = replyer_manager.get_replyer(
chat_stream=self._runtime.chat_stream,
@@ -665,24 +665,24 @@ class MaisakaReasoningEngine:
)
except Exception:
logger.exception(
f"{self._runtime.log_prefix} replyer_manager.get_replyer crashed: "
f"target_msg_id={target_message_id}"
f"{self._runtime.log_prefix} 获取回复生成器时发生异常: "
f"目标消息编号={target_message_id}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Maisaka reply generator acquisition crashed.")
self._build_tool_message(tool_call, "获取 Maisaka 回复生成器时发生异常。")
)
return False
if replyer is None:
logger.error(f"{self._runtime.log_prefix} failed to acquire Maisaka reply generator")
logger.error(f"{self._runtime.log_prefix} 获取 Maisaka 回复生成器失败")
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Maisaka reply generator is unavailable.")
self._build_tool_message(tool_call, "Maisaka 回复生成器当前不可用。")
)
return False
logger.info(f"{self._runtime.log_prefix} acquired Maisaka reply generator successfully")
logger.info(f"{self._runtime.log_prefix} 已成功获取 Maisaka 回复生成器")
logger.info(f"{self._runtime.log_prefix} calling generate_reply_with_context: target_msg_id={target_message_id}")
logger.info(f"{self._runtime.log_prefix} 正在调用回复生成接口: 目标消息编号={target_message_id}")
try:
success, reply_result = await replyer.generate_reply_with_context(
reply_reason=latest_thought,
@@ -695,41 +695,41 @@ class MaisakaReasoningEngine:
except Exception as exc:
import traceback
logger.error(
f"{self._runtime.log_prefix} reply generator crashed: target_msg_id={target_message_id} "
f"exc_type={type(exc).__name__} exc_msg={str(exc)}\n{traceback.format_exc()}"
f"{self._runtime.log_prefix} 回复生成器执行异常: 目标消息编号={target_message_id} "
f"异常类型={type(exc).__name__} 异常信息={str(exc)}\n{traceback.format_exc()}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Visible reply generation crashed.")
self._build_tool_message(tool_call, "生成可见回复时发生异常。")
)
return False
logger.info(
f"{self._runtime.log_prefix} reply generator finished: "
f"success={success} response_text={reply_result.completion.response_text!r} "
f"error={reply_result.error_message!r}"
f"{self._runtime.log_prefix} 回复生成完成: "
f"成功={success} 回复文本={reply_result.completion.response_text!r} "
f"错误信息={reply_result.error_message!r}"
)
reply_text = reply_result.completion.response_text.strip() if success else ""
if not reply_text:
logger.warning(
f"{self._runtime.log_prefix} reply generator returned empty text: "
f"target_msg_id={target_message_id} error={reply_result.error_message!r}"
f"{self._runtime.log_prefix} 回复生成器返回空文本: "
f"目标消息编号={target_message_id} 错误信息={reply_result.error_message!r}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Visible reply generation failed.")
self._build_tool_message(tool_call, "生成可见回复失败。")
)
return False
reply_segments = self._post_process_reply_text(reply_text)
combined_reply_text = "".join(reply_segments)
logger.info(
f"{self._runtime.log_prefix} reply post process finished: "
f"target_msg_id={target_message_id} segment_count={len(reply_segments)} "
f"segments={reply_segments!r}"
f"{self._runtime.log_prefix} 回复后处理完成: "
f"目标消息编号={target_message_id} 分段数={len(reply_segments)} "
f"分段内容={reply_segments!r}"
)
logger.info(
f"{self._runtime.log_prefix} sending guided reply: "
f"target_msg_id={target_message_id} quote={quote_reply} reply_segments={reply_segments!r}"
f"{self._runtime.log_prefix} 正在发送引导回复: "
f"目标消息编号={target_message_id} 引用回复={quote_reply} 回复分段={reply_segments!r}"
)
try:
sent = False
@@ -746,19 +746,18 @@ class MaisakaReasoningEngine:
break
except Exception:
logger.exception(
f"{self._runtime.log_prefix} send_service.text_to_stream crashed "
f"for target_msg_id={target_message_id}"
f"{self._runtime.log_prefix} 发送文字消息时发生异常,目标消息编号={target_message_id}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Visible reply send crashed.")
self._build_tool_message(tool_call, "发送可见回复时发生异常。")
)
return False
logger.info(
f"{self._runtime.log_prefix} guided reply send result: "
f"target_msg_id={target_message_id} sent={sent}"
f"{self._runtime.log_prefix} 引导回复发送结果: "
f"目标消息编号={target_message_id} 发送成功={sent}"
)
tool_result = "Visible reply generated and sent." if sent else "Visible reply generation succeeded but send failed."
tool_result = "可见回复已生成并发送。" if sent else "可见回复生成成功,但发送失败。"
self._runtime._chat_history.append(self._build_tool_message(tool_call, tool_result))
if not sent:
return False
@@ -821,12 +820,12 @@ class MaisakaReasoningEngine:
tool_args = tool_call.args or {}
emotion = str(tool_args.get("emotion") or "").strip()
logger.info(f"{self._runtime.log_prefix} send_emoji tool triggered: emotion={emotion!r}")
logger.info(f"{self._runtime.log_prefix} 已触发表情包发送工具: 情绪={emotion!r}")
# 获取表情包列表
if not emoji_manager.emojis:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "No emojis available in the emoji library.")
self._build_tool_message(tool_call, "当前表情包库中没有可用表情。")
)
return
@@ -841,16 +840,16 @@ class MaisakaReasoningEngine:
if matching_emojis:
selected_emoji = random.choice(matching_emojis)
logger.info(
f"{self._runtime.log_prefix} found {len(matching_emojis)} emojis matching emotion '{emotion}', "
f"selected: {selected_emoji.description}"
f"{self._runtime.log_prefix} 找到 {len(matching_emojis)} 个匹配情绪 {emotion!r} 的表情包,"
f"已选择:{selected_emoji.description}"
)
# 如果没有找到匹配的情感表情包,随机选择一个
if selected_emoji is None:
selected_emoji = random.choice(emoji_manager.emojis)
logger.info(
f"{self._runtime.log_prefix} no emoji matched emotion '{emotion}', "
f"randomly selected: {selected_emoji.description}"
f"{self._runtime.log_prefix} 没有表情包匹配情绪 {emotion!r},"
f"已随机选择:{selected_emoji.description}"
)
# 更新表情包使用次数
@@ -860,13 +859,13 @@ class MaisakaReasoningEngine:
try:
emoji_base64 = ImageUtils.image_path_to_base64(str(selected_emoji.full_path))
if not emoji_base64:
raise ValueError("Failed to convert emoji image to base64")
raise ValueError("表情图片转换为 base64 失败")
except Exception as exc:
logger.error(
f"{self._runtime.log_prefix} failed to convert emoji to base64: {exc}"
f"{self._runtime.log_prefix} 表情图片转换为 base64 失败: {exc}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, f"Failed to send emoji: {exc}")
self._build_tool_message(tool_call, f"发送表情包失败:{exc}")
)
return
@@ -881,28 +880,28 @@ class MaisakaReasoningEngine:
)
except Exception as exc:
logger.exception(
f"{self._runtime.log_prefix} send_service.emoji_to_stream crashed: {exc}"
f"{self._runtime.log_prefix} 发送表情包时发生异常: {exc}"
)
self._runtime._chat_history.append(
self._build_tool_message(tool_call, f"Emoji send crashed: {exc}")
self._build_tool_message(tool_call, f"发送表情包时发生异常:{exc}")
)
return
if sent:
logger.info(
f"{self._runtime.log_prefix} emoji sent successfully: "
f"description={selected_emoji.description!r} emotion={selected_emoji.emotion}"
f"{self._runtime.log_prefix} 表情包发送成功: "
f"描述={selected_emoji.description!r} 情绪标签={selected_emoji.emotion}"
)
self._runtime._chat_history.append(
self._build_tool_message(
tool_call,
f"Sent emoji: {selected_emoji.description} (emotion: {', '.join(selected_emoji.emotion)})"
                        f"已发送表情包:{selected_emoji.description}(情绪:{', '.join(selected_emoji.emotion)})"
)
)
else:
logger.warning(f"{self._runtime.log_prefix} emoji send failed")
logger.warning(f"{self._runtime.log_prefix} 表情包发送失败")
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "Failed to send emoji.")
self._build_tool_message(tool_call, "发送表情包失败。")
)
def _build_tool_message(self, tool_call: ToolCall, content: str) -> ToolResultMessage:

View File

@@ -36,7 +36,7 @@ class MaisakaHeartFlowChatting:
self.session_id = session_id
self.chat_stream: Optional[BotChatSession] = chat_manager.get_session_by_session_id(session_id)
if self.chat_stream is None:
raise ValueError(f"Session not found for Maisaka runtime: {session_id}")
raise ValueError(f"未找到会话 {session_id} 对应的 Maisaka 运行时")
session_name = chat_manager.get_session_name(session_id) or session_id
self.log_prefix = f"[{session_name}]"
@@ -89,7 +89,7 @@ class MaisakaHeartFlowChatting:
self._running = True
self._ensure_background_tasks_running()
logger.info(f"{self.log_prefix} Maisaka runtime started")
logger.info(f"{self.log_prefix} Maisaka 运行时已启动")
async def stop(self) -> None:
"""Stop the runtime loop."""
@@ -123,7 +123,7 @@ class MaisakaHeartFlowChatting:
await self._mcp_manager.close()
self._mcp_manager = None
logger.info(f"{self.log_prefix} Maisaka runtime stopped")
logger.info(f"{self.log_prefix} Maisaka 运行时已停止")
def adjust_talk_frequency(self, frequency: float) -> None:
"""Compatibility shim for the existing manager API."""
@@ -137,9 +137,9 @@ class MaisakaHeartFlowChatting:
self._source_messages_by_id[message.message_id] = message
if self._agent_state == self._STATE_RUNNING and self._planner_interrupt_flag is not None:
logger.info(
f"{self.log_prefix} 收到新消息,发起 planner 打断; "
f"msg_id={message.message_id} cache_size={len(self.message_cache)} "
f"timestamp={time.time():.3f}"
f"{self.log_prefix} 收到新消息,发起规划器打断; "
f"消息编号={message.message_id} 缓存条数={len(self.message_cache)} "
f"时间戳={time.time():.3f}"
)
self._planner_interrupt_flag.set()
if self._agent_state in (self._STATE_WAIT, self._STATE_STOP):
@@ -158,9 +158,9 @@ class MaisakaHeartFlowChatting:
except Exception:
exc = None
if exc is not None:
logger.error(f"{self.log_prefix} internal loop task exited unexpectedly: {exc}")
logger.error(f"{self.log_prefix} 内部循环任务异常退出: {exc}")
self._internal_loop_task = asyncio.create_task(self._reasoning_engine.run_loop())
logger.warning(f"{self.log_prefix} restarted Maisaka internal loop task")
logger.warning(f"{self.log_prefix} 已重新拉起 Maisaka 内部循环任务")
if self._loop_task is None or self._loop_task.done():
if self._loop_task is not None and not self._loop_task.cancelled():
@@ -169,9 +169,9 @@ class MaisakaHeartFlowChatting:
except Exception:
exc = None
if exc is not None:
logger.error(f"{self.log_prefix} main loop task exited unexpectedly: {exc}")
logger.error(f"{self.log_prefix} 主循环任务异常退出: {exc}")
self._loop_task = asyncio.create_task(self._main_loop())
logger.warning(f"{self.log_prefix} restarted Maisaka main loop task")
logger.warning(f"{self.log_prefix} 已重新拉起 Maisaka 主循环任务")
async def _main_loop(self) -> None:
try:
@@ -195,8 +195,8 @@ class MaisakaHeartFlowChatting:
self._new_message_event.clear()
if trigger_reason == "timeout":
# wait 超时后继续下一轮内部思考,但不要重复注入旧消息。
logger.info(f"{self.log_prefix} wait 超时后投递继续思考触发")
# 等待超时后继续下一轮内部思考,但不要重复注入旧消息。
logger.info(f"{self.log_prefix} 等待超时后投递继续思考触发信号")
await self._internal_turn_queue.put(None)
continue
@@ -207,7 +207,7 @@ class MaisakaHeartFlowChatting:
await self._internal_turn_queue.put(cached_messages)
asyncio.create_task(self._trigger_batch_learning(cached_messages))
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} Maisaka runtime loop cancelled")
logger.info(f"{self.log_prefix} Maisaka 运行时主循环已取消")
def _has_pending_messages(self) -> bool:
return self._last_processed_index < len(self.message_cache)
@@ -230,8 +230,8 @@ class MaisakaHeartFlowChatting:
self._last_processed_index = len(self.message_cache)
logger.info(
f"{self.log_prefix} collected {len(unique_messages)} new messages "
f"from message_cache[{start_index}:{self._last_processed_index}]"
f"{self.log_prefix} 已从消息缓存区[{start_index}:{self._last_processed_index}] "
f"收集 {len(unique_messages)} 条新消息"
)
return unique_messages
@@ -247,7 +247,7 @@ class MaisakaHeartFlowChatting:
timeout = self._wait_until - time.time()
if timeout <= 0:
logger.info(f"{self.log_prefix} Maisaka wait timed out")
logger.info(f"{self.log_prefix} Maisaka 等待已超时")
self._agent_state = self._STATE_RUNNING
self._wait_until = None
return "timeout"
@@ -256,7 +256,7 @@ class MaisakaHeartFlowChatting:
await asyncio.wait_for(self._new_message_event.wait(), timeout=timeout)
return "message"
except asyncio.TimeoutError:
logger.info(f"{self.log_prefix} Maisaka wait timed out")
logger.info(f"{self.log_prefix} Maisaka 等待已超时")
self._agent_state = self._STATE_RUNNING
self._wait_until = None
return "timeout"
@@ -281,110 +281,110 @@ class MaisakaHeartFlowChatting:
return_exceptions=True,
)
if isinstance(expression_result, Exception):
logger.error(f"{self.log_prefix} expression learning task crashed: {expression_result}")
logger.error(f"{self.log_prefix} 表达学习任务异常退出: {expression_result}")
if isinstance(knowledge_result, Exception):
logger.error(f"{self.log_prefix} knowledge learning task crashed: {knowledge_result}")
logger.error(f"{self.log_prefix} 知识学习任务异常退出: {knowledge_result}")
async def _trigger_expression_learning(self, messages: list[SessionMessage]) -> None:
"""Trigger expression learning from the newly collected batch."""
self._expression_learner.add_messages(messages)
if not self._enable_expression_learning:
logger.debug(f"{self.log_prefix} expression learning disabled, skip this batch")
logger.debug(f"{self.log_prefix} 表达学习未启用,跳过当前批次")
return
elapsed = time.time() - self._last_expression_extraction_time
if elapsed < self._min_extraction_interval:
logger.debug(
f"{self.log_prefix} expression learning interval not reached: "
f"elapsed={elapsed:.2f}s threshold={self._min_extraction_interval}s"
f"{self.log_prefix} 表达学习尚未达到触发间隔: "
f"已过={elapsed:.2f} 秒 阈值={self._min_extraction_interval}"
)
return
cache_size = self._expression_learner.get_cache_size()
if cache_size < self._min_messages_for_extraction:
logger.debug(
f"{self.log_prefix} expression learning skipped due to cache size: "
f"learner_cache={cache_size} threshold={self._min_messages_for_extraction} "
f"message_cache_total={len(self.message_cache)}"
f"{self.log_prefix} 表达学习因缓存数量不足而跳过: "
f"学习器缓存={cache_size} 阈值={self._min_messages_for_extraction} "
f"消息总缓存={len(self.message_cache)}"
)
return
self._last_expression_extraction_time = time.time()
logger.info(
f"{self.log_prefix} starting expression learning: "
f"new_batch={len(messages)} learner_cache={cache_size} "
f"message_cache_total={len(self.message_cache)} "
f"enable_jargon_learning={self._enable_jargon_learning}"
f"{self.log_prefix} 开始表达学习: "
f"新批次消息数={len(messages)} 学习器缓存={cache_size} "
f"消息总缓存={len(self.message_cache)} "
f"启用黑话学习={self._enable_jargon_learning}"
)
try:
jargon_miner = self._jargon_miner if self._enable_jargon_learning else None
learnt_style = await self._expression_learner.learn(jargon_miner)
if learnt_style:
logger.info(f"{self.log_prefix} expression learning finished")
logger.info(f"{self.log_prefix} 表达学习已完成")
else:
logger.debug(f"{self.log_prefix} expression learning finished without usable result")
logger.debug(f"{self.log_prefix} 表达学习已完成,但没有可用结果")
except Exception:
logger.exception(f"{self.log_prefix} expression learning failed")
logger.exception(f"{self.log_prefix} 表达学习失败")
async def _trigger_knowledge_learning(self, messages: list[SessionMessage]) -> None:
"""Trigger knowledge learning from the newly collected batch."""
self._knowledge_learner.add_messages(messages)
if not global_config.maisaka.enable_knowledge_module:
logger.debug(f"{self.log_prefix} knowledge learning disabled, skip this batch")
logger.debug(f"{self.log_prefix} 知识学习未启用,跳过当前批次")
return
elapsed = time.time() - self._last_knowledge_extraction_time
if elapsed < self._min_extraction_interval:
logger.debug(
f"{self.log_prefix} knowledge learning interval not reached: "
f"elapsed={elapsed:.2f}s threshold={self._min_extraction_interval}s"
f"{self.log_prefix} 知识学习尚未达到触发间隔: "
f"已过={elapsed:.2f} 秒 阈值={self._min_extraction_interval}"
)
return
cache_size = self._knowledge_learner.get_cache_size()
if cache_size < self._min_messages_for_extraction:
logger.debug(
f"{self.log_prefix} knowledge learning skipped due to cache size: "
f"learner_cache={cache_size} threshold={self._min_messages_for_extraction} "
f"message_cache_total={len(self.message_cache)}"
f"{self.log_prefix} 知识学习因缓存数量不足而跳过: "
f"学习器缓存={cache_size} 阈值={self._min_messages_for_extraction} "
f"消息总缓存={len(self.message_cache)}"
)
return
self._last_knowledge_extraction_time = time.time()
logger.info(
f"{self.log_prefix} starting knowledge learning: "
f"new_batch={len(messages)} learner_cache={cache_size} "
f"message_cache_total={len(self.message_cache)}"
f"{self.log_prefix} 开始知识学习: "
f"新批次消息数={len(messages)} 学习器缓存={cache_size} "
f"消息总缓存={len(self.message_cache)}"
)
try:
added_count = await self._knowledge_learner.learn()
if added_count > 0:
logger.info(f"{self.log_prefix} knowledge learning finished: added={added_count}")
logger.info(f"{self.log_prefix} 知识学习已完成: 新增条目数={added_count}")
else:
logger.debug(f"{self.log_prefix} knowledge learning finished without usable result")
logger.debug(f"{self.log_prefix} 知识学习已完成,但没有可用结果")
except Exception:
logger.exception(f"{self.log_prefix} knowledge learning failed")
logger.exception(f"{self.log_prefix} 知识学习失败")
async def _init_mcp(self) -> None:
"""Initialize MCP tools and inject them into the planner."""
config_path = Path(__file__).resolve().parents[2] / "config" / "mcp_config.json"
self._mcp_manager = await MCPManager.from_config(str(config_path))
if self._mcp_manager is None:
logger.info(f"{self.log_prefix} MCP manager is unavailable")
logger.info(f"{self.log_prefix} MCP 管理器不可用")
return
mcp_tools = self._mcp_manager.get_openai_tools()
if not mcp_tools:
logger.info(f"{self.log_prefix} No MCP tools were exposed to Maisaka")
logger.info(f"{self.log_prefix} 没有可供 Maisaka 使用的 MCP 工具")
return
self._chat_loop_service.set_extra_tools(mcp_tools)
logger.info(
f"{self.log_prefix} Loaded {len(mcp_tools)} MCP tools into Maisaka:\n"
f"{self.log_prefix} 已向 Maisaka 加载 {len(mcp_tools)} MCP 工具:\n"
f"{self._mcp_manager.get_tool_summary()}"
)
@@ -392,10 +392,10 @@ class MaisakaHeartFlowChatting:
if self.chat_stream.user_id:
return UserInfo(
user_id=self.chat_stream.user_id,
user_nickname=global_config.maisaka.user_name.strip() or "User",
user_nickname=global_config.maisaka.user_name.strip() or "用户",
user_cardname=None,
)
return UserInfo(user_id="maisaka_user", user_nickname="user", user_cardname=None)
return UserInfo(user_id="maisaka_user", user_nickname="用户", user_cardname=None)
def _build_group_info(self, message: Optional[SessionMessage] = None) -> Optional[GroupInfo]:
group_info = None
@@ -411,23 +411,23 @@ class MaisakaHeartFlowChatting:
def _log_cycle_started(self, cycle_detail: CycleDetail, round_index: int) -> None:
logger.info(
f"{self.log_prefix} MaiSaka cycle={cycle_detail.cycle_id} "
f"round={round_index + 1}/{self._max_internal_rounds} "
f"context_size={len(self._chat_history)}"
f"{self.log_prefix} MaiSaka 轮次开始: 循环编号={cycle_detail.cycle_id} "
f"回合={round_index + 1}/{self._max_internal_rounds} "
f"上下文消息数={len(self._chat_history)}"
)
def _log_cycle_completed(self, cycle_detail: CycleDetail, timer_strings: list[str]) -> None:
logger.info(
f"{self.log_prefix} MaiSaka cycle={cycle_detail.cycle_id} completed "
f"in {cycle_detail.end_time - cycle_detail.start_time:.2f}s; "
f"stages={', '.join(timer_strings) if timer_strings else 'none'}"
f"{self.log_prefix} MaiSaka 轮次结束: 循环编号={cycle_detail.cycle_id} "
f"总耗时={cycle_detail.end_time - cycle_detail.start_time:.2f}; "
            f"阶段耗时={', '.join(timer_strings) if timer_strings else '无'}"
)
def _log_history_trimmed(self, removed_count: int, user_message_count: int) -> None:
logger.info(
f"{self.log_prefix} Trimmed {removed_count} history messages; "
f"remaining_user_messages={user_message_count}"
f"{self.log_prefix} 已裁剪 {removed_count} 条历史消息; "
f"剩余计入上下文的消息数={user_message_count}"
)
def _log_internal_loop_cancelled(self) -> None:
logger.info(f"{self.log_prefix} Maisaka internal loop cancelled")
logger.info(f"{self.log_prefix} Maisaka 内部循环已取消")

View File

@@ -102,7 +102,7 @@ async def handle_mcp_tool(tc: ToolCall, chat_history: list[LLMContextMessage], m
console.print(
Panel(
display_text,
title=f"MCP: {tc.func_name}",
title=f"MCP 工具:{tc.func_name}",
border_style="bright_green",
padding=(0, 1),
)