fix:解决 upstream 合并冲突

在临时整合分支中合并 upstream/r-dev
保留人物事实写回与反馈纠错配置
移除已下线的聊天总结配置并同步测试
This commit is contained in:
A-Dawn
2026-04-16 10:27:25 +08:00
31 changed files with 349 additions and 5128 deletions

View File

@@ -4,7 +4,7 @@ from __future__ import annotations
from base64 import b64decode
from datetime import datetime
from typing import Any, Dict, List, Optional, TYPE_CHECKING
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from src.chat.utils.utils import process_llm_response
from src.common.data_models.message_component_data_model import EmojiComponent, MessageSequence, TextComponent
@@ -12,13 +12,10 @@ from src.config.config import global_config
from src.core.tooling import ToolExecutionResult
from ..context_messages import SessionBackedMessage
from ..history_utils import build_prefixed_message_sequence, build_session_message_visible_text
from ..message_adapter import format_speaker_content
from ..planner_message_utils import build_planner_prefix, build_session_backed_text_message
if TYPE_CHECKING:
from src.chat.message_receive.message import SessionMessage
from ..reasoning_engine import MaisakaReasoningEngine
from ..runtime import MaisakaHeartFlowChatting
@@ -139,37 +136,6 @@ class BuiltinToolRuntimeContext:
return self.engine._get_runtime_manager()
@staticmethod
def _build_visible_text_from_sent_message(message: "SessionMessage") -> str:
    """Render an already-sent message as Maisaka-visible text.

    Thin static wrapper over the shared history helper so tool code has a
    single entry point for this conversion.
    """
    visible_text = build_session_message_visible_text(message)
    return visible_text
def append_sent_message_to_chat_history(
    self,
    message: "SessionMessage",
    *,
    source_kind: str = "guided_reply",
) -> None:
    """Mirror a genuinely delivered message into the Maisaka chat history.

    Builds the planner-visible prefix from the sender's identity, wraps the
    sent message as a session-backed history entry, and appends it to the
    runtime's internal chat history.
    """
    sender = message.message_info.user_info
    # Prefer the group card name, then nickname, then the bare user id.
    display_name = sender.user_cardname or sender.user_nickname or sender.user_id
    prefix = build_planner_prefix(
        timestamp=message.timestamp,
        user_name=display_name,
        group_card=sender.user_cardname or "",
        message_id=message.message_id,
        # Notify messages (and messages without an id) carry no message id.
        include_message_id=bool(message.message_id) and not message.is_notify,
    )
    history_entry = SessionBackedMessage.from_session_message(
        message,
        raw_message=build_prefixed_message_sequence(message.raw_message, prefix),
        visible_text=self._build_visible_text_from_sent_message(message),
        source_kind=source_kind,
    )
    self.runtime._chat_history.append(history_entry)
def append_guided_reply_to_chat_history(self, reply_text: str) -> None:
"""将引导回复写回 Maisaka 历史。"""

View File

@@ -36,7 +36,8 @@ def get_tool_spec() -> ToolSpec:
detailed_description=(
"参数说明:\n"
"- msg_idstring必填。要回复的目标用户消息编号。\n"
"- set_quoteboolean可选。以引用回复的方式发送默认 true。"
"- set_quoteboolean可选。以引用回复的方式发送默认 true。\n"
"- reference_infostring可选。上文中有助于回复的所有参考信息使用平文本格式。"
),
parameters_schema={
"type": "object",
@@ -50,6 +51,11 @@ def get_tool_spec() -> ToolSpec:
"description": "以引用回复的方式发送这条回复,不用每句都引用。",
"default": True,
},
"reference_info": {
"type": "string",
"description": "有助于回复的信息,之前搜集得到的事实性信息,记忆等,使用平文本格式。",
"default": True,
},
},
"required": ["msg_id"],
},
@@ -75,6 +81,7 @@ async def handle_tool(
"""执行 reply 内置工具。"""
latest_thought = context.reasoning if context is not None else invocation.reasoning
reference_info = str(invocation.arguments.get("reference_info") or "").strip()
target_message_id = str(invocation.arguments.get("msg_id") or "").strip()
set_quote = bool(invocation.arguments.get("set_quote", True))
@@ -117,6 +124,7 @@ async def handle_tool(
try:
success, reply_result = await replyer.generate_reply_with_context(
reply_reason=latest_thought,
reference_info=reference_info,
stream_id=tool_ctx.runtime.session_id,
reply_message=target_message,
chat_history=tool_ctx.runtime._chat_history,
@@ -152,7 +160,6 @@ async def handle_tool(
combined_reply_text = "".join(reply_segments)
try:
sent = False
sent_messages = []
if tool_ctx.runtime.chat_stream.platform == CLI_PLATFORM_NAME:
for segment in reply_segments:
render_cli_message(segment)
@@ -166,11 +173,12 @@ async def handle_tool(
reply_message=target_message if set_quote and index == 0 else None,
selected_expressions=reply_result.selected_expression_ids or None,
typing=index > 0,
sync_to_maisaka_history=True,
maisaka_source_kind="guided_reply",
)
sent = sent_message is not None
if not sent:
break
sent_messages.append(sent_message)
except Exception:
logger.exception(
f"{tool_ctx.runtime.log_prefix} 发送文字消息时发生异常,目标消息编号={target_message_id}"
@@ -198,9 +206,6 @@ async def handle_tool(
if tool_ctx.runtime.chat_stream.platform == CLI_PLATFORM_NAME:
tool_ctx.append_guided_reply_to_chat_history(combined_reply_text)
else:
for sent_message in sent_messages:
tool_ctx.append_sent_message_to_chat_history(sent_message)
tool_ctx.runtime._record_reply_sent()
return tool_ctx.build_success_result(
invocation.tool_name,

View File

@@ -53,40 +53,16 @@ def get_tool_spec() -> ToolSpec:
return ToolSpec(
name="send_emoji",
brief_description="发送一个合适的表情包来辅助表达情绪。",
detailed_description="参数说明:\n- emotionstring可选。希望表达的情绪例如 happy、sad、angry 等",
detailed_description="无需参数,直接发送一个合适的表情包",
parameters_schema={
"type": "object",
"properties": {
"emotion": {
"type": "string",
"description": "希望表达的情绪,例如 happy、sad、angry 等。",
},
},
"properties": {},
},
provider_name="maisaka_builtin",
provider_type="builtin",
)
def _normalize_candidate_emotions(emoji: MaiEmoji) -> list[str]:
    """Clean up the emotion tags attached to a candidate emoji.

    Prefers the explicit ``emotion`` list on the emoji; falls back to
    splitting its ``description`` on comma-like separators. Returns an
    empty list when neither source yields usable tags.
    """
    raw_emotions = getattr(emoji, "emotion", None)
    if isinstance(raw_emotions, list) and raw_emotions:
        # Explicit tags win: strip whitespace and drop empty entries.
        return [str(item).strip() for item in raw_emotions if str(item).strip()]
    description = str(getattr(emoji, "description", "") or "").strip()
    if not description:
        return []
    # NOTE(review): the replace() targets render as empty strings here —
    # presumably full-width separators ("、", ",", ";") lost by the diff
    # renderer; confirm against the original file. As literally written,
    # str.replace("", ",") would insert a comma between every character.
    normalized_description = (
        description.replace("", ",")
        .replace("", ",")
        .replace("", ",")
    )
    return [item.strip() for item in normalized_description.split(",") if item.strip()]
async def _load_emoji_bytes(emoji: MaiEmoji) -> bytes:
"""读取单个表情包图片字节。"""
@@ -232,18 +208,6 @@ async def _build_emoji_candidate_message(emojis: list[MaiEmoji]) -> SessionBacke
)
def _build_emoji_candidate_summary(emojis: list[MaiEmoji]) -> str:
    """Build a human-readable summary of candidate emojis for monitor display.

    Emits two lines per candidate (description, then emotion tags),
    numbered from 1.
    """
    summary_lines: list[str] = []
    for index, emoji in enumerate(emojis, start=1):
        description = emoji.description.strip() or "(无描述)"
        # NOTE(review): the join separator and the empty fallback render as
        # empty strings here — likely full-width characters ("、" / "无")
        # lost by the diff renderer; confirm against the original file.
        emotions = "".join(_normalize_candidate_emotions(emoji)) or ""
        summary_lines.append(f"{index}. 描述:{description}")
        summary_lines.append(f" 情绪:{emotions}")
    return "\n".join(summary_lines).strip()
def _build_send_emoji_monitor_detail(
*,
request_messages: Optional[list[dict[str, Any]]] = None,
@@ -252,7 +216,7 @@ def _build_send_emoji_monitor_detail(
metrics: Optional[Dict[str, Any]] = None,
extra_sections: Optional[list[dict[str, str]]] = None,
) -> Dict[str, Any]:
"""构建 emotion tool 统一监控详情。"""
"""构建 send_emoji 工具统一监控详情。"""
detail: Dict[str, Any] = {}
if isinstance(request_messages, list) and request_messages:
@@ -281,7 +245,6 @@ def _build_send_emoji_monitor_detail(
def _build_send_emoji_monitor_metadata(
selection_metadata: Dict[str, Any],
*,
requested_emotion: str,
send_result: Optional[Any] = None,
error_message: str = "",
) -> Dict[str, Any]:
@@ -293,7 +256,6 @@ def _build_send_emoji_monitor_metadata(
if send_result is not None:
result_lines = [
f"请求情绪:{requested_emotion or '未指定'}",
f"命中情绪:{send_result.matched_emotion or '未命中'}",
f"表情描述:{send_result.description or '无描述'}",
f"情绪标签:{''.join(send_result.emotions) if send_result.emotions else ''}",
@@ -306,10 +268,7 @@ def _build_send_emoji_monitor_metadata(
elif error_message.strip():
extra_sections.append({
"title": "表情发送结果",
"content": (
f"请求情绪:{requested_emotion or '未指定'}\n"
f"发送结果:{error_message.strip()}"
),
"content": f"发送结果:{error_message.strip()}",
})
if extra_sections:
@@ -322,7 +281,6 @@ def _build_send_emoji_monitor_metadata(
async def _select_emoji_with_sub_agent(
tool_ctx: BuiltinToolRuntimeContext,
requested_emotion: str,
reasoning: str,
context_texts: list[str],
sample_size: int,
@@ -347,14 +305,12 @@ async def _select_emoji_with_sub_agent(
f"一共 {len(sampled_emojis)} 个位置。\n"
f"每张小图左上角都有一个较大的序号,范围是 1 到 {len(sampled_emojis)}\n"
f"你的任务是根据上下文和当前语气,从这 {len(sampled_emojis)} 张图里选出最合适的一张表情包。\n"
"如果提供了 requested_emotion请优先考虑与其接近的候选如果没有完全匹配则选择最符合上下文语气的候选。\n"
"你必须返回一个 JSON 对象json object不要输出任何 JSON 之外的内容。\n"
'返回格式固定为:{"emoji_index":1,"reason":"简短理由"}'
)
prompt_message = ReferenceMessage(
content=(
f"[选择任务]\n"
f"requested_emotion: {requested_emotion or '未指定'}\n"
f"候选总数: {len(sampled_emojis)}\n"
f"拼图布局: {grid_rows}x{grid_columns}\n"
"请只输出 JSON。"
@@ -439,7 +395,6 @@ async def handle_tool(
"""执行 send_emoji 内置工具。"""
del context
emotion = str(invocation.arguments.get("emotion") or "").strip()
context_texts = [
message.processed_plain_text.strip()
for message in tool_ctx.runtime._chat_history[-5:]
@@ -450,23 +405,20 @@ async def handle_tool(
"message": "",
"description": "",
"emotion": [],
"requested_emotion": emotion,
"matched_emotion": "",
"reason": "",
}
selection_metadata: Dict[str, Any] = {"reason": "", "monitor_detail": {}}
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具,请求情绪={emotion!r}")
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具")
try:
send_result = await send_emoji_for_maisaka(
stream_id=tool_ctx.runtime.session_id,
requested_emotion=emotion,
reasoning=tool_ctx.engine.last_reasoning_content,
context_texts=context_texts,
emoji_selector=lambda requested_emotion, reasoning, context_texts, sample_size: _select_emoji_with_sub_agent(
emoji_selector=lambda _requested_emotion, reasoning, context_texts, sample_size: _select_emoji_with_sub_agent(
tool_ctx,
requested_emotion,
reasoning,
list(context_texts or []),
sample_size,
@@ -482,7 +434,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
error_message=structured_result["message"],
),
)
@@ -493,11 +444,9 @@ async def handle_tool(
logger.info(
f"{tool_ctx.runtime.log_prefix} 表情包发送成功 "
f"描述={send_result.description!r} 情绪标签={send_result.emotions} "
f"请求情绪={emotion!r} 命中情绪={send_result.matched_emotion!r}"
f"命中情绪={send_result.matched_emotion!r}"
)
if send_result.sent_message is not None:
tool_ctx.append_sent_message_to_chat_history(send_result.sent_message)
else:
if send_result.sent_message is None:
tool_ctx.append_sent_emoji_to_chat_history(
emoji_base64=send_result.emoji_base64,
success_message=_EMOJI_SUCCESS_MESSAGE,
@@ -509,7 +458,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
send_result=send_result,
),
)
@@ -521,7 +469,7 @@ async def handle_tool(
logger.warning(
f"{tool_ctx.runtime.log_prefix} 表情包发送失败 "
f"请求情绪={emotion!r} 错误信息={send_result.message}"
f"错误信息={send_result.message}"
)
return tool_ctx.build_failure_result(
invocation.tool_name,
@@ -529,7 +477,6 @@ async def handle_tool(
structured_content=structured_result,
metadata=_build_send_emoji_monitor_metadata(
selection_metadata,
requested_emotion=emotion,
send_result=send_result,
),
)

View File

@@ -30,9 +30,15 @@ from src.plugin_runtime.host.hook_spec_registry import HookSpec, HookSpecRegistr
from src.services.llm_service import LLMServiceClient
from .builtin_tool import get_builtin_tools
from .context_messages import AssistantMessage, LLMContextMessage, ToolResultMessage
from .context_messages import (
AssistantMessage,
LLMContextMessage,
ToolResultMessage,
build_llm_message_from_context,
)
from .history_utils import drop_orphan_tool_results
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .visual_mode_utils import resolve_enable_visual_planner
TIMING_GATE_TOOL_NAMES = {"continue", "no_reply", "wait"}
@@ -395,6 +401,7 @@ class MaisakaChatLoopService:
self,
selected_history: List[LLMContextMessage],
*,
enable_visual_message: bool,
injected_user_messages: Sequence[str] | None = None,
system_prompt: Optional[str] = None,
) -> List[Message]:
@@ -413,7 +420,10 @@ class MaisakaChatLoopService:
messages.append(system_msg.build())
for msg in selected_history:
llm_message = msg.to_llm_message()
llm_message = build_llm_message_from_context(
msg,
enable_visual_message=enable_visual_message,
)
if llm_message is not None:
messages.append(llm_message)
@@ -475,12 +485,15 @@ class MaisakaChatLoopService:
if not self._prompts_loaded:
await self.ensure_chat_prompt_loaded()
enable_visual_message = self._resolve_enable_visual_message(request_kind)
selected_history, selection_reason = self.select_llm_context_messages(
chat_history,
request_kind=request_kind,
enable_visual_message=enable_visual_message,
)
built_messages = self._build_request_messages(
selected_history,
enable_visual_message=enable_visual_message,
injected_user_messages=injected_user_messages,
)
@@ -528,14 +541,12 @@ class MaisakaChatLoopService:
prompt_section: RenderableType | None = None
if global_config.debug.show_maisaka_thinking:
image_display_mode: str = "path_link" if global_config.maisaka.show_image_path else "legacy"
prompt_section = PromptCLIVisualizer.build_prompt_section(
built_messages,
category="planner" if request_kind != "timing_gate" else "timing_gate",
chat_id=self._session_id,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
folded=global_config.debug.fold_maisaka_thinking,
tool_definitions=list(all_tools),
)
@@ -604,6 +615,7 @@ class MaisakaChatLoopService:
def select_llm_context_messages(
chat_history: List[LLMContextMessage],
*,
enable_visual_message: Optional[bool] = None,
request_kind: str = "planner",
max_context_size: Optional[int] = None,
) -> tuple[List[LLMContextMessage], str]:
@@ -617,9 +629,21 @@ class MaisakaChatLoopService:
selected_indices: List[int] = []
counted_message_count = 0
active_enable_visual_message = (
enable_visual_message
if enable_visual_message is not None
else MaisakaChatLoopService._resolve_enable_visual_message(request_kind)
)
for index in range(len(filtered_history) - 1, -1, -1):
message = filtered_history[index]
if message.to_llm_message() is None:
if (
build_llm_message_from_context(
message,
enable_visual_message=active_enable_visual_message,
)
is None
):
continue
selected_indices.append(index)
@@ -629,18 +653,18 @@ class MaisakaChatLoopService:
break
if not selected_indices:
return [], f"没有选择到上下文消息,实际发送 {effective_context_size} 条 user/assistant 消息"
return [], "实际发送 0 条消息tool 0 条,普通消息 0 条)"
selected_indices.reverse()
selected_history = [filtered_history[index] for index in selected_indices]
selected_history, hidden_assistant_count = MaisakaChatLoopService._hide_early_assistant_messages(selected_history)
selected_history, _ = MaisakaChatLoopService._hide_early_assistant_messages(selected_history)
selected_history, _ = drop_orphan_tool_results(selected_history)
tool_message_count = sum(1 for message in selected_history if isinstance(message, ToolResultMessage))
normal_message_count = len(selected_history) - tool_message_count
selection_reason = (
f"上下文裁剪:最近 {effective_context_size} 条 user/assistant 消息"
f"实际发送 {len(selected_history)}"
f"实际发送 {len(selected_history)}消息"
f"|消息 {normal_message_count} 条|tool {tool_message_count}"
)
if hidden_assistant_count > 0:
selection_reason += f",已隐藏最早 {hidden_assistant_count} 条 assistant 消息"
return (
selected_history,
selection_reason,
@@ -685,6 +709,12 @@ class MaisakaChatLoopService:
return filtered_history
@staticmethod
def _resolve_enable_visual_message(request_kind: str) -> bool:
    """Decide whether this request kind may carry visual (image/emoji) content.

    Planner-style requests defer to the planner visual-mode resolver;
    every other request kind always allows visual content.
    """
    if request_kind not in {"planner", "timing_gate"}:
        return True
    return resolve_enable_visual_planner()
@staticmethod
def _hide_early_assistant_messages(
selected_history: List[LLMContextMessage],

View File

@@ -40,10 +40,15 @@ def _guess_image_format(image_bytes: bytes) -> Optional[str]:
return None
def _append_emoji_component(builder: MessageBuilder, component: EmojiComponent) -> bool:
def _append_emoji_component(
builder: MessageBuilder,
component: EmojiComponent,
*,
enable_visual_message: bool,
) -> bool:
"""将表情组件追加到 LLM 消息构建器。"""
image_format = _guess_image_format(component.binary_data)
if image_format and component.binary_data:
if enable_visual_message and image_format and component.binary_data:
builder.add_text_content("[消息类型]表情包")
builder.add_image_content(image_format, base64.b64encode(component.binary_data).decode("utf-8"))
return True
@@ -56,10 +61,15 @@ def _append_emoji_component(builder: MessageBuilder, component: EmojiComponent)
return True
def _append_image_component(builder: MessageBuilder, component: ImageComponent) -> bool:
def _append_image_component(
builder: MessageBuilder,
component: ImageComponent,
*,
enable_visual_message: bool,
) -> bool:
"""将图片组件追加到 LLM 消息构建器。"""
image_format = _guess_image_format(component.binary_data)
if image_format and component.binary_data:
if enable_visual_message and image_format and component.binary_data:
builder.add_text_content("[消息类型]图片")
builder.add_image_content(image_format, base64.b64encode(component.binary_data).decode("utf-8"))
return True
@@ -216,6 +226,7 @@ def _build_message_from_sequence(
message_sequence: MessageSequence,
fallback_text: str,
*,
enable_visual_message: bool = True,
tool_call_id: Optional[str] = None,
tool_name: Optional[str] = None,
tool_calls: Optional[list[ToolCall]] = None,
@@ -238,11 +249,25 @@ def _build_message_from_sequence(
continue
if isinstance(component, EmojiComponent):
has_content = _append_emoji_component(builder, component) or has_content
has_content = (
_append_emoji_component(
builder,
component,
enable_visual_message=enable_visual_message,
)
or has_content
)
continue
if isinstance(component, ImageComponent):
has_content = _append_image_component(builder, component) or has_content
has_content = (
_append_image_component(
builder,
component,
enable_visual_message=enable_visual_message,
)
or has_content
)
continue
if isinstance(component, AtComponent):
@@ -297,7 +322,7 @@ class LLMContextMessage(ABC):
return self.__class__.__name__
@abstractmethod
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
"""转换为统一 LLM 消息。"""
def consume_once(self) -> bool:
@@ -328,11 +353,12 @@ class SessionBackedMessage(LLMContextMessage):
def source(self) -> str:
return self.source_kind
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
return _build_message_from_sequence(
RoleType.User,
self.raw_message,
self.processed_plain_text,
enable_visual_message=enable_visual_message,
)
@classmethod
@@ -366,7 +392,8 @@ class ComplexSessionMessage(SessionBackedMessage):
def source(self) -> str:
return f"{self.source_kind}:{self.complex_message_type}"
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
del enable_visual_message
message_sequence = MessageSequence([TextComponent(self.prompt_text)])
return _build_message_from_sequence(
RoleType.User,
@@ -426,7 +453,8 @@ class ReferenceMessage(LLMContextMessage):
def source(self) -> str:
return self.reference_type.value
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
    """Convert this reference message to a unified LLM user message.

    Reference messages are plain text only, so the visual toggle is ignored.
    """
    del enable_visual_message
    sequence = MessageSequence([TextComponent(self.processed_plain_text)])
    return _build_message_from_sequence(RoleType.User, sequence, self.processed_plain_text)
@@ -463,7 +491,8 @@ class AssistantMessage(LLMContextMessage):
def source(self) -> str:
return self.source_kind
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
del enable_visual_message
message_sequence = MessageSequence([])
if self.content:
message_sequence.text(self.content)
@@ -501,7 +530,8 @@ class ToolResultMessage(LLMContextMessage):
def source(self) -> str:
return self.tool_name or "tool"
def to_llm_message(self) -> Optional[Message]:
def to_llm_message(self, enable_visual_message: bool = True) -> Optional[Message]:
del enable_visual_message
message_sequence = MessageSequence([TextComponent(self.content)])
return _build_message_from_sequence(
RoleType.Tool,
@@ -510,3 +540,13 @@ class ToolResultMessage(LLMContextMessage):
tool_call_id=self.tool_call_id,
tool_name=self.tool_name,
)
def build_llm_message_from_context(
    context_message: LLMContextMessage,
    *,
    enable_visual_message: bool = True,
) -> Optional[Message]:
    """Convert a Maisaka-internal context message to the unified LLM message.

    Thin dispatch wrapper: delegates to the message's own ``to_llm_message``
    implementation, forwarding the visual-content toggle. Returns ``None``
    when the message produces no LLM payload.
    """
    llm_message = context_message.to_llm_message(
        enable_visual_message=enable_visual_message,
    )
    return llm_message

View File

@@ -799,7 +799,7 @@ class PromptCLIVisualizer:
chat_id: str,
request_kind: str,
selection_reason: str,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
tool_definitions: list[dict[str, Any]] | None = None,
) -> RenderableType:
"""构建用于查看完整 prompt 的折叠入口内容。"""
@@ -864,7 +864,7 @@ class PromptCLIVisualizer:
chat_id: str,
request_kind: str,
selection_reason: str,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
folded: bool,
tool_definitions: list[dict[str, Any]] | None = None,
) -> Panel:
@@ -878,14 +878,10 @@ class PromptCLIVisualizer:
chat_id=chat_id,
request_kind=request_kind,
selection_reason=selection_reason,
image_display_mode=image_display_mode,
tool_definitions=tool_definitions,
)
else:
ordered_panels = cls.build_prompt_panels(
messages,
image_display_mode=image_display_mode,
)
ordered_panels = cls.build_prompt_panels(messages)
prompt_renderable = Group(*ordered_panels)
return Panel(
@@ -1102,11 +1098,9 @@ class PromptCLIVisualizer:
cls,
messages: list[Any],
*,
image_display_mode: Literal["legacy", "path_link"],
image_display_mode: Literal["legacy", "path_link"] = "path_link",
) -> List[Panel]:
"""构建完整 prompt 可视化面板。"""
if image_display_mode not in {mode.value for mode in PromptImageDisplayMode}:
image_display_mode = PromptImageDisplayMode.LEGACY
settings = PromptImageDisplaySettings(
display_mode=PromptImageDisplayMode(image_display_mode),
)

View File

@@ -14,7 +14,7 @@ from src.chat.message_receive.message import SessionMessage
from src.common.data_models.message_component_data_model import EmojiComponent, ImageComponent, MessageSequence
from src.common.logger import get_logger
from src.common.prompt_i18n import load_prompt
from src.config.config import config_manager, global_config
from src.config.config import global_config
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.llm_models.exceptions import ReqAbortException
from src.llm_models.payload_content.tool_option import ToolCall
@@ -44,6 +44,7 @@ from .monitor_events import (
emit_timing_gate_result,
)
from .planner_message_utils import build_planner_user_prefix_from_session_message
from .visual_mode_utils import resolve_enable_visual_planner
if TYPE_CHECKING:
from .runtime import MaisakaHeartFlowChatting
@@ -739,47 +740,10 @@ class MaisakaReasoningEngine:
planner_prefix: str,
) -> MessageSequence:
message_sequence = build_prefixed_message_sequence(message.raw_message, planner_prefix)
if self._resolve_enable_visual_planner():
if resolve_enable_visual_planner():
await self._hydrate_visual_components(message_sequence.components)
return message_sequence
@staticmethod
def _resolve_enable_visual_planner() -> bool:
planner_mode = global_config.visual.planner_mode
planner_task_config = config_manager.get_model_config().model_task_config.planner
models_by_name = {model.name: model for model in config_manager.get_model_config().models}
if planner_mode == "text":
return False
planner_models: list[str] = list(planner_task_config.model_list)
missing_models = [model_name for model_name in planner_models if model_name not in models_by_name]
non_visual_models = [
model_name for model_name in planner_models if model_name in models_by_name and not models_by_name[model_name].visual
]
if planner_mode == "multimodal":
if missing_models:
raise ValueError(
"planner_mode=multimodal但 planner 任务存在未定义的模型:"
f"{', '.join(missing_models)}"
)
if non_visual_models:
raise ValueError(
"planner_mode=multimodal但 planner 任务存在未开启 visual 的模型:"
f"{', '.join(non_visual_models)}"
)
return True
if missing_models:
logger.warning(
"planner_mode=auto 时发现 planner 任务存在未定义模型:"
f"{', '.join(missing_models)},将退化为纯文本 planner"
)
return False
return bool(planner_models) and not non_visual_models
async def _hydrate_visual_components(self, planner_components: list[object]) -> None:
"""在 Maisaka 真正需要图片或表情时,按需回填二进制数据。"""
load_tasks: list[asyncio.Task[None]] = []

View File

@@ -183,6 +183,43 @@ class MaisakaHeartFlowChatting:
self._talk_frequency_adjust = max(0.01, float(frequency))
self._schedule_message_turn()
def append_sent_message_to_chat_history(
    self,
    message: SessionMessage,
    *,
    source_kind: str = "guided_reply",
) -> bool:
    """Mirror a successfully delivered message into Maisaka's internal history.

    Returns True when the entry was appended; on any conversion failure the
    error is logged and swallowed (returning False) so that message sending
    itself is never disrupted.
    """
    try:
        # Imported lazily to avoid module-level import cycles with the
        # context/history helpers.
        from .context_messages import SessionBackedMessage
        from .history_utils import build_prefixed_message_sequence, build_session_message_visible_text
        from .planner_message_utils import build_planner_prefix

        sender = message.message_info.user_info
        # Prefer group card name, then nickname, then the bare user id.
        display_name = sender.user_cardname or sender.user_nickname or sender.user_id
        prefix = build_planner_prefix(
            timestamp=message.timestamp,
            user_name=display_name,
            group_card=sender.user_cardname or "",
            message_id=message.message_id,
            # Notify messages (and messages without an id) carry no message id.
            include_message_id=bool(message.message_id) and not message.is_notify,
        )
        self._chat_history.append(
            SessionBackedMessage.from_session_message(
                message,
                raw_message=build_prefixed_message_sequence(message.raw_message, prefix),
                visible_text=build_session_message_visible_text(message),
                source_kind=source_kind,
            )
        )
        return True
    except Exception as exc:
        logger.warning(
            f"{self.log_prefix} 同步已发送消息到 Maisaka 历史失败: "
            f"message_id={message.message_id} error={exc}"
        )
        return False
async def register_message(self, message: SessionMessage) -> None:
"""缓存一条新消息并唤醒主循环。"""
if self._running:
@@ -1151,7 +1188,6 @@ class MaisakaHeartFlowChatting:
chat_id=self.session_id,
request_kind=labels["request_kind"],
selection_reason=subtitle,
image_display_mode="path_link" if global_config.maisaka.show_image_path else "legacy",
),
title=labels["prompt_title"],
border_style=border_style,

View File

@@ -0,0 +1,43 @@
from src.common.logger import get_logger
from src.config.config import config_manager, global_config
logger = get_logger("maisaka_visual_mode")
def resolve_enable_visual_planner() -> bool:
    """Decide from the planner configuration whether visual messages are enabled.

    Modes:
      - ``text``: always disabled.
      - ``multimodal``: strict — every configured planner model must exist
        and have ``visual`` enabled, otherwise ValueError is raised.
      - otherwise (``auto``): enabled only when at least one planner model is
        configured, all are defined, and all support visual input; undefined
        models degrade gracefully to a text-only planner with a warning.
    """
    mode = global_config.visual.planner_mode
    # Resolve config objects up front (order preserved from the original
    # implementation, including the early access in text mode).
    task_config = config_manager.get_model_config().model_task_config.planner
    known_models = {model.name: model for model in config_manager.get_model_config().models}
    if mode == "text":
        return False

    task_models: list[str] = list(task_config.model_list)
    undefined = [name for name in task_models if name not in known_models]
    text_only = [
        name for name in task_models if name in known_models and not known_models[name].visual
    ]

    if mode == "multimodal":
        # Strict mode: configuration errors are fatal.
        if undefined:
            raise ValueError(
                "planner_mode=multimodal但 planner 任务存在未定义的模型:"
                f"{', '.join(undefined)}"
            )
        if text_only:
            raise ValueError(
                "planner_mode=multimodal但 planner 任务存在未开启 visual 的模型:"
                f"{', '.join(text_only)}"
            )
        return True

    if undefined:
        logger.warning(
            "planner_mode=auto 时发现 planner 任务存在未定义模型:"
            f"{', '.join(undefined)},将退化为纯文本 planner"
        )
        return False
    return bool(task_models) and not text_only