diff --git a/pytests/test_context_message_fallback.py b/pytests/test_context_message_fallback.py
new file mode 100644
index 00000000..4f6c590f
--- /dev/null
+++ b/pytests/test_context_message_fallback.py
@@ -0,0 +1,22 @@
+from src.common.data_models.message_component_data_model import ImageComponent, MessageSequence, TextComponent
+from src.llm_models.payload_content.message import RoleType
+from src.maisaka.context_messages import _build_message_from_sequence
+
+
+def test_image_only_message_keeps_placeholder_in_text_fallback() -> None:
+    message_sequence = MessageSequence(
+        [
+            TextComponent("[时间]19:21:20\n[用户名]William730\n[用户群昵称]\n[msg_id]1385025976\n[发言内容]"),
+            ImageComponent(binary_hash="hash", content=None, binary_data=None),
+        ]
+    )
+
+    message = _build_message_from_sequence(
+        RoleType.User,
+        message_sequence,
+        "[时间]19:21:20\n[用户名]William730\n[用户群昵称]\n[msg_id]1385025976\n[发言内容][图片]",
+    )
+
+    assert message is not None
+    assert "[发言内容]" in message.get_text_content()
+    assert "[图片]" in message.get_text_content()
diff --git a/src/chat/replyer/maisaka_generator_base.py b/src/chat/replyer/maisaka_generator_base.py
index 06833997..ae3ab645 100644
--- a/src/chat/replyer/maisaka_generator_base.py
+++ b/src/chat/replyer/maisaka_generator_base.py
@@ -116,28 +116,6 @@ class BaseMaisakaReplyGenerator:
             return self._normalize_content(body.strip())
         return ""
 
-    @staticmethod
-    def _split_user_message_segments(raw_content: str) -> List[tuple[Optional[str], str]]:
-        segments: List[tuple[Optional[str], str]] = []
-        current_speaker: Optional[str] = None
-        current_lines: List[str] = []
-
-        for raw_line in raw_content.splitlines():
-            speaker_name, content_body = parse_speaker_content(raw_line)
-            if speaker_name is not None:
-                if current_lines:
-                    segments.append((current_speaker, "\n".join(current_lines)))
-                current_speaker = speaker_name
-                current_lines = [content_body]
-                continue
-
-            current_lines.append(raw_line)
-
-        if current_lines:
-            segments.append((current_speaker, "\n".join(current_lines)))
-
-        return segments
-
     def _build_target_message_block(self, reply_message: Optional[SessionMessage]) -> str:
         if reply_message is None:
             return ""
@@ -292,8 +270,6 @@ class BaseMaisakaReplyGenerator:
         chat_history: List[LLMContextMessage],
         enable_visual_message: bool,
     ) -> List[Message]:
-        bot_nickname = global_config.bot.nickname.strip() or "Bot"
-        default_user_name = global_config.maisaka.cli_user_name.strip() or "User"
         messages: List[Message] = []
 
         for message in chat_history:
@@ -313,20 +289,9 @@ class BaseMaisakaReplyGenerator:
                     messages.append(visual_message)
                     continue
 
-                for speaker_name, content_body in self._split_user_message_segments(message.processed_plain_text):
-                    content = self._normalize_content(content_body)
-                    if not content:
-                        continue
-
-                    visible_speaker = speaker_name or default_user_name
-                    if visible_speaker == bot_nickname:
-                        messages.append(
-                            MessageBuilder().set_role(RoleType.Assistant).add_text_content(content).build()
-                        )
-                        continue
-
-                    user_content = f"[{visible_speaker}]{content}"
-                    messages.append(MessageBuilder().set_role(RoleType.User).add_text_content(user_content).build())
+                llm_message = message.to_llm_message()
+                if llm_message is not None:
+                    messages.append(llm_message)
                 continue
 
             if isinstance(message, AssistantMessage):
@@ -529,8 +494,6 @@ class BaseMaisakaReplyGenerator:
             prompt_preview = PromptCLIVisualizer._build_prompt_dump_text(request_messages)
             return request_messages
 
-        result.completion.request_prompt = prompt_preview
-        result.request_messages = serialize_prompt_messages(request_messages)
         preview_chat_id = self._resolve_session_id(stream_id)
         replyer_prompt_section: RenderableType | None = None
         if show_replyer_prompt:
diff --git a/src/llm_models/exceptions.py b/src/llm_models/exceptions.py
index 25452e0b..12efc6fd 100644
--- a/src/llm_models/exceptions.py
+++ b/src/llm_models/exceptions.py
@@ -54,27 +54,30 @@ class RespNotOkException(Exception):
         return f"未知的异常响应代码:{self.status_code}"
 
 
-class RespParseException(Exception):
-    """响应解析错误,常见于响应格式不正确或解析方法不匹配"""
+class ResponseContextException(Exception):
+    """携带原始响应上下文的异常基类。"""
 
-    def __init__(self, ext_info: Any, message: str | None = None):
+    default_message: str = "请求失败"
+
+    def __init__(self, ext_info: Any = None, message: str | None = None):
         super().__init__(message)
         self.ext_info = ext_info
         self.message = message
 
     def __str__(self):
-        return self.message or "解析响应内容时发生未知错误,请检查是否配置了正确的解析方法"
+        return self.message or self.default_message
 
 
-class EmptyResponseException(Exception):
+class RespParseException(ResponseContextException):
+    """响应解析错误,常见于响应格式不正确或解析方法不匹配"""
+
+    default_message = "解析响应内容时发生未知错误,请检查是否配置了正确的解析方法"
+
+
+class EmptyResponseException(ResponseContextException):
     """响应内容为空"""
 
-    def __init__(self, message: str = "响应内容为空,这可能是一个临时性问题"):
-        super().__init__(message)
-        self.message = message
-
-    def __str__(self):
-        return self.message
+    default_message = "响应内容为空,这可能是一个临时性问题"
 
 
 class ModelAttemptFailed(Exception):
diff --git a/src/llm_models/model_client/gemini_client.py b/src/llm_models/model_client/gemini_client.py
index 42862c3e..1ef6cdd4 100644
--- a/src/llm_models/model_client/gemini_client.py
+++ b/src/llm_models/model_client/gemini_client.py
@@ -552,7 +552,7 @@ def _build_stream_api_response(
     _warn_if_max_tokens_truncated(last_response, response.content, response.tool_calls)
 
     if not response.content and not response.tool_calls and not response.reasoning_content:
-        raise EmptyResponseException()
+        raise EmptyResponseException(last_response)
 
     return response
 
@@ -627,7 +627,7 @@ def _default_normal_response_parser(
     usage_record = _extract_usage_record(response)
     _warn_if_max_tokens_truncated(response, api_response.content, api_response.tool_calls)
     if not api_response.content and not api_response.tool_calls and not api_response.reasoning_content:
-        raise EmptyResponseException("响应中既无文本内容也无工具调用")
+        raise EmptyResponseException(response, "响应中既无文本内容也无工具调用")
 
     return api_response, usage_record
 
diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py
index b4dc0f1e..ddd9f72c 100644
--- a/src/llm_models/model_client/openai_client.py
+++ b/src/llm_models/model_client/openai_client.py
@@ -587,7 +587,7 @@ def _build_api_status_message(error: APIStatusError) -> str:
         message_parts.append(str(error.message))
     response_text = getattr(getattr(error, "response", None), "text", None)
     if response_text:
-        message_parts.append(str(response_text)[:300])
+        message_parts.append(str(response_text))
     if message_parts:
         return " | ".join(message_parts)
     return f"上游接口返回状态码 {error.status_code}"
@@ -750,7 +750,7 @@ class _OpenAIStreamAccumulator:
         response.raw_data = {"model": self.model_name} if self.model_name else None
 
         if not response.content and not response.tool_calls:
-            raise EmptyResponseException()
+            raise EmptyResponseException(response.raw_data)
 
         return response
 
@@ -834,7 +834,7 @@ def _default_normal_response_parser(
     """
     choices = getattr(resp, "choices", None)
    if not choices:
-        raise EmptyResponseException("响应解析失败,choices 为空或缺失")
+        raise EmptyResponseException(resp, "响应解析失败,choices 为空或缺失")
 
     api_response = APIResponse()
     message_part = choices[0].message
@@ -875,7 +875,7 @@ def _default_normal_response_parser(
         _log_length_truncation(finish_reason, getattr(resp, "model", None))
 
     if not api_response.content and not api_response.tool_calls:
-        raise EmptyResponseException()
+        raise EmptyResponseException(resp)
 
     return api_response, usage_record
 
diff --git a/src/llm_models/request_snapshot.py b/src/llm_models/request_snapshot.py
index a5ca84d0..8c9113d4 100644
--- a/src/llm_models/request_snapshot.py
+++ b/src/llm_models/request_snapshot.py
@@ -58,6 +58,42 @@ def _json_friendly(value: Any) -> Any:
     return str(value)
 
 
+def extract_error_response_body(error: Exception) -> Any | None:
+    """尽量从异常对象中提取上游返回体,便于排查模型请求失败。"""
+    candidate_errors = [error, getattr(error, "__cause__", None)]
+
+    for candidate in candidate_errors:
+        if candidate is None:
+            continue
+
+        response = getattr(candidate, "response", None)
+        if response is not None:
+            response_json = getattr(response, "json", None)
+            if callable(response_json):
+                try:
+                    return _json_friendly(response_json())
+                except Exception:
+                    pass
+
+            response_text = getattr(response, "text", None)
+            if response_text not in (None, ""):
+                return str(response_text)
+
+            response_content = getattr(response, "content", None)
+            if response_content not in (None, b"", ""):
+                return _json_friendly(response_content)
+
+        response_body = getattr(candidate, "body", None)
+        if response_body not in (None, "", b""):
+            return _json_friendly(response_body)
+
+        ext_info = getattr(candidate, "ext_info", None)
+        if ext_info is not None:
+            return _json_friendly(ext_info)
+
+    return None
+
+
 def _sanitize_filename_component(value: str) -> str:
     """将任意字符串转换为适合文件名使用的片段。"""
     normalized_value = FILENAME_SAFE_PATTERN.sub("-", value.strip())
@@ -388,6 +424,10 @@ def save_failed_request_snapshot(
         "snapshot_version": SNAPSHOT_VERSION,
     }
 
+    response_body = extract_error_response_body(error)
+    if response_body is not None:
+        snapshot_payload["error"]["response_body"] = response_body
+
     snapshot_payload["replay"] = {
         "command": build_replay_command(snapshot_path),
         "file_uri": snapshot_path.as_uri(),
diff --git a/src/maisaka/builtin_tool/send_emoji.py b/src/maisaka/builtin_tool/send_emoji.py
index 5cd73736..27353ec3 100644
--- a/src/maisaka/builtin_tool/send_emoji.py
+++ b/src/maisaka/builtin_tool/send_emoji.py
@@ -244,34 +244,8 @@ def _build_emoji_candidate_summary(emojis: list[MaiEmoji]) -> str:
     return "\n".join(summary_lines).strip()
 
 
-def _build_send_emoji_prompt_preview(
-    *,
-    system_prompt: str,
-    requested_emotion: str,
-    grid_rows: int,
-    grid_columns: int,
-    sampled_emojis: list[MaiEmoji],
-) -> str:
-    """构建表情选择子代理的文本预览。"""
-
-    task_text = (
-        "[选择任务]\n"
-        f"requested_emotion: {requested_emotion or '未指定'}\n"
-        f"候选总数: {len(sampled_emojis)}\n"
-        f"拼图布局: {grid_rows}x{grid_columns}\n"
-        "请只输出 JSON。"
-    )
-    candidate_summary = _build_emoji_candidate_summary(sampled_emojis)
-    return (
-        f"[System Prompt]\n{system_prompt}\n\n"
-        f"{task_text}\n\n"
-        f"[候选表情摘要]\n{candidate_summary or '无候选表情'}"
-    ).strip()
-
-
 def _build_send_emoji_monitor_detail(
     *,
-    prompt_text: str = "",
     request_messages: Optional[list[dict[str, Any]]] = None,
     reasoning_text: str = "",
     output_text: str = "",
@@ -281,8 +255,6 @@ def _build_send_emoji_monitor_detail(
     """构建 emotion tool 统一监控详情。"""
 
     detail: Dict[str, Any] = {}
-    if prompt_text.strip():
-        detail["prompt_text"] = prompt_text.strip()
     if isinstance(request_messages, list) and request_messages:
         detail["request_messages"] = request_messages
     if reasoning_text.strip():
@@ -392,13 +364,6 @@ async def _select_emoji_with_sub_agent(
         remaining_uses_value=1,
         display_prefix="[表情包选择任务]",
     )
-    prompt_preview = _build_send_emoji_prompt_preview(
-        system_prompt=system_prompt,
-        requested_emotion=requested_emotion,
-        grid_rows=grid_rows,
-        grid_columns=grid_columns,
-        sampled_emojis=sampled_emojis,
-    )
     request_messages = [
         MessageBuilder().set_role(RoleType.System).add_text_content(system_prompt).build(),
     ]
@@ -436,7 +401,6 @@ async def _select_emoji_with_sub_agent(
         logger.warning(f"{tool_ctx.runtime.log_prefix} 表情包子代理结果解析失败,将回退到候选首项: {exc}")
         if selection_metadata is not None:
             selection_metadata["monitor_detail"] = _build_send_emoji_monitor_detail(
-                prompt_text=prompt_preview,
                 request_messages=serialized_request_messages,
                 output_text=response.content or "",
                 metrics=selection_metrics,
@@ -451,7 +415,6 @@ async def _select_emoji_with_sub_agent(
     if selection_metadata is not None:
         selection_metadata["reason"] = selection.reason.strip()
         selection_metadata["monitor_detail"] = _build_send_emoji_monitor_detail(
-            prompt_text=prompt_preview,
             request_messages=serialized_request_messages,
             reasoning_text=selection.reason,
             output_text=response.content or "",
diff --git a/src/maisaka/context_messages.py b/src/maisaka/context_messages.py
index 8d85d237..c96e9993 100644
--- a/src/maisaka/context_messages.py
+++ b/src/maisaka/context_messages.py
@@ -51,7 +51,9 @@ def _append_emoji_component(builder: MessageBuilder, component: EmojiComponent)
     if component.content:
         builder.add_text_content(component.content)
         return True
-    return False
+
+    builder.add_text_content("[表情包]")
+    return True
 
 
 def _append_image_component(builder: MessageBuilder, component: ImageComponent) -> bool:
@@ -65,7 +67,9 @@ def _append_image_component(builder: MessageBuilder, component: ImageComponent)
     if component.content:
         builder.add_text_content(component.content)
         return True
-    return False
+
+    builder.add_text_content("[图片]")
+    return True
 
 
 def _append_reply_component(builder: MessageBuilder, component: ReplyComponent) -> bool: