diff --git a/pytests/config_test/test_model_info_normalization.py b/pytests/config_test/test_model_info_normalization.py
new file mode 100644
index 00000000..72db7ea6
--- /dev/null
+++ b/pytests/config_test/test_model_info_normalization.py
@@ -0,0 +1,11 @@
+from src.config.model_configs import ModelInfo
+
+
+def test_model_identifier_strips_surrounding_whitespace() -> None:
+    model_info = ModelInfo(
+        api_provider="test-provider",
+        model_identifier=" glm-5.1 ",
+        name="test-model",
+    )
+
+    assert model_info.model_identifier == "glm-5.1"
diff --git a/pytests/test_openai_client_toolless_request.py b/pytests/test_openai_client_toolless_request.py
index eb6f6ae9..d99a691f 100644
--- a/pytests/test_openai_client_toolless_request.py
+++ b/pytests/test_openai_client_toolless_request.py
@@ -1,16 +1,59 @@
 from types import SimpleNamespace
 
+import pytest
+
 from src.config.model_configs import APIProvider, ReasoningParseMode, ToolArgumentParseMode
 from src.llm_models.model_client.openai_client import (
     _OpenAIStreamAccumulator,
     _build_reasoning_key,
     _default_normal_response_parser,
+    _parse_tool_arguments,
     _sanitize_messages_for_toolless_request,
 )
 from src.llm_models.payload_content.message import Message, RoleType, TextMessagePart
 from src.llm_models.payload_content.tool_option import ToolCall
 
 
+@pytest.mark.parametrize("parse_mode", list(ToolArgumentParseMode))
+def test_parse_tool_arguments_treats_blank_arguments_as_empty_dict(parse_mode: ToolArgumentParseMode) -> None:
+    assert _parse_tool_arguments("", parse_mode, None) == {}
+    assert _parse_tool_arguments(" ", parse_mode, None) == {}
+
+
+def test_normal_response_parser_accepts_empty_string_arguments_for_parameterless_tool() -> None:
+    response = SimpleNamespace(
+        choices=[
+            SimpleNamespace(
+                finish_reason="tool_calls",
+                message=SimpleNamespace(
+                    content=None,
+                    tool_calls=[
+                        SimpleNamespace(
+                            id="finish-call",
+                            type="function",
+                            function=SimpleNamespace(name="finish", arguments=""),
+                        )
+                    ],
+                ),
+            )
+        ],
+        usage=None,
+        model="glm-5.1",
+    )
+
+    api_response, usage_record = _default_normal_response_parser(
+        response,
+        reasoning_parse_mode=ReasoningParseMode.AUTO,
+        tool_argument_parse_mode=ToolArgumentParseMode.AUTO,
+        reasoning_key=None,
+    )
+
+    assert len(api_response.tool_calls) == 1
+    assert api_response.tool_calls[0].func_name == "finish"
+    assert api_response.tool_calls[0].args == {}
+    assert usage_record is None
+
+
 def test_sanitize_messages_for_toolless_request_drops_assistant_tool_call_without_parts() -> None:
     messages = [
         Message(
diff --git a/src/config/model_configs.py b/src/config/model_configs.py
index faecff1d..9ef05dbf 100644
--- a/src/config/model_configs.py
+++ b/src/config/model_configs.py
@@ -351,6 +351,7 @@ class ModelInfo(ConfigBase):
     Gemini 客户端会按自身支持的字段筛选并映射到 GenerateContentConfig、EmbedContentConfig 或音频请求配置中。"""
 
     def model_post_init(self, context: Any = None):
+        self.model_identifier = self.model_identifier.strip()
         if not self.model_identifier:
             raise ValueError(t("config.model_identifier_empty_generic"))
         if not self.name:
diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py
index 02611907..db2313ef 100644
--- a/src/llm_models/model_client/openai_client.py
+++ b/src/llm_models/model_client/openai_client.py
@@ -585,6 +585,9 @@ def _parse_tool_arguments(
     Raises:
         RespParseException: 当参数无法解析为字典时抛出。
     """
+    if not raw_arguments.strip():
+        return {}
+
     try:
         if parse_mode == ToolArgumentParseMode.STRICT:
             arguments: Any = json.loads(raw_arguments)