feat: fix request issues for some models (v4l)

This commit is contained in:
SengokuCola
2026-04-22 23:36:39 +08:00
parent 1716272b25
commit 066c8baf84
7 changed files with 195 additions and 3 deletions

View File

@@ -11,6 +11,7 @@ from typing import Any, Awaitable, Callable, Dict, List, Optional, Sequence
import asyncio
import json
import logging
import os
import sys
@@ -610,6 +611,33 @@ class TestSDK:
        assert plugin.configs == [{"plugin_enabled": True}]
        assert plugin.updates == [("model", {"models": []}, "", [{"plugin_enabled": True}])]

    @pytest.mark.asyncio
    async def test_host_logs_runner_ready_plugin_failures(self, caplog):
        """When the host receives runner.ready it should explicitly log plugin registration failures."""
        from src.plugin_runtime.host.supervisor import PluginRunnerSupervisor
        from src.plugin_runtime.protocol.envelope import Envelope, MessageType

        supervisor = PluginRunnerSupervisor(plugin_dirs=[], runner_spawn_timeout_sec=1)
        envelope = Envelope(
            request_id=1,
            message_type=MessageType.REQUEST,
            method="runner.ready",
            plugin_id="",
            payload={
                "loaded_plugins": ["ok_plugin"],
                "failed_plugins": ["bad_plugin"],
                "inactive_plugins": ["disabled_plugin"],
            },
        )

        with caplog.at_level(logging.INFO, logger="plugin_runtime.host.runner_manager"):
            response = await supervisor._handle_runner_ready(envelope)

        assert response.payload["accepted"] is True
        assert "插件注册失败: bad_plugin" in caplog.text
        assert "插件未激活: disabled_plugin" in caplog.text
        assert "Runner 插件初始化完成: loaded=1 failed=1 inactive=1" in caplog.text

    @pytest.mark.asyncio
    async def test_runner_bootstraps_capabilities_before_on_load(self, monkeypatch):
        """Capability calls made during on_load should take effect after bootstrap."""

View File

@@ -1,4 +1,5 @@
from types import SimpleNamespace
from typing import Any
import importlib.util
import sys
@@ -220,3 +221,77 @@ def test_mute_plugin_exports_allowed_groups_as_component_allowed_session() -> No
    assert mute_components[0]["chat_scope"] == "group"
    assert mute_components[0]["allowed_session"] == ["qq:10001", "raw-group-id"]
    assert "allowed_session" not in mute_components[0]["metadata"]

@pytest.mark.asyncio
async def test_mute_tool_queries_target_message_with_current_chat_id() -> None:
    module_path = "plugins/MutePlugin/plugin.py"
    spec = importlib.util.spec_from_file_location("mute_plugin_under_test_msg_id", module_path)
    assert spec is not None
    assert spec.loader is not None
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module
    spec.loader.exec_module(module)
    module.MutePluginConfig.model_rebuild()

    capability_calls: list[dict[str, Any]] = []
    api_calls: list[dict[str, Any]] = []

    async def fake_call_capability(name: str, **kwargs: Any) -> dict[str, Any]:
        capability_calls.append({"name": name, **kwargs})
        return {
            "success": True,
            "result": {
                "success": True,
                "message": {
                    "message_info": {
                        "user_info": {
                            "user_id": "35529667",
                            "user_cardname": "目标用户",
                            "user_nickname": "目标昵称",
                        }
                    }
                },
            },
        }

    async def fake_api_call(api_name: str, **kwargs: Any) -> dict[str, Any]:
        api_calls.append({"name": api_name, **kwargs})
        if api_name == "adapter.napcat.group.get_group_member_info":
            return {"success": True, "result": {"data": {"role": "member"}}}
        return {"status": "ok", "retcode": 0}

    plugin = module.MutePlugin()
    plugin.set_plugin_config({"components": {"enable_smart_mute": True}})
    plugin._set_context(
        SimpleNamespace(
            call_capability=fake_call_capability,
            api=SimpleNamespace(call=fake_api_call),
            logger=SimpleNamespace(info=lambda *args, **kwargs: None, warning=lambda *args, **kwargs: None),
        )
    )

    success, message = await plugin.handle_mute_tool(
        stream_id="current-session-id",
        group_id="766798517",
        msg_id="2046083292",
        duration=3600,
        reason="测试",
    )

    assert success is True
    assert message == "成功禁言 目标用户"
    assert capability_calls == [
        {
            "name": "message.get_by_id",
            "message_id": "2046083292",
            "chat_id": "current-session-id",
        }
    ]
    assert api_calls[-1] == {
        "name": "adapter.napcat.group.set_group_ban",
        "version": "1",
        "group_id": "766798517",
        "user_id": "35529667",
        "duration": 3600,
    }
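
For orientation, a minimal sketch of the lookup this test pins down: the mute tool should resolve the target message via the current session's chat id rather than the group id. The context attribute name and surrounding structure below are illustrative assumptions, not copied from plugins/MutePlugin/plugin.py; only the capability name and arguments mirror the test.

# Hypothetical sketch of the relevant step inside handle_mute_tool.
lookup = await self._context.call_capability(
    "message.get_by_id",
    message_id=msg_id,
    chat_id=stream_id,  # current session id, not group_id
)
user_info = lookup["result"]["message"]["message_info"]["user_info"]
target_user_id = user_info["user_id"]  # later forwarded to adapter.napcat.group.set_group_ban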

View File

@@ -35,7 +35,7 @@ from .context_messages import (
    ToolResultMessage,
    build_llm_message_from_context,
)
from .history_utils import drop_orphan_tool_results
from .history_utils import drop_orphan_tool_results, normalize_tool_result_order
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .visual_mode_utils import resolve_enable_visual_planner
@@ -652,6 +652,7 @@ class MaisakaChatLoopService:
        selected_history = [filtered_history[index] for index in selected_indices]
        selected_history, _ = MaisakaChatLoopService._hide_early_assistant_messages(selected_history)
        selected_history, _ = drop_orphan_tool_results(selected_history)
        selected_history, _ = normalize_tool_result_order(selected_history)
        tool_message_count = sum(1 for message in selected_history if isinstance(message, ToolResultMessage))
        normal_message_count = len(selected_history) - tool_message_count
        selection_reason = (

View File

@@ -3,7 +3,7 @@
from dataclasses import dataclass
from .context_messages import AssistantMessage, LLMContextMessage, ToolResultMessage
from .history_utils import drop_leading_orphan_tool_results, drop_orphan_tool_results
from .history_utils import drop_leading_orphan_tool_results, drop_orphan_tool_results, normalize_tool_result_order
TIMING_HISTORY_TOOL_NAMES = {"continue", "finish", "no_reply", "wait"}
EARLY_TRIM_RATIO = 0.2
@@ -15,6 +15,7 @@ class HistoryPostProcessResult:
    history: list[LLMContextMessage]
    removed_count: int
    changed_count: int
    remaining_context_count: int
@@ -30,6 +31,7 @@ def process_chat_history_after_cycle(
    removed_assistant_thought_count = _remove_early_assistant_thoughts(processed_history)
    processed_history, orphan_removed_count = drop_orphan_tool_results(processed_history)
    processed_history, moved_tool_result_count = normalize_tool_result_order(processed_history)
    remaining_context_count = sum(1 for message in processed_history if message.count_in_context)
    removed_overflow_count = 0
@@ -48,9 +50,11 @@ def process_chat_history_after_cycle(
        + orphan_removed_count
        + removed_overflow_count
    )
    changed_count = removed_count + moved_tool_result_count
    return HistoryPostProcessResult(
        history=processed_history,
        removed_count=removed_count,
        changed_count=changed_count,
        remaining_context_count=remaining_context_count,
    )

View File

@@ -105,3 +105,70 @@ def drop_orphan_tool_results(
        filtered_history.append(message)
    return filtered_history, removed_count


def normalize_tool_result_order(
    chat_history: list[LLMContextMessage],
) -> tuple[list[LLMContextMessage], int]:
    """Move tool results that got separated by other messages back behind their matching assistant tool_calls."""
    if not chat_history:
        return chat_history, 0

    consumed_indexes: set[int] = set()
    normalized_history: list[LLMContextMessage] = []
    moved_count = 0
    for index, message in enumerate(chat_history):
        if index in consumed_indexes:
            continue
        normalized_history.append(message)
        if not isinstance(message, AssistantMessage) or not message.tool_calls:
            continue
        appended_tool_result_count = 0
        for tool_call in message.tool_calls:
            tool_call_id = str(tool_call.call_id or "").strip()
            if not tool_call_id:
                continue
            matching_index = _find_tool_result_index(
                chat_history,
                tool_call_id=tool_call_id,
                start_index=index + 1,
                consumed_indexes=consumed_indexes,
            )
            if matching_index is None:
                continue
            consumed_indexes.add(matching_index)
            normalized_history.append(chat_history[matching_index])
            expected_index = index + appended_tool_result_count + 1
            if matching_index != expected_index:
                moved_count += 1
            appended_tool_result_count += 1

    if moved_count <= 0:
        return chat_history, 0
    return normalized_history, moved_count


def _find_tool_result_index(
    chat_history: list[LLMContextMessage],
    *,
    tool_call_id: str,
    start_index: int,
    consumed_indexes: set[int],
) -> int | None:
    """Find the index of the tool result message matching the given tool_call_id."""
    for index in range(start_index, len(chat_history)):
        if index in consumed_indexes:
            continue
        message = chat_history[index]
        if not isinstance(message, ToolResultMessage):
            continue
        if message.tool_call_id == tool_call_id:
            return index
    return None
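
To illustrate the intended effect of normalize_tool_result_order, a schematic example follows. The three message variables stand in for real AssistantMessage / ToolResultMessage instances; their constructors are not shown in this diff, so construction is omitted and the example is not runnable as-is.

# Schematic only: assistant_msg carries tool_calls=[call_id="t1"], tool_result_t1
# carries tool_call_id="t1", and other_msg is any unrelated context message.
history = [assistant_msg, other_msg, tool_result_t1]
normalized, moved = normalize_tool_result_order(history)
# normalized == [assistant_msg, tool_result_t1, other_msg]
# moved == 1, because the tool result was not directly behind its assistant message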

View File

@@ -341,6 +341,13 @@ class MaisakaReasoningEngine:
            queued_trigger = await self._runtime._internal_turn_queue.get()
            message_triggered, timeout_triggered = self._drain_ready_turn_triggers(queued_trigger)
            if self._runtime._agent_state == self._runtime._STATE_WAIT and not timeout_triggered:
                self._runtime._message_turn_scheduled = False
                logger.debug(
                    f"{self._runtime.log_prefix} 当前仍处于 wait 状态,忽略消息触发并继续等待超时"
                )
                continue
            if message_triggered:
                await self._runtime._wait_for_message_quiet_period()
                self._runtime._message_turn_scheduled = False
@@ -809,10 +816,12 @@ class MaisakaReasoningEngine:
            self._runtime._chat_history,
            max_context_size=self._runtime._max_context_size,
        )
        if process_result.removed_count <= 0:
        if process_result.changed_count <= 0:
            return
        self._runtime._chat_history = process_result.history
        if process_result.removed_count <= 0:
            return
        self._runtime._log_history_trimmed(
            process_result.removed_count,
            process_result.remaining_context_count,

View File

@@ -1306,6 +1306,14 @@ class PluginRunnerSupervisor:
            return envelope.make_error_response(ErrorCode.E_BAD_PAYLOAD.value, str(exc))

        self._runner_ready_payloads = payload
        if payload.failed_plugins:
            logger.error(f"插件注册失败: {', '.join(payload.failed_plugins)}")
        if payload.inactive_plugins:
            logger.warning(f"插件未激活: {', '.join(payload.inactive_plugins)}")
        logger.info(
            "Runner 插件初始化完成: "
            f"loaded={len(payload.loaded_plugins)} failed={len(payload.failed_plugins)} inactive={len(payload.inactive_plugins)}"
        )
        self._runner_ready_events.set()
        return envelope.make_response(payload={"accepted": True})