fix: 修复部分显示问题和模型问题

This commit is contained in:
SengokuCola
2026-04-23 17:40:33 +08:00
parent 03750cdb6b
commit 3b6d30cd5e
7 changed files with 99 additions and 17 deletions

View File

@@ -33,7 +33,7 @@ logger = get_logger("expressor")
express_learn_model = LLMServiceClient(
task_name="replyer", request_type="expression.learner"
)
summary_model = LLMServiceClient(task_name="replyer", request_type="expression.summary")
summary_model = LLMServiceClient(task_name="utils", request_type="expression.summary")
def register_expression_hook_specs(registry: HookSpecRegistry) -> List[HookSpec]:
"""注册表达方式系统内置 Hook 规格。

View File

@@ -23,7 +23,6 @@ from .expression_utils import is_single_char_jargon
logger = get_logger("jargon")
llm_extract = LLMServiceClient(task_name="replyer", request_type="jargon.extract")
llm_inference = LLMServiceClient(task_name="replyer", request_type="jargon.inference")

View File

@@ -90,7 +90,7 @@ def _append_reply_component(builder: MessageBuilder, component: ReplyComponent)
if not target_message_id:
return False
builder.add_text_content(f"[引用回复]({target_message_id})")
builder.add_text_content(f"[引用]quote_id={target_message_id}")
return True
@@ -167,7 +167,7 @@ def _render_component_for_prompt(component: StandardMessageComponents) -> str:
if target_content:
return f"[回复消息: {target_content}]"
target_message_id = component.target_message_id.strip()
return f"[引用回复]({target_message_id})" if target_message_id else "[回复消息]"
return f"[引用]quote_id={target_message_id}" if target_message_id else "[回复消息]"
if isinstance(component, ForwardNodeComponent):
return _build_forward_preview_block(component)

View File

@@ -93,6 +93,6 @@ def build_visible_text_from_sequence(message_sequence: MessageSequence) -> str:
if isinstance(component, ReplyComponent):
target_message_id = component.target_message_id.strip()
if target_message_id:
parts.append(f"[引用回复]({target_message_id})")
parts.append(f"[引用]quote_id={target_message_id}")
return "".join(parts)

View File

@@ -31,13 +31,18 @@ def build_planner_prefix(
str: 拼接完成的规划器前缀。
"""
prefix_parts = [
f"[时间]{timestamp.strftime('%H:%M:%S')}\n",
f"[用户名]{user_name}\n",
f"[用户群昵称]{group_card}\n",
]
prefix_parts = []
if include_message_id:
prefix_parts.append(f"[msg_id]{message_id or ''}\n")
prefix_parts.extend(
[
f"[时间]{timestamp.strftime('%H:%M:%S')}\n",
f"[用户名]{user_name}\n",
]
)
normalized_group_card = group_card.strip()
if normalized_group_card:
prefix_parts.append(f"[用户群昵称]{normalized_group_card}\n")
prefix_parts.append("[发言内容]")
return "".join(prefix_parts)

View File

@@ -190,6 +190,11 @@ class MaisakaReasoningEngine:
deferred_tool_specs.append(tool_spec)
self._runtime.update_deferred_tool_specs(deferred_tool_specs)
selected_history, _ = self._runtime._chat_loop_service.select_llm_context_messages(
self._runtime._chat_history,
request_kind="planner",
)
self._runtime.sync_discovered_deferred_tools_with_context(selected_history)
discovered_deferred_tool_specs = self._runtime.get_discovered_deferred_tool_specs()
visible_tool_specs = [*visible_builtin_tool_specs, *discovered_deferred_tool_specs]
self._runtime.set_current_action_tool_names([tool_spec.name for tool_spec in visible_tool_specs])

View File

@@ -6,6 +6,7 @@ from math import ceil
from typing import Any, Literal, Optional, Sequence
import asyncio
import json
import time
from rich.console import Group, RenderableType
@@ -34,7 +35,13 @@ from src.plugin_runtime.tool_provider import PluginToolProvider
from src.plugin_runtime.hook_payloads import deserialize_prompt_messages
from .chat_loop_service import ChatResponse, MaisakaChatLoopService
from .context_messages import LLMContextMessage, ReferenceMessage, ReferenceMessageType
from .context_messages import (
AssistantMessage,
LLMContextMessage,
ReferenceMessage,
ReferenceMessageType,
ToolResultMessage,
)
from .display.display_utils import build_tool_call_summary_lines, format_token_count
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .display.stage_status_board import remove_stage_status, update_stage_status
@@ -637,6 +644,73 @@ class MaisakaHeartFlowChatting:
self.deferred_tool_specs_by_name = next_specs_by_name
self.discovered_tool_names.intersection_update(next_specs_by_name.keys())
def sync_discovered_deferred_tools_with_context(
    self,
    selected_history: Sequence[LLMContextMessage],
) -> None:
    """Re-sync discovered deferred tools with the tool_search call chains
    actually visible in the current context.

    An activated deferred tool must be backed by a matching tool_search
    call and result within this round's context; once that chain is
    trimmed out of the context window, the tool folds back into the
    deferred-tools prompt.
    """
    supported_names = self._extract_visible_tool_search_discoveries(selected_history)
    known_names = set(self.deferred_tool_specs_by_name)
    self.discovered_tool_names = supported_names & known_names
def _extract_visible_tool_search_discoveries(
    self,
    selected_history: Sequence[LLMContextMessage],
) -> set[str]:
    """Collect tool names still backed by a complete tool_search
    call/result pair in the current context."""
    # Gather the call ids of every tool_search invocation visible in history.
    call_ids: set[str] = set()
    for message in selected_history:
        if not isinstance(message, AssistantMessage):
            continue
        for tool_call in message.tool_calls:
            if tool_call.func_name == "tool_search" and tool_call.call_id:
                call_ids.add(tool_call.call_id)
    if not call_ids:
        return set()
    # Only successful tool_search results whose call is still visible count.
    names: set[str] = set()
    for message in selected_history:
        if (
            isinstance(message, ToolResultMessage)
            and message.tool_name == "tool_search"
            and message.tool_call_id in call_ids
            and message.success
        ):
            names |= self._parse_tool_search_result_tool_names(message.content)
    return names
def _parse_tool_search_result_tool_names(self, content: str) -> set[str]:
"""从 tool_search 的历史结果文本中解析有效 deferred tool 名称。"""
discovered_tool_names: set[str] = set()
try:
structured_content = json.loads(content)
except (TypeError, ValueError):
structured_content = None
if isinstance(structured_content, dict):
raw_tool_names = structured_content.get("matched_tool_names")
if isinstance(raw_tool_names, list):
for raw_tool_name in raw_tool_names:
normalized_name = str(raw_tool_name).strip()
if normalized_name in self.deferred_tool_specs_by_name:
discovered_tool_names.add(normalized_name)
for raw_line in content.splitlines():
normalized_line = raw_line.strip()
if not normalized_line.startswith("- "):
continue
normalized_name = normalized_line[2:].strip()
if normalized_name in self.deferred_tool_specs_by_name:
discovered_tool_names.add(normalized_name)
return discovered_tool_names
def get_discovered_deferred_tool_specs(self) -> list[ToolSpec]:
"""返回当前会话中已发现、且仍然有效的 deferred tools。"""
@@ -1007,9 +1081,10 @@ class MaisakaHeartFlowChatting:
f"聊天流名称:{getattr(self, 'session_name', self.session_id)}",
f"聊天流ID{self.session_id}",
]
if cycle_id is not None:
body_lines.append(f"循环编号:{cycle_id}")
panel_title = "MaiSaka 循环"
if cycle_id is not None:
panel_title = f"{panel_title} [{cycle_id}]"
panel_subtitle = self._build_cycle_time_records_text(time_records or {})
renderables: list[RenderableType] = [Text("\n".join(body_lines))]
timing_panel = self._build_cycle_stage_panel(
@@ -1019,7 +1094,7 @@ class MaisakaHeartFlowChatting:
prompt_tokens=timing_prompt_tokens,
response_text=timing_response,
prompt_section=timing_prompt_section,
extra_lines=[f"门控动作:{timing_action}"] if timing_action.strip() else None,
extra_lines=None,
)
if timing_panel is not None:
renderables.append(timing_panel)
@@ -1059,7 +1134,7 @@ class MaisakaHeartFlowChatting:
console.print(
Panel(
Group(*renderables),
title="MaiSaka 循环",
title=panel_title,
subtitle=panel_subtitle,
border_style="bright_blue",
padding=(0, 1),
@@ -1090,8 +1165,6 @@ class MaisakaHeartFlowChatting:
return None
body_lines: list[str] = []
if selected_history_count is not None:
body_lines.append(f"上下文占用:{selected_history_count}/{self._max_context_size}")
if prompt_tokens is not None:
body_lines.append(f"本次请求token消耗{format_token_count(prompt_tokens)}")
if extra_lines: