remove: remove the tool_use model task, fix the jargon extraction issue, and switch the statistics to tool statistics

SengokuCola
2026-03-29 16:26:34 +08:00
parent 868438e3c1
commit 82bbf0fd52
25 changed files with 906 additions and 311 deletions

View File

@@ -577,29 +577,6 @@ class DefaultReplyer:
duration = end_time - start_time
return name, result, duration
    async def _build_disabled_jargon_explanation(self) -> str:
        """Placeholder coroutine used when jargon explanation is turned off, to avoid an extra LLM call."""
        return ""

    async def _build_unknown_words_jargon(self, unknown_words: Optional[List[str]], chat_id: str) -> str:
        """Perform jargon retrieval for the unknown-word list provided by the Planner."""
        if not unknown_words:
            return ""
        # Clean the unknown-word list, keeping only non-empty strings
        concepts: List[str] = []
        for item in unknown_words:
            if isinstance(item, str):
                s = item.strip()
                if s:
                    concepts.append(s)
        if not concepts:
            return ""
        try:
            return await retrieve_concepts_with_jargon(concepts, chat_id)
        except Exception as e:
            logger.error(f"Unknown-word jargon retrieval failed: {e}")
            return ""
    async def _build_jargon_explanation(
        self,
        chat_id: str,
@@ -609,19 +586,14 @@ class DefaultReplyer:
    ) -> str:
        """
        Unified jargon-explanation builder:
        - decides the concrete strategy based on enable_jargon_explanation / jargon_mode
        - enabled or disabled based on enable_jargon_explanation
        """
        del unknown_words
        enable_jargon_explanation = getattr(global_config.expression, "enable_jargon_explanation", True)
        if not enable_jargon_explanation:
            return ""
        jargon_mode = getattr(global_config.expression, "jargon_mode", "context")
        # planner mode: use only the unknown_words provided by the Planner
        if jargon_mode == "planner":
            return await self._build_unknown_words_jargon(unknown_words, chat_id)
        # default / context mode: automatically match jargon from the context
        # Automatically match jargon from the context
        try:
            return await explain_jargon_in_context(chat_id, messages_short, chat_talking_prompt_short) or ""
        except Exception as e:
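With removed and added lines interleaved in the two hunks above, the net result is easy to miss. Below is a minimal sketch of how _build_jargon_explanation reads after this commit; the parameters after chat_id and the body of the except branch are not shown in the hunks, so they are inferred from the call to explain_jargon_in_context and labeled as assumptions, and the sketch relies on global_config, explain_jargon_in_context, and logger already being available in the module:

    async def _build_jargon_explanation(
        self,
        chat_id: str,
        messages_short=None,  # assumed parameter, inferred from the call below
        chat_talking_prompt_short=None,  # assumed parameter, inferred from the call below
        unknown_words=None,  # assumed to remain only for signature compatibility
    ) -> str:
        """
        Unified jargon-explanation builder:
        - enabled or disabled based on enable_jargon_explanation
        """
        del unknown_words
        enable_jargon_explanation = getattr(global_config.expression, "enable_jargon_explanation", True)
        if not enable_jargon_explanation:
            return ""
        # Automatically match jargon from the context
        try:
            return await explain_jargon_in_context(chat_id, messages_short, chat_talking_prompt_short) or ""
        except Exception as e:
            logger.error(f"Context jargon explanation failed: {e}")  # assumed: the except body is truncated in the hunk
            return ""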
@@ -1209,7 +1181,7 @@ class DefaultReplyer:
        prompt = await prompt_manager.render_prompt(template_prompt)
        generation_result = await llm_api.generate(
            llm_api.LLMServiceRequest(
                task_name="tool_use",
                task_name="utils",
                request_type="replyer.lpmm_knowledge",
                prompt=prompt,
                tool_options=[search_knowledge_tool.get_tool_definition()],

View File

@@ -20,8 +20,8 @@ from src.services.llm_service import LLMServiceClient
from src.maisaka.message_adapter import (
    get_message_kind,
    get_message_role,
    get_message_source,
    get_message_text,
    is_perception_message,
    parse_speaker_content,
)
@@ -121,6 +121,9 @@ class MaisakaReplyGenerator:
            role = get_message_role(message)
            timestamp = self._format_message_time(message)
            if get_message_source(message) == "user_reference":
                continue
            if role == "user":
                guided_reply = self._extract_guided_bot_reply(message)
                if guided_reply:
@@ -148,7 +151,6 @@ class MaisakaReplyGenerator:
        chat_history: List[SessionMessage],
        reply_reason: str,
        expression_habits: str = "",
        jargon_explanation: str = "",
    ) -> str:
        """Build the Maisaka replyer prompt."""
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
@@ -167,8 +169,6 @@ class MaisakaReplyGenerator:
        extra_sections: List[str] = []
        if expression_habits.strip():
            extra_sections.append(expression_habits.strip())
        if jargon_explanation.strip():
            extra_sections.append(jargon_explanation.strip())
        user_sections = [
            f"Current time: {current_time}",
@@ -198,7 +198,6 @@ class MaisakaReplyGenerator:
        log_reply: bool = True,
        chat_history: Optional[List[SessionMessage]] = None,
        expression_habits: str = "",
        jargon_explanation: str = "",
        selected_expression_ids: Optional[List[int]] = None,
    ) -> Tuple[bool, ReplyGenerationResult]:
        """Generate Maisaka's final visible reply from the chat context."""
@@ -223,20 +222,20 @@ class MaisakaReplyGenerator:
            f"Maisaka replyer start: stream_id={stream_id} reply_reason={reply_reason!r} "
            f"history_size={len(chat_history)} target_message_id="
            f"{reply_message.message_id if reply_message else None} "
            f"expression_count={len(result.selected_expression_ids)} "
            f"jargon_enabled={bool(jargon_explanation.strip())}"
            f"expression_count={len(result.selected_expression_ids)}"
        )
        filtered_history = [
            message
            for message in chat_history
            if get_message_role(message) != "system" and get_message_kind(message) != "perception"
            if get_message_role(message) != "system"
            and get_message_kind(message) != "perception"
            and get_message_source(message) != "user_reference"
        ]
        prompt = self._build_prompt(
            chat_history=filtered_history,
            reply_reason=reply_reason or "",
            expression_habits=expression_habits,
            jargon_explanation=jargon_explanation,
        )
        result.completion.request_prompt = prompt
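The filtered_history comprehension now also drops messages whose source is user_reference, alongside system and perception messages. Below is a self-contained sketch of that predicate, using a dict-based stand-in for SessionMessage and stub accessors in place of the real message_adapter helpers; every name in it is illustrative and not taken from this diff:

from typing import Dict, List

Message = Dict[str, str]  # illustrative stand-in for SessionMessage


def get_message_role(message: Message) -> str:
    return message.get("role", "")


def get_message_kind(message: Message) -> str:
    return message.get("kind", "")


def get_message_source(message: Message) -> str:
    return message.get("source", "")


def filter_history(chat_history: List[Message]) -> List[Message]:
    # Same predicate as the updated list comprehension: drop system messages,
    # perception messages, and injected user_reference context.
    return [
        message
        for message in chat_history
        if get_message_role(message) != "system"
        and get_message_kind(message) != "perception"
        and get_message_source(message) != "user_reference"
    ]


history = [
    {"role": "user", "kind": "chat", "source": "chat"},
    {"role": "system", "kind": "chat", "source": "chat"},
    {"role": "user", "kind": "perception", "source": "chat"},
    {"role": "user", "kind": "chat", "source": "user_reference"},
]
print(len(filter_history(history)))  # 1 message survives the filter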