remove:emoji插件,修复tool被截断的问题

This commit is contained in:
SengokuCola
2026-04-01 17:06:44 +08:00
parent cecc58a9e7
commit f2aedf7681
5 changed files with 339 additions and 173 deletions

View File

@@ -0,0 +1,202 @@
"""Maisaka 表情工具内置能力。"""
from dataclasses import dataclass, field
from typing import Sequence
import random
from src.common.data_models.image_data_model import MaiEmoji
from src.common.data_models.llm_service_data_models import LLMGenerationOptions
from src.common.logger import get_logger
from src.common.utils.utils_image import ImageUtils
from src.services import send_service
from .emoji_manager import emoji_manager, emoji_manager_emotion_judge_llm
# Module-level logger dedicated to the Maisaka emoji tool.
logger = get_logger("emoji_maisaka_tool")
@dataclass(slots=True)
class MaisakaEmojiSendResult:
    """Outcome of a Maisaka emoji selection-and-send attempt."""

    success: bool  # True only when the emoji was actually delivered to the stream
    message: str  # human-readable status or error description
    emoji_base64: str = ""  # base64 payload of the sent image (empty on failure)
    description: str = ""  # textual description of the chosen emoji
    emotions: list[str] = field(default_factory=list)  # cleaned emotion tags of the chosen emoji
    requested_emotion: str = ""  # emotion label the caller asked for (stripped)
    matched_emotion: str = ""  # emotion label that was actually matched, if any
def _normalize_emotions(emoji: MaiEmoji) -> list[str]:
    """Extract and clean the emotion tags of a single emoji.

    Each tag is coerced to ``str`` and stripped; blank tags are dropped.
    """
    cleaned_tags: list[str] = []
    for raw_tag in emoji.emotion:
        tag_text = str(raw_tag).strip()
        if tag_text:
            cleaned_tags.append(tag_text)
    return cleaned_tags
def _build_recent_context_text(context_texts: Sequence[str], max_items: int = 5) -> str:
"""构建供情绪判断使用的最近上下文文本。"""
normalized_items = [str(item).strip() for item in context_texts if str(item).strip()]
if not normalized_items:
return ""
return "\n".join(normalized_items[-max_items:])
async def _select_emoji_with_llm(
    *,
    sampled_emojis: Sequence[MaiEmoji],
    reasoning: str,
    context_text: str,
) -> tuple[MaiEmoji, str]:
    """Ask the LLM to pick the best-fitting emotion label among sampled emojis.

    Falls back to a uniform random pick (with an empty label) when the
    sampled emojis carry no emotion tags, the LLM call fails, or the model
    returns a label that is not in the candidate set.
    """
    emojis_by_emotion: dict[str, list[MaiEmoji]] = {}
    for candidate in sampled_emojis:
        for label in _normalize_emotions(candidate):
            emojis_by_emotion.setdefault(label, []).append(candidate)
    labels = list(emojis_by_emotion)
    if not labels:
        return random.choice(list(sampled_emojis)), ""
    prompt = (
        "你正在为聊天场景选择一个最合适的表情包情绪标签。\n"
        f"发送原因:{reasoning or '辅助表达当前语气和情绪'}\n"
        f"最近聊天记录:\n{context_text or '(暂无额外上下文)'}\n\n"
        "可选情绪标签如下:\n"
        f"{chr(10).join(labels)}\n\n"
        "请只返回一个最匹配的情绪标签,不要解释。"
    )
    chosen_label = ""
    try:
        llm_result = await emoji_manager_emotion_judge_llm.generate_response(
            prompt,
            options=LLMGenerationOptions(temperature=0.3, max_tokens=60),
        )
    except Exception as exc:
        logger.warning(f"使用 LLM 选择表情情绪失败,将回退为随机选择: {exc}")
    else:
        chosen_label = (llm_result.response or "").strip().strip("\"'")
    # Empty labels never appear as keys, so a plain membership test suffices.
    if chosen_label in emojis_by_emotion:
        return random.choice(emojis_by_emotion[chosen_label]), chosen_label
    return random.choice(list(sampled_emojis)), ""
async def select_emoji_for_maisaka(
    *,
    requested_emotion: str = "",
    reasoning: str = "",
    context_texts: Sequence[str] | None = None,
    sample_size: int = 30,
) -> tuple[MaiEmoji | None, str]:
    """Pick a suitable emoji for Maisaka.

    First tries a case-insensitive exact match on ``requested_emotion``;
    otherwise samples up to ``sample_size`` emojis and delegates the choice
    to the LLM. Returns ``(None, "")`` when the emoji library is empty.
    """
    all_emojis = list(emoji_manager.emojis)
    if not all_emojis:
        return None, ""
    wanted = requested_emotion.strip()
    if wanted:
        wanted_lower = wanted.lower()
        candidates: list[MaiEmoji] = []
        for emoji in all_emojis:
            if wanted_lower in (tag.lower() for tag in _normalize_emotions(emoji)):
                candidates.append(emoji)
        if candidates:
            return random.choice(candidates), wanted
    # Clamp the sample size to [1, library size] before sampling.
    sample_count = min(max(sample_size, 1), len(all_emojis))
    sampled = random.sample(all_emojis, sample_count)
    recent_text = _build_recent_context_text(context_texts or [])
    return await _select_emoji_with_llm(
        sampled_emojis=sampled,
        reasoning=reasoning,
        context_text=recent_text,
    )
async def send_emoji_for_maisaka(
    *,
    stream_id: str,
    requested_emotion: str = "",
    reasoning: str = "",
    context_texts: Sequence[str] | None = None,
) -> MaisakaEmojiSendResult:
    """Select an emoji for Maisaka and send it to ``stream_id``.

    Args:
        stream_id: Target chat stream the emoji is delivered to.
        requested_emotion: Preferred emotion label; may be empty.
        reasoning: Why the emoji is being sent (fed to the LLM selector).
        context_texts: Recent chat lines used as selection context.

    Returns:
        MaisakaEmojiSendResult: Structured outcome; ``success`` is False on
        any selection, base64-conversion, or delivery failure.
    """
    # Normalize once instead of re-stripping in every return branch.
    normalized_requested_emotion = requested_emotion.strip()
    selected_emoji, matched_emotion = await select_emoji_for_maisaka(
        requested_emotion=requested_emotion,
        reasoning=reasoning,
        context_texts=context_texts,
    )
    if selected_emoji is None:
        return MaisakaEmojiSendResult(
            success=False,
            message="当前表情包库中没有可用表情。",
            requested_emotion=normalized_requested_emotion,
        )
    # Computed once up front; used by every branch below.
    description = selected_emoji.description.strip()
    emotions = _normalize_emotions(selected_emoji)
    try:
        emoji_base64 = ImageUtils.image_path_to_base64(str(selected_emoji.full_path))
        if not emoji_base64:
            raise ValueError("表情图片转换为 base64 失败")
    except Exception as exc:
        return MaisakaEmojiSendResult(
            success=False,
            message=f"发送表情包失败:{exc}",
            description=description,
            emotions=emotions,
            requested_emotion=normalized_requested_emotion,
            matched_emotion=matched_emotion,
        )
    try:
        sent = await send_service.emoji_to_stream(
            emoji_base64=emoji_base64,
            stream_id=stream_id,
            storage_message=True,
            set_reply=False,
            reply_message=None,
        )
    except Exception as exc:
        return MaisakaEmojiSendResult(
            success=False,
            message=f"发送表情包时发生异常:{exc}",
            description=description,
            emotions=emotions,
            requested_emotion=normalized_requested_emotion,
            matched_emotion=matched_emotion,
        )
    if not sent:
        return MaisakaEmojiSendResult(
            success=False,
            message="发送表情包失败。",
            description=description,
            emotions=emotions,
            requested_emotion=normalized_requested_emotion,
            matched_emotion=matched_emotion,
        )
    emoji_manager.update_emoji_usage(selected_emoji)
    # BUGFIX: the original message opened a fullwidth "(" that was never
    # closed, producing an unbalanced success message.
    success_message = (
        f"已发送表情包:{description}(情绪:{', '.join(emotions)})"
        if emotions
        else f"已发送表情包:{description}"
    )
    return MaisakaEmojiSendResult(
        success=True,
        message=success_message,
        emoji_base64=emoji_base64,
        description=description,
        emotions=emotions,
        requested_emotion=normalized_requested_emotion,
        matched_emotion=matched_emotion,
    )

View File

@@ -28,7 +28,7 @@ from src.llm_models.payload_content.tool_option import ToolCall, ToolDefinitionI
from src.services.llm_service import LLMServiceClient
from .builtin_tools import get_builtin_tools
from .context_messages import AssistantMessage, LLMContextMessage, SessionBackedMessage
from .context_messages import AssistantMessage, LLMContextMessage, SessionBackedMessage, ToolResultMessage
from .message_adapter import format_speaker_content
from .prompt_cli_renderer import PromptCLIVisualizer
@@ -609,6 +609,7 @@ class MaisakaChatLoopService:
selected_indices.reverse()
selected_history = [chat_history[index] for index in selected_indices]
selected_history = MaisakaChatLoopService._drop_leading_orphan_tool_results(selected_history)
return (
selected_history,
(
@@ -617,6 +618,36 @@ class MaisakaChatLoopService:
),
)
@staticmethod
def _drop_leading_orphan_tool_results(
    selected_history: List[LLMContextMessage],
) -> List[LLMContextMessage]:
    """Drop prefix tool-result messages whose originating tool_call is missing.

    A context window trimmed mid-conversation can begin with ToolResultMessage
    entries whose assistant tool_call fell outside the window; such orphans
    are removed from the front of the window.
    """
    if not selected_history:
        return selected_history
    known_call_ids: set = set()
    for entry in selected_history:
        if isinstance(entry, AssistantMessage):
            for call in entry.tool_calls:
                if call.call_id:
                    known_call_ids.add(call.call_id)
    drop_count = 0
    for entry in selected_history:
        if not isinstance(entry, ToolResultMessage):
            break
        if entry.tool_call_id in known_call_ids:
            break
        drop_count += 1
    if drop_count:
        return selected_history[drop_count:]
    return selected_history
@staticmethod
def build_chat_context(user_text: str) -> List[LLMContextMessage]:
"""根据用户输入构造最小对话上下文。

View File

@@ -328,15 +328,48 @@ class MaisakaReasoningEngine:
trimmed_history = list(self._runtime._chat_history)
removed_count = 0
while conversation_message_count >= self._runtime._max_context_size and trimmed_history:
while conversation_message_count > self._runtime._max_context_size and trimmed_history:
removed_message = trimmed_history.pop(0)
removed_count += 1
if removed_message.count_in_context:
conversation_message_count -= 1
trimmed_history, pruned_orphan_count = self._drop_leading_orphan_tool_results(trimmed_history)
removed_count += pruned_orphan_count
self._runtime._chat_history = trimmed_history
self._runtime._log_history_trimmed(removed_count, conversation_message_count)
@staticmethod
def _drop_leading_orphan_tool_results(
    chat_history: list[LLMContextMessage],
) -> tuple[list[LLMContextMessage], int]:
    """Remove prefix tool-result messages lacking a matching assistant tool_call.

    Returns the (possibly shortened) history together with the number of
    messages pruned, so the caller can include them in its trim accounting.
    """
    if not chat_history:
        return chat_history, 0
    known_call_ids: set = set()
    for entry in chat_history:
        if isinstance(entry, AssistantMessage):
            for call in entry.tool_calls:
                if call.call_id:
                    known_call_ids.add(call.call_id)
    pruned = 0
    for entry in chat_history:
        if not isinstance(entry, ToolResultMessage):
            break
        if entry.tool_call_id in known_call_ids:
            break
        pruned += 1
    if pruned == 0:
        return chat_history, 0
    return chat_history[pruned:], pruned
@staticmethod
def _calculate_similarity(text1: str, text2: str) -> float:
"""计算两个文本之间的相似度。
@@ -819,7 +852,77 @@ class MaisakaReasoningEngine:
"""执行 send_emoji 内置工具。"""
del context
return await self._handle_send_emoji(self._build_tool_call_from_invocation(invocation))
return await self._invoke_builtin_send_emoji(self._build_tool_call_from_invocation(invocation))
async def _invoke_builtin_send_emoji(self, tool_call: ToolCall) -> ToolExecutionResult:
    """Execute the built-in emoji-sending tool.

    Delegates selection/sending to the emoji system, records the sent emoji
    into chat history on success, and always returns a structured tool
    result so the LLM receives a well-formed response for this tool_call.
    """
    # Imported lazily — presumably to avoid an import cycle with the emoji
    # system at module load time; confirm before hoisting to module level.
    from src.chat.emoji_system.maisaka_tool import send_emoji_for_maisaka
    tool_args = tool_call.args or {}
    emotion = str(tool_args.get("emotion") or "").strip()
    # The last 5 non-empty history entries serve as selection context.
    context_texts = [
        message.get_history_text()
        for message in self._runtime._chat_history[-5:]
        if message.get_history_text().strip()
    ]
    # Skeleton of the structured payload attached to success AND failure results.
    structured_result: dict[str, Any] = {
        "success": False,
        "message": "",
        "description": "",
        "emotion": [],
        "requested_emotion": emotion,
        "matched_emotion": "",
    }
    logger.info(f"{self._runtime.log_prefix} 触发表情包发送工具,请求情绪={emotion!r}")
    try:
        send_result = await send_emoji_for_maisaka(
            stream_id=self._runtime.session_id,
            requested_emotion=emotion,
            reasoning=self._last_reasoning_content,
            context_texts=context_texts,
        )
    except Exception as exc:
        logger.exception(f"{self._runtime.log_prefix} 发送表情包时发生异常: {exc}")
        structured_result["message"] = f"发送表情包时发生异常:{exc}"
        return self._build_tool_failure_result(
            tool_call.func_name,
            structured_result["message"],
            structured_content=structured_result,
        )
    # Copy the send outcome into the structured payload before branching.
    structured_result["description"] = send_result.description
    structured_result["emotion"] = list(send_result.emotions)
    structured_result["matched_emotion"] = send_result.matched_emotion
    structured_result["message"] = send_result.message
    if send_result.success:
        logger.info(
            f"{self._runtime.log_prefix} 表情包发送成功: "
            f"描述={send_result.description!r} 情绪标签={send_result.emotions} "
            f"请求情绪={emotion!r} 命中情绪={send_result.matched_emotion!r}"
        )
        # Record the sent emoji in history so the model knows it was sent.
        self._append_sent_emoji_to_chat_history(
            emoji_base64=send_result.emoji_base64,
            success_message=send_result.message,
        )
        structured_result["success"] = True
        return self._build_tool_success_result(
            tool_call.func_name,
            send_result.message,
            structured_content=structured_result,
        )
    logger.warning(
        f"{self._runtime.log_prefix} 表情包发送失败: "
        f"请求情绪={emotion!r} 错误信息={send_result.message}"
    )
    return self._build_tool_failure_result(
        tool_call.func_name,
        structured_result["message"],
        structured_content=structured_result,
    )
async def _handle_tool_calls(
self,

View File

@@ -1,41 +0,0 @@
{
"manifest_version": 2,
"version": "2.0.0",
"name": "Emoji插件 (Emoji Actions)",
"description": "可以发送和管理 Emoji",
"author": {
"name": "SengokuCola",
"url": "https://github.com/MaiM-with-u"
},
"license": "GPL-v3.0-or-later",
"urls": {
"repository": "https://github.com/MaiM-with-u/maibot",
"homepage": "https://github.com/MaiM-with-u/maibot",
"documentation": "https://github.com/MaiM-with-u/maibot",
"issues": "https://github.com/MaiM-with-u/maibot/issues"
},
"host_application": {
"min_version": "1.0.0",
"max_version": "1.0.0"
},
"sdk": {
"min_version": "2.0.0",
"max_version": "2.99.99"
},
"dependencies": [],
"capabilities": [
"emoji.get_random",
"message.get_recent",
"message.build_readable",
"llm.generate",
"send.emoji",
"config.get"
],
"i18n": {
"default_locale": "zh-CN",
"supported_locales": [
"zh-CN"
]
},
"id": "builtin.emoji-plugin"
}

View File

@@ -1,129 +0,0 @@
"""Emoji 插件 — 新 SDK 版本
根据聊天上下文的情感,使用 LLM 选择并发送合适的表情包。
"""
from maibot_sdk import Action, MaiBotPlugin
from maibot_sdk.types import ActivationType
import random
class EmojiPlugin(MaiBotPlugin):
    """Emoji plugin: uses an LLM to pick and send a sticker matching the chat mood."""

    @Action(
        "emoji",
        description="发送表情包辅助表达情绪",
        activation_type=ActivationType.RANDOM,
        activation_probability=0.3,
        parallel_action=True,
        action_require=[
            "发送表情包辅助表达情绪",
            "表达情绪时可以选择使用",
            "不要连续发送,如果你已经发过[表情包],就不要选择此动作",
        ],
        associated_types=["emoji"],
    )
    async def handle_emoji(self, stream_id: str = "", reasoning: str = "", chat_id: str = "", **kwargs):
        """Run the emoji action: sample emojis, pick one by emotion, send it."""
        reason = reasoning or "表达当前情绪"
        # 1. Sample 30 random emojis from the library.
        sampled_emojis = await self.ctx.emoji.get_random(30)
        if not sampled_emojis:
            return False, "无法获取随机表情包"
        # 2. Group the sampled emojis by their emotion tag.
        emotion_map: dict[str, list] = {}
        for emoji in sampled_emojis:
            emo = emoji.get("emotion", "")
            if emo not in emotion_map:
                emotion_map[emo] = []
            emotion_map[emo].append(emoji)
        available_emotions = list(emotion_map.keys())
        if not available_emotions:
            # No emotion tags at all: fall back to a random send.
            chosen = random.choice(sampled_emojis)
            await self.ctx.send.emoji(chosen["base64"], stream_id)
            return True, "随机发送了表情包"
        # 3. Fetch recent messages to use as context for the LLM.
        messages_text = ""
        if chat_id:
            recent_messages = await self.ctx.message.get_recent(chat_id=chat_id, limit=5)
            if recent_messages:
                messages_text = await self.ctx.message.build_readable(
                    recent_messages,
                    timestamp_mode="normal_no_YMD",
                    truncate=False,
                )
        # 4. Build the prompt asking the LLM to choose one emotion label.
        available_emotions_str = "\n".join(available_emotions)
        prompt = f"""你正在进行QQ聊天你需要根据聊天记录选出一个合适的情感标签。
请你根据以下原因和聊天记录进行选择
原因:{reason}
聊天记录:
{messages_text}
这里是可用的情感标签:
{available_emotions_str}
请直接返回最匹配的那个情感标签,不要进行任何解释或添加其他多余的文字。
"""
        # 5. Call the LLM; on failure fall back to a random emoji.
        llm_result = await self.ctx.llm.generate(prompt=prompt, model_name="utils")
        if not llm_result or not llm_result.get("success"):
            chosen = random.choice(sampled_emojis)
            await self.ctx.send.emoji(chosen["base64"], stream_id)
            return True, "LLM调用失败随机发送了表情包"
        chosen_emotion = llm_result.get("response", "").strip().replace('"', "").replace("'", "")
        # 6. Match an emoji by the chosen emotion (random fallback if unknown).
        if chosen_emotion in emotion_map:
            chosen = random.choice(emotion_map[chosen_emotion])
        else:
            chosen = random.choice(sampled_emojis)
        # 7. Send the chosen emoji.
        send_ok = await self.ctx.send.emoji(chosen["base64"], stream_id)
        if send_ok:
            return True, f"成功发送表情包:[表情包:{chosen_emotion}]"
        return False, "发送表情包失败"

    async def on_load(self) -> None:
        """Handle plugin load."""
        # Read emoji.emoji_chance from plugin config to override the default probability.
        # NOTE(review): the returned value is discarded — presumably config.get
        # applies it as a side effect; confirm against the SDK.
        await self.ctx.config.get("emoji.emoji_chance")

    async def on_unload(self) -> None:
        """Handle plugin unload (no cleanup required)."""

    async def on_config_update(self, scope: str, config_data: dict[str, object], version: str) -> None:
        """Handle a configuration hot-reload event.

        Args:
            scope: Scope of the configuration change.
            config_data: Latest configuration data (unused).
            version: Configuration version string (unused).
        """
        del config_data
        del version
        if scope == "self":
            await self.ctx.config.get("emoji.emoji_chance")
def create_plugin() -> EmojiPlugin:
    """Instantiate and return a fresh Emoji plugin.

    Returns:
        EmojiPlugin: A newly constructed plugin instance.
    """
    plugin = EmojiPlugin()
    return plugin