feat:给maisaka增加了msg_id

This commit is contained in:
SengokuCola
2026-03-28 16:13:19 +08:00
parent 233114b22d
commit 61819b572d
5 changed files with 45 additions and 194 deletions

View File

@@ -27,11 +27,11 @@ def create_builtin_tools() -> List[ToolOption]:
reply_builder = ToolOptionBuilder()
reply_builder.set_name("reply")
reply_builder.set_description("Generate and emit a visible reply based on the current thought. You must specify the target user message_id to reply to.")
reply_builder.set_description("Generate and emit a visible reply based on the current thought. You must specify the target user msg_id to reply to.")
reply_builder.add_param(
name="message_id",
name="msg_id",
param_type=ToolParamType.STRING,
description="The message_id of the specific user message that this reply should target.",
description="The msg_id of the specific user message that this reply should target.",
required=True,
enum_values=None,
)

View File

@@ -122,7 +122,7 @@ class BufferCLI:
Main inner loop for the Maisaka planner.
Each round may produce internal thoughts and optionally call tools:
- reply(): generate a visible reply for the current round
- reply(msg_id): generate a visible reply for the current round
- no_reply(): skip visible output and continue the loop
- wait(seconds): wait for new user input
- stop(): stop the current inner loop and return to idle

View File

@@ -27,7 +27,7 @@ LLM_ROLE_KEY = "maisaka_llm_role"
TOOL_CALL_ID_KEY = "maisaka_tool_call_id"
TOOL_CALLS_KEY = "maisaka_tool_calls"
SPEAKER_PREFIX_PATTERN = re.compile(
r"^(?:(?P<timestamp>\d{2}:\d{2}:\d{2}))?(?:<mid:(?P<message_id>[^>]+)>)?\[(?P<speaker>[^\]]+)\](?P<content>.*)$",
r"^(?:(?P<timestamp>\d{2}:\d{2}:\d{2}))?(?:\[msg_id:(?P<message_id>[^\]]+)\])?\[(?P<speaker>[^\]]+)\](?P<content>.*)$",
re.DOTALL,
)
@@ -64,6 +64,23 @@ def _deserialize_tool_call(data: dict) -> ToolCall:
)
def _ensure_message_id_in_speaker_content(content: str, message_id: str) -> str:
    """Ensure speaker-formatted visible text carries a msg_id marker.

    If *content* matches the speaker-prefix layout and does not already carry
    a message id, rebuild it via ``format_speaker_content`` with *message_id*
    injected; otherwise return *content* unchanged.
    """
    parsed = SPEAKER_PREFIX_PATTERN.match(content or "")
    # Not speaker-formatted, or a msg_id marker is already present: leave as-is.
    if parsed is None or parsed.group("message_id"):
        return content
    raw_timestamp = parsed.group("timestamp")
    parsed_time = None
    if raw_timestamp:
        parsed_time = datetime.strptime(raw_timestamp, "%H:%M:%S")
    return format_speaker_content(
        parsed.group("speaker"),
        parsed.group("content"),
        parsed_time,
        message_id,
    )
def build_message(
role: str,
content: str = "",
@@ -89,6 +106,7 @@ def build_message(
timestamp=resolved_timestamp,
platform=platform,
)
normalized_content = _ensure_message_id_in_speaker_content(content, message.message_id) if content else content
message.message_info = MessageInfo(
user_info=user_info or _build_user_info_for_role(resolved_role),
group_info=group_info,
@@ -102,9 +120,9 @@ def build_message(
)
message.session_id = session_id
message.raw_message = raw_message if raw_message is not None else MessageSequence([])
if raw_message is None and content:
message.raw_message.text(content)
visible_text = display_text if display_text is not None else content
if raw_message is None and normalized_content:
message.raw_message.text(normalized_content)
visible_text = display_text if display_text is not None else normalized_content
message.processed_plain_text = visible_text
message.display_message = visible_text
message.initialized = True
@@ -119,7 +137,7 @@ def format_speaker_content(
) -> str:
"""Format visible conversation content with an explicit speaker label."""
time_prefix = timestamp.strftime("%H:%M:%S") if timestamp is not None else ""
message_id_prefix = f"<mid:{message_id}>" if message_id else ""
message_id_prefix = f"[msg_id:{message_id}]" if message_id else ""
return f"{time_prefix}{message_id_prefix}[{speaker_name}]{content}"
@@ -141,7 +159,20 @@ def build_visible_text_from_sequence(message_sequence: MessageSequence) -> str:
parts: list[str] = []
for component in message_sequence.components:
if isinstance(component, TextComponent):
parts.append(SPEAKER_PREFIX_PATTERN.sub(r"\g<timestamp>[\g<speaker>]\g<content>", component.text))
match = SPEAKER_PREFIX_PATTERN.match(component.text or "")
if not match:
parts.append(component.text)
continue
normalized_parts: list[str] = []
if match.group("timestamp"):
normalized_parts.append(match.group("timestamp"))
message_id = match.group("message_id")
if message_id:
normalized_parts.append(f"[msg_id:{message_id}]")
normalized_parts.append(f"[{match.group('speaker')}]")
normalized_parts.append(match.group("content"))
parts.append("".join(normalized_parts))
continue
if isinstance(component, EmojiComponent):

View File

@@ -237,17 +237,18 @@ class MaisakaReasoningEngine:
latest_thought: str,
anchor_message: SessionMessage,
) -> bool:
target_message_id = str((tool_call.args or {}).get("message_id", "")).strip()
tool_args = tool_call.args or {}
target_message_id = str(tool_args.get("msg_id") or "").strip()
if not target_message_id:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, "reply requires a valid message_id argument.")
self._build_tool_message(tool_call, "reply requires a valid msg_id argument.")
)
return False
target_message = self._runtime._source_messages_by_id.get(target_message_id)
if target_message is None:
self._runtime._chat_history.append(
self._build_tool_message(tool_call, f"reply target message_id not found: {target_message_id}")
self._build_tool_message(tool_call, f"reply target msg_id not found: {target_message_id}")
)
return False

View File

@@ -29,11 +29,8 @@ from .message_adapter import (
)
from .reasoning_engine import MaisakaReasoningEngine
from .tool_handlers import (
handle_list_files,
handle_mcp_tool,
handle_read_file,
handle_unknown_tool,
handle_write_file,
)
logger = get_logger("maisaka_runtime")
@@ -165,62 +162,6 @@ class MaisakaHeartFlowChatting:
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} MaiSaka runtime loop cancelled")
async def _internal_loop(self) -> None:
    """Process one batch of cached messages and run the corresponding inner reasoning rounds."""
    try:
        while self._running:
            # Block until the outer loop enqueues a batch of buffered user messages.
            cached_messages = await self._internal_turn_queue.get()
            if not cached_messages:
                # Empty batch: acknowledge the queue item and keep waiting.
                self._internal_turn_queue.task_done()
                continue
            self._agent_state = self._STATE_RUNNING
            await self._ingest_messages(cached_messages)
            # The newest message anchors platform/group routing for this turn.
            anchor_message = cached_messages[-1]
            try:
                for round_index in range(self._max_internal_rounds):
                    cycle_detail = self._start_cycle()
                    logger.info(
                        f"{self.log_prefix} MaiSaka cycle={cycle_detail.cycle_id} "
                        f"round={round_index + 1}/{self._max_internal_rounds} "
                        f"context_size={len(self._chat_history)}"
                    )
                    try:
                        planner_started_at = time.time()
                        response = await self._llm_service.chat_loop_step(self._chat_history)
                        cycle_detail.time_records["planner"] = time.time() - planner_started_at
                        # Stamp routing metadata onto the planner's raw message
                        # before recording it in the chat history.
                        response.raw_message.platform = anchor_message.platform
                        response.raw_message.session_id = self.session_id
                        response.raw_message.message_info.group_info = self._build_group_info(anchor_message)
                        self._chat_history.append(response.raw_message)
                        if response.tool_calls:
                            tool_started_at = time.time()
                            should_pause = await self._handle_tool_calls(
                                response.tool_calls,
                                response.content or "",
                                anchor_message,
                            )
                            cycle_detail.time_records["tool_calls"] = time.time() - tool_started_at
                            if should_pause:
                                # A tool requested a pause (reply sent / wait / stop).
                                break
                            continue
                        if response.content:
                            # Internal thought only: run another round.
                            continue
                        # No content and no tool calls: end the inner rounds.
                        break
                    finally:
                        self._end_cycle(cycle_detail)
            finally:
                # Always acknowledge the batch, even if a round raised or broke early.
                if self._agent_state == self._STATE_RUNNING:
                    self._agent_state = self._STATE_STOP
                self._internal_turn_queue.task_done()
    except asyncio.CancelledError:
        logger.info(f"{self.log_prefix} Maisaka internal loop cancelled")
async def _wait_for_trigger(self) -> bool:
"""等待外部触发。返回 True 表示有新消息事件,返回 False 表示等待超时。"""
if self._agent_state != self._STATE_WAIT:
@@ -383,128 +324,6 @@ class MaisakaHeartFlowChatting:
f"remaining_user_messages={user_message_count}"
)
async def _handle_tool_calls(
    self,
    tool_calls: list[ToolCall],
    latest_thought: str,
    anchor_message: SessionMessage,
) -> bool:
    """Dispatch the planner's tool calls for one inner round.

    Args:
        tool_calls: Tool calls emitted by the planner this round.
        latest_thought: The planner's visible thought text for this round.
        anchor_message: The message anchoring platform/group routing.

    Returns:
        True when the round should pause (a reply was sent, or the agent
        entered a wait/stop state); False when the inner loop should continue.
    """
    for tool_call in tool_calls:
        if tool_call.func_name == "reply":
            reply_sent = await self._handle_reply(tool_call, latest_thought, anchor_message)
            if reply_sent:
                return True
            # Reply failed (bad target / send error): try remaining calls.
            continue
        if tool_call.func_name == "no_reply":
            self._chat_history.append(
                self._build_tool_message(
                    tool_call,
                    "No visible reply was sent for this round.",
                )
            )
            continue
        if tool_call.func_name == "wait":
            seconds = (tool_call.args or {}).get("seconds", 30)
            # Coerce the LLM-supplied value defensively; fall back to 30s.
            try:
                wait_seconds = int(seconds)
            except (TypeError, ValueError):
                wait_seconds = 30
            wait_seconds = max(0, wait_seconds)
            self._chat_history.append(
                self._build_tool_message(
                    tool_call,
                    f"Waiting for future input for up to {wait_seconds} seconds.",
                )
            )
            self._enter_wait_state(seconds=wait_seconds)
            return True
        if tool_call.func_name == "stop":
            self._chat_history.append(
                self._build_tool_message(
                    tool_call,
                    "Conversation loop paused until a new message arrives.",
                )
            )
            self._enter_stop_state()
            return True
        # NOTE: the former write_file / read_file / list_files branches were
        # gated with `if False and ...` and therefore permanently unreachable;
        # they have been removed as dead code.
        if self._mcp_manager and self._mcp_manager.is_mcp_tool(tool_call.func_name):
            await handle_mcp_tool(tool_call, self._chat_history, self._mcp_manager)
            continue
        await handle_unknown_tool(tool_call, self._chat_history)
    return False
async def _handle_reply(self, tool_call: ToolCall, latest_thought: str, anchor_message: SessionMessage) -> bool:
    """Generate and send a visible reply targeting a specific user message.

    Args:
        tool_call: The ``reply`` tool call; its args carry the target msg_id.
        latest_thought: The planner's current thought, used to guide the reply.
        anchor_message: Fallback source for platform routing.

    Returns:
        True when a reply was generated and sent; False on a missing/unknown
        target or a send failure (a tool message records the reason either way).
    """
    tool_args = tool_call.args or {}
    # The reply tool declares its parameter as "msg_id"; accept the legacy
    # "message_id" spelling as a backward-compatible fallback.
    target_message_id = str(tool_args.get("msg_id") or tool_args.get("message_id") or "").strip()
    if not target_message_id:
        self._chat_history.append(
            self._build_tool_message(tool_call, "reply requires a valid msg_id argument.")
        )
        return False
    target_message = self._source_messages_by_id.get(target_message_id)
    if target_message is None:
        self._chat_history.append(
            self._build_tool_message(tool_call, f"reply target msg_id not found: {target_message_id}")
        )
        return False
    reply_text = await self._llm_service.generate_reply(latest_thought, self._chat_history)
    sent = await send_service.text_to_stream(
        text=reply_text,
        stream_id=self.session_id,
        set_reply=True,
        reply_message=target_message,
        typing=False,
    )
    tool_result = "Visible reply generated and sent." if sent else "Visible reply generation succeeded but send failed."
    self._chat_history.append(self._build_tool_message(tool_call, tool_result))
    if not sent:
        return False
    bot_name = global_config.bot.nickname.strip() or "MaiSaka"
    # Record the bot's own reply in history as speaker-formatted user content
    # so later rounds can see what was actually said.
    self._chat_history.append(
        build_message(
            role="user",
            content=format_speaker_content(bot_name, reply_text, datetime.now()),
            source="guided_reply",
            platform=target_message.platform or anchor_message.platform,
            session_id=self.session_id,
            group_info=self._build_group_info(target_message),
            user_info=self._build_runtime_user_info(),
        )
    )
    return True
def _build_tool_message(self, tool_call: ToolCall, content: str) -> SessionMessage:
    """Wrap a tool-call result string as a tool-role session message."""
    # Tool results are attributed to a synthetic "tool" user.
    tool_user = UserInfo(user_id="maisaka_tool", user_nickname="tool", user_cardname=None)
    return build_message(
        role="tool",
        content=content,
        source="tool",
        tool_call_id=tool_call.call_id,
        platform=self.chat_stream.platform,
        session_id=self.session_id,
        group_info=self._build_group_info(),
        user_info=tool_user,
    )
def _build_runtime_user_info(self) -> UserInfo:
if self.chat_stream.user_id:
return UserInfo(