Merge branch 'Mai-with-u:r-dev' into r-dev
@@ -1,12 +0,0 @@
-你是一个对话上下文总结模块。你的任务是对早期的对话内容进行简洁的总结,以便存入记忆系统。
-
-总结要求:
-1. 提取对话中的关键信息(人名、事件、时间、地点等)
-2. 记录用户的态度、情绪和偏好
-3. 保留重要的对话内容和结论
-4. 总结要简洁明了,便于后续检索和理解
-5. 用第三人称客观叙述,不要包含「我记得」「之前说过」等指代词
-
-输出格式:
-- 2-5 句话的简洁总结
-- 直接输出总结内容,不要有前缀或格式标题
@@ -1,8 +1,8 @@
 你的任务是分析聊天和聊天中的互动情况。
-你需要关注 麦麦(AI) 与用户的对话来为选择正确的动作和行为提供建议
+你需要关注 {bot_name}(AI) 与不同用户的对话来为选择正确的动作和行为提供建议
 
 【参考信息】
-麦麦的人设:{identity}
+{bot_name}的人设:{identity}
 【参考信息结束】
 
 你需要根据提供的参考信息,当前场景和输出规则来进行分析
@@ -11,16 +11,17 @@
 
 
 你可以使用这些工具:
-• wait(seconds) - 暂时停止对话,等待(seconds)秒,把话语权交给用户,等待对方新的发言。
-• stop() - 结束对话,不进行任何回复,直到对方有新消息。
-- `reply()`:当你判断现在应该正式对用户发出一条可见回复时调用。调用后系统会基于你当前这轮的想法生成一条真正展示给用户的回复。
-- `no_reply()`:当你判断现在不应该发言,应该继续内部思考时调用。这个工具不会做任何外部行为,只会继续下一轮循环。
+- wait(seconds) - 暂时停止对话,等待(seconds)秒,把话语权交给用户,等待对方新的发言。
+- stop() - 结束对话,不进行任何回复,直到对方有新消息。
+- reply():当你判断现在应该正式对用户发出一条可见回复时调用。调用后系统会基于你当前这轮的想法生成一条真正展示给用户的回复。
+- no_reply():当你判断现在不应该发言,应该继续内部思考时调用。这个工具不会做任何外部行为,只会继续下一轮循环。
 {file_tools_section}
 
 工具使用规则:
 1.如果麦麦已经回复,但用户暂时没有新的回复,且没有新信息需要搜集,使用wait或者stop进行等待
 2.如果用户有新发言,但是你评估用户还有后续发言尚未发送,可以适当等待让用户说完
-3.如果你想指导麦麦直接发言,可以不使用任何工具
+3.在特定情况下也可以连续回复,例如想要追问,或者补充自己先前的发言,可以不使用stop或者wait
+4.如果你想指导麦麦直接发言,可以不使用任何工具
 
 你的输出规则:
 1. 默认直接输出你当前的最新分析,不要重复之前的分析内容。
@@ -1,12 +0,0 @@
-你是一个对话上下文总结模块。你的任务是对早期的对话内容进行简洁的总结,以便存入记忆系统。
-
-总结要求:
-1. 提取对话中的关键信息(人名、事件、时间、地点等)
-2. 记录用户的态度、情绪和偏好
-3. 保留重要的对话内容和结论
-4. 总结要简洁明了,便于后续检索和理解
-5. 用第三人称客观叙述,不要包含「我记得」「之前说过」等指代词
-
-输出格式:
-- 2-5 句话的简洁总结
-- 直接输出总结内容,不要有前缀或格式标题
@@ -56,7 +56,7 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config"
 BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
 MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
 MMC_VERSION: str = "1.0.0"
-CONFIG_VERSION: str = "8.1.2"
+CONFIG_VERSION: str = "8.1.4"
 MODEL_CONFIG_VERSION: str = "1.12.0"
 
 logger = get_logger("config")
@@ -1618,6 +1618,24 @@ class MaiSakaConfig(ConfigBase):
     )
     """是否在 CLI 中显示 analyze_timing 的 Prompt"""
 
+    show_thinking: bool = Field(
+        default=True,
+        json_schema_extra={
+            "x-widget": "switch",
+            "x-icon": "brain",
+        },
+    )
+    """是否在 CLI 中显示内心思考和完整 Prompt"""
+
+    user_name: str = Field(
+        default="用户",
+        json_schema_extra={
+            "x-widget": "input",
+            "x-icon": "user",
+        },
+    )
+    """MaiSaka 中用户的显示名称"""
+
 class PluginRuntimeConfig(ConfigBase):
     """插件运行时配置类"""
 
@@ -2,7 +2,7 @@
 MaiSaka built-in tool definitions.
 """
 
-from typing import Any, Dict, List
+from typing import List
 
 from src.llm_models.payload_content.tool_option import ToolOption, ToolParamType
 
@@ -43,44 +43,6 @@ def create_builtin_tools() -> List[ToolOption]:
     return tools
 
 
-def builtin_tools_as_dicts() -> List[Dict[str, Any]]:
-    """Return built-in tools as plain dictionaries."""
-    return [
-        {
-            "name": "wait",
-            "description": "Pause speaking and wait for the user to provide more input.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "seconds": {
-                        "type": "number",
-                        "description": "How many seconds to wait before timing out.",
-                    }
-                },
-                "required": ["seconds"],
-            },
-        },
-        {
-            "name": "reply",
-            "description": "Generate and emit a visible reply based on the current thought.",
-            "parameters": {"type": "object", "properties": {}, "required": []},
-        },
-        {
-            "name": "no_reply",
-            "description": "Do not emit a visible reply this round and continue thinking.",
-            "parameters": {"type": "object", "properties": {}, "required": []},
-        },
-        {
-            "name": "stop",
-            "description": "Stop the current inner loop and return control to the outer chat flow.",
-            "parameters": {"type": "object", "properties": {}, "required": []},
-        },
-    ]
-
-
 def get_builtin_tools() -> List[ToolOption]:
     """Return built-in tools."""
     return create_builtin_tools()
-
-
-BUILTIN_TOOLS_DICTS = builtin_tools_as_dicts()
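With builtin_tools_as_dicts() and BUILTIN_TOOLS_DICTS removed, the only remaining entry point is get_builtin_tools(), which returns ToolOption objects. A minimal sketch of an ad-hoc dict view, should a caller still need one (the module path and anything beyond the name/description attributes are assumptions based on other hunks in this commit):

    from typing import Any, Dict, List

    from src.llm_models.payload_content.tool_option import ToolOption
    from src.maisaka.builtin_tools import get_builtin_tools  # path assumed from this diff

    def tools_as_dicts(tools: List[ToolOption]) -> List[Dict[str, Any]]:
        # Only name/description are relied on elsewhere in this diff; more fields would be guesses.
        return [{"name": tool.name, "description": tool.description} for tool in tools]

    print(tools_as_dicts(get_builtin_tools()))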
src/maisaka/cli.py (1019 lines): File diff suppressed because it is too large
@@ -27,6 +27,8 @@ ENABLE_READ_FILE = global_config.maisaka.enable_read_file
 ENABLE_LIST_FILES = global_config.maisaka.enable_list_files
 SHOW_ANALYZE_COGNITION_PROMPT = global_config.maisaka.show_analyze_cognition_prompt
 SHOW_ANALYZE_TIMING_PROMPT = global_config.maisaka.show_analyze_timing_prompt
+SHOW_THINKING = global_config.maisaka.show_thinking
+USER_NAME = global_config.maisaka.user_name.strip() or "用户"
 
 
 # ──────────────────── Rich 主题 & Console ────────────────────
@@ -8,8 +8,13 @@ MaiSaka - Emotion 模块
 
 from typing import List, Optional
 
+from src.common.data_models.mai_message_data_model import MaiMessage
+
+from .config import USER_NAME
+from .message_adapter import get_message_role, get_message_text
+
 
-def extract_user_messages(chat_history: List[dict], limit: Optional[int] = None) -> List[dict]:
+def extract_user_messages(chat_history: List[MaiMessage], limit: Optional[int] = None) -> List[MaiMessage]:
     """
     从对话历史中提取用户消息。
 
@@ -20,13 +25,13 @@ def extract_user_messages(chat_history: List[dict], limit: Optional[int] = None)
     Returns:
         只包含用户消息的列表
     """
-    user_messages = [msg for msg in chat_history if msg.get("role") == "user"]
+    user_messages = [msg for msg in chat_history if get_message_role(msg) == "user"]
     if limit and len(user_messages) > limit:
         return user_messages[-limit:]
     return user_messages
 
 
-def build_emotion_context(chat_history: List[dict]) -> str:
+def build_emotion_context(chat_history: List[MaiMessage]) -> str:
     """
     构建用于情绪分析的对话上下文文本。
 
@@ -41,11 +46,11 @@ def build_emotion_context(chat_history: List[dict]) -> str:
 
     context_parts = []
     for msg in recent_messages:
-        role = msg.get("role", "")
-        content = msg.get("content", "")
+        role = get_message_role(msg)
+        content = get_message_text(msg)
 
         if role == "user":
-            context_parts.append(f"用户: {content}")
+            context_parts.append(f"{USER_NAME}: {content}")
         elif role == "assistant":
             # 只显示 assistant 的实际发言,跳过感知信息
             if "【AI 感知】" not in content:
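The emotion helpers now take MaiMessage objects instead of plain dicts. A minimal usage sketch based on the signatures above (module paths are assumptions; the real call sites live in the MaiSaka CLI):

    from src.maisaka.emotion import build_emotion_context, extract_user_messages  # module path assumed
    from src.maisaka.message_adapter import build_message

    history = [
        build_message(role="user", content="[用户]今天有点累", source="user"),
        build_message(role="assistant", content="要不要早点休息?", source="assistant"),
    ]

    latest_user_messages = extract_user_messages(history, limit=2)  # keeps only role == "user"
    print(build_emotion_context(history))  # "{USER_NAME}: ..." / "助手: ..." lines for the emotion prompt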
@@ -1,171 +1,58 @@
 """
-MaiSaka - 了解模块
-负责从对话中提取和存储用户个人特征信息。
+MaiSaka knowledge retrieval helpers.
 """
 
 from typing import List
 
+from src.common.data_models.mai_message_data_model import MaiMessage
+
 from .knowledge_store import KNOWLEDGE_CATEGORIES, get_knowledge_store
 
-
-def build_knowledge_summary() -> str:
-    """
-    构建了解分类摘要,用于 LLM 请求。
-
-    Returns:
-        格式化的分类列表文本
-    """
-    store = get_knowledge_store()
-    return store.get_categories_summary()
+NO_RESULT_KEYWORDS = [
+    "\u65e0",
+    "\u6ca1\u6709",
+    "\u4e0d\u9002\u7528",
+    "\u65e0\u9700",
+    "\u65e0\u76f8\u5173",
+]
 
 
 def extract_category_ids_from_result(result: str) -> List[str]:
-    """
-    从 LLM 返回结果中提取分类编号。
-
-    Args:
-        result: LLM 返回的结果文本
-
-    Returns:
-        分类编号列表
-    """
+    """Extract valid category ids from an LLM result string."""
     if not result:
         return []
 
-    # 检查是否表示"无相关内容"
-    if any(keyword in result for keyword in ["无", "没有", "不适用", "无需", "无相关"]):
+    normalized = result.strip()
+    if not normalized:
         return []
 
-    # 解析编号(支持逗号分隔、空格分隔、换行分隔)
-    category_ids = []
-    for part in result.replace(",", " ").replace(",", " ").replace("\n", " ").split():
-        part = part.strip()
-        if part in KNOWLEDGE_CATEGORIES:
-            category_ids.append(part)
+    lowered = normalized.lower()
+    if any(keyword in lowered for keyword in ["none", "no relevant", "no_need", "no need"]):
+        return []
+    if any(keyword in normalized for keyword in NO_RESULT_KEYWORDS):
+        return []
+
+    category_ids: List[str] = []
+    for part in normalized.replace(",", " ").replace("\uff0c", " ").replace("\n", " ").split():
+        candidate = part.strip()
+        if candidate in KNOWLEDGE_CATEGORIES and candidate not in category_ids:
+            category_ids.append(candidate)
 
     return category_ids
 
 
-def format_context_for_memory(context_messages: List[dict]) -> str:
-    """
-    格式化上下文消息为文本,用于记忆分析。
-
-    Args:
-        context_messages: 上下文消息列表
-
-    Returns:
-        格式化后的文本
-    """
-    parts = []
-    for msg in context_messages:
-        role = msg.get("role", "")
-        content = msg.get("content", "")
-
-        if role == "user":
-            parts.append(f"用户: {content}")
-        elif role == "assistant":
-            # 跳过感知消息
-            if "【AI 感知】" not in content:
-                parts.append(f"助手: {content}")
-
-    return "\n".join(parts)
-
-
-async def store_knowledge_from_context(
-    llm_service,
-    context_messages: List[dict],
-    store_result_callback=None,
-) -> int:
-    """
-    记忆部分:从上下文中提取并存储了解信息。
-
-    在上下文裁切时触发:
-    1. 请求 LLM 分析聊天内容涉及哪些分类
-    2. 为每个分类创建 subAgent 提取相关内容
-    3. 存入了解列表
-
-    Args:
-        llm_service: LLM 服务实例
-        context_messages: 需要分析的上下文消息
-        store_result_callback: 存储结果回调函数
-
-    Returns:
-        成功存储的了解信息数量
-    """
-    store = get_knowledge_store()
-    context_text = format_context_for_memory(context_messages)
-    categories_summary = build_knowledge_summary()
-
-    if not context_text:
-        return 0
-
-    try:
-        # 第一步:分析涉及哪些分类
-        category_ids = await llm_service.analyze_knowledge_categories(context_messages, categories_summary)
-
-        if not category_ids:
-            return 0
-
-        # 第二步:为每个分类提取内容并存储
-        stored_count = 0
-        for category_id in category_ids:
-            try:
-                # 提取该分类的相关内容
-                extracted_content = await llm_service.extract_knowledge_for_category(
-                    context_messages, category_id, store.get_category_name(category_id)
-                )
-
-                if extracted_content:
-                    # 存储到了解列表
-                    success = store.add_knowledge(
-                        category_id=category_id, content=extracted_content, metadata={"source": "context_compression"}
-                    )
-                    if success:
-                        stored_count += 1
-                        if store_result_callback:
-                            store_result_callback(category_id, store.get_category_name(category_id), extracted_content)
-            except Exception:
-                # 单个分类失败不影响其他分类
-                continue
-
-        return stored_count
-
-    except Exception:
-        return 0
-
-
 async def retrieve_relevant_knowledge(
     llm_service,
-    chat_history: List[dict],
+    chat_history: List[MaiMessage],
 ) -> str:
-    """
-    提取部分:根据当前上下文检索相关的了解信息。
-
-    在每次对话前触发(EQ 模块和 timing 模块位置):
-    1. 请求 LLM 分析需要哪些分类的了解内容
-    2. 提取对应分类的所有内容并拼接
-    3. 返回格式化后的了解内容
-
-    Args:
-        llm_service: LLM 服务实例
-        chat_history: 当前对话历史
-
-    Returns:
-        格式化后的了解内容文本
-    """
+    """Retrieve formatted knowledge snippets relevant to the current chat history."""
     store = get_knowledge_store()
     categories_summary = store.get_categories_summary()
 
     try:
-        # 分析需要哪些分类
         category_ids = await llm_service.analyze_knowledge_need(chat_history, categories_summary)
 
         if not category_ids:
             return ""
 
-        # 获取并格式化了解内容
-        formatted_knowledge = store.get_formatted_knowledge(category_ids)
-
-        return formatted_knowledge
-
+        return store.get_formatted_knowledge(category_ids)
     except Exception:
         return ""
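The rewritten extract_category_ids_from_result normalizes the LLM output, checks English and escaped-Chinese "no result" keywords (NO_RESULT_KEYWORDS), then splits on commas, full-width commas, and newlines while de-duplicating ids. An illustrative check, assuming KNOWLEDGE_CATEGORIES contains ids such as "1" and "3" (the module path is also an assumption):

    from src.maisaka.knowledge import extract_category_ids_from_result  # module path assumed

    print(extract_category_ids_from_result("1, 3\n3"))  # ["1", "3"] if both ids exist; duplicates dropped
    print(extract_category_ids_from_result("无相关"))    # [] via the escaped Chinese keywords
    print(extract_category_ids_from_result("none"))      # [] via the lower-cased English keywords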
@@ -5,78 +5,47 @@ MaiSaka LLM 服务 - 使用主项目 LLM 系统
|
|||||||
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
|
||||||
import json
|
|
||||||
import random
|
import random
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Any, List, Literal, Optional
|
from typing import Any, List, Optional
|
||||||
|
|
||||||
from rich.console import Group
|
from rich.console import Group
|
||||||
from rich.panel import Panel
|
from rich.panel import Panel
|
||||||
from rich.pretty import Pretty
|
from rich.pretty import Pretty
|
||||||
from rich.text import Text
|
from rich.text import Text
|
||||||
|
|
||||||
|
from src.common.data_models.mai_message_data_model import MaiMessage
|
||||||
from src.common.logger import get_logger
|
from src.common.logger import get_logger
|
||||||
from src.config.config import config_manager, global_config
|
from src.config.config import config_manager, global_config
|
||||||
from src.llm_models.payload_content.message import MessageBuilder, RoleType
|
from src.llm_models.payload_content.message import Message, MessageBuilder, RoleType
|
||||||
from src.llm_models.payload_content.tool_option import ToolCall as ToolCallOption, ToolOption
|
from src.llm_models.payload_content.tool_option import ToolCall, ToolOption
|
||||||
from src.llm_models.utils_model import LLMRequest
|
from src.llm_models.utils_model import LLMRequest
|
||||||
from src.prompt.prompt_manager import prompt_manager
|
from src.prompt.prompt_manager import prompt_manager
|
||||||
|
|
||||||
from . import config
|
from . import config
|
||||||
from .config import console
|
from .config import console
|
||||||
from .builtin_tools import get_builtin_tools
|
from .builtin_tools import get_builtin_tools
|
||||||
|
from .message_adapter import (
|
||||||
|
build_message,
|
||||||
|
format_speaker_content,
|
||||||
|
get_message_kind,
|
||||||
|
get_message_role,
|
||||||
|
get_message_text,
|
||||||
|
get_tool_call_id,
|
||||||
|
get_tool_calls,
|
||||||
|
remove_last_perception,
|
||||||
|
to_llm_message,
|
||||||
|
)
|
||||||
|
|
||||||
logger = get_logger("maisaka_llm")
|
logger = get_logger("maisaka_llm")
|
||||||
|
|
||||||
# ──────────────────── 消息类型 ────────────────────
|
|
||||||
|
|
||||||
MessageType = Literal["user", "assistant", "system", "perception"]
|
|
||||||
|
|
||||||
# 内部使用的字段前缀,用于标记不应发送给 API 的元数据
|
|
||||||
INTERNAL_FIELD_PREFIX = "_"
|
|
||||||
|
|
||||||
# 消息类型字段名
|
|
||||||
MSG_TYPE_FIELD = "_type"
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class ToolCall:
|
|
||||||
"""工具调用信息"""
|
|
||||||
|
|
||||||
id: str
|
|
||||||
name: str
|
|
||||||
arguments: dict
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ChatResponse:
|
class ChatResponse:
|
||||||
"""LLM 对话循环单步响应"""
|
"""LLM 对话循环单步响应"""
|
||||||
|
|
||||||
content: Optional[str]
|
content: Optional[str]
|
||||||
tool_calls: List[ToolCall]
|
tool_calls: List[ToolCall]
|
||||||
raw_message: dict # 可直接追加到对话历史的消息字典
|
raw_message: MaiMessage
|
||||||
|
|
||||||
|
|
||||||
# ──────────────────── 工具函数 ────────────────────
|
|
||||||
|
|
||||||
|
|
||||||
def build_message(role: str, content: str, msg_type: MessageType = "user", **kwargs) -> dict:
|
|
||||||
"""构建消息字典,包含消息类型标记。"""
|
|
||||||
msg = {
|
|
||||||
"role": role,
|
|
||||||
"content": content,
|
|
||||||
MSG_TYPE_FIELD: msg_type,
|
|
||||||
"_time": datetime.now().strftime("%H:%M:%S"),
|
|
||||||
**kwargs,
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
|
|
||||||
|
|
||||||
def remove_last_perception(messages: list[dict]) -> None:
|
|
||||||
"""移除最后一条感知消息(直接修改原列表)。"""
|
|
||||||
for i in range(len(messages) - 1, -1, -1):
|
|
||||||
if messages[i].get(MSG_TYPE_FIELD) == "perception":
|
|
||||||
messages.pop(i)
|
|
||||||
break
|
|
||||||
|
|
||||||
|
|
||||||
class MaiSakaLLMService:
|
class MaiSakaLLMService:
|
||||||
@@ -132,7 +101,6 @@ class MaiSakaLLMService:
|
|||||||
if chat_system_prompt is None:
|
if chat_system_prompt is None:
|
||||||
try:
|
try:
|
||||||
chat_prompt = prompt_manager.get_prompt("maidairy_chat")
|
chat_prompt = prompt_manager.get_prompt("maidairy_chat")
|
||||||
logger.info("成功加载 maidairy_chat 提示词模板")
|
|
||||||
tools_section = ""
|
tools_section = ""
|
||||||
if config.ENABLE_WRITE_FILE:
|
if config.ENABLE_WRITE_FILE:
|
||||||
tools_section += "\n• write_file(filename, content) — 在 mai_files 目录下写入文件。"
|
tools_section += "\n• write_file(filename, content) — 在 mai_files 目录下写入文件。"
|
||||||
@@ -142,6 +110,7 @@ class MaiSakaLLMService:
|
|||||||
tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。"
|
tools_section += "\n• list_files() — 获取 mai_files 目录下所有文件的元信息列表。"
|
||||||
|
|
||||||
chat_prompt.add_context("file_tools_section", tools_section if tools_section else "")
|
chat_prompt.add_context("file_tools_section", tools_section if tools_section else "")
|
||||||
|
chat_prompt.add_context("bot_name", global_config.bot.nickname)
|
||||||
chat_prompt.add_context("identity", personality_prompt)
|
chat_prompt.add_context("identity", personality_prompt)
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
@@ -167,8 +136,6 @@ class MaiSakaLLMService:
|
|||||||
self._emotion_prompt: Optional[str] = None
|
self._emotion_prompt: Optional[str] = None
|
||||||
self._cognition_prompt: Optional[str] = None
|
self._cognition_prompt: Optional[str] = None
|
||||||
self._timing_prompt: Optional[str] = None
|
self._timing_prompt: Optional[str] = None
|
||||||
self._context_summarize_prompt: Optional[str] = None
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
@@ -184,9 +151,6 @@ class MaiSakaLLMService:
|
|||||||
self._timing_prompt = loop.run_until_complete(
|
self._timing_prompt = loop.run_until_complete(
|
||||||
prompt_manager.render_prompt(prompt_manager.get_prompt("maidairy_timing"))
|
prompt_manager.render_prompt(prompt_manager.get_prompt("maidairy_timing"))
|
||||||
)
|
)
|
||||||
self._context_summarize_prompt = loop.run_until_complete(
|
|
||||||
prompt_manager.render_prompt(prompt_manager.get_prompt("maidairy_context_summarize"))
|
|
||||||
)
|
|
||||||
logger.info("成功加载 MaiSaka 子模块提示词")
|
logger.info("成功加载 MaiSaka 子模块提示词")
|
||||||
finally:
|
finally:
|
||||||
loop.close()
|
loop.close()
|
||||||
@@ -367,12 +331,12 @@ class MaiSakaLLMService:
|
|||||||
params.append((param.name, param.param_type, param.description, param.required, param.enum_values))
|
params.append((param.name, param.param_type, param.description, param.required, param.enum_values))
|
||||||
return {"name": tool.name, "description": tool.description, "parameters": params}
|
return {"name": tool.name, "description": tool.description, "parameters": params}
|
||||||
|
|
||||||
async def chat_loop_step(self, chat_history: List[dict]) -> ChatResponse:
|
async def chat_loop_step(self, chat_history: list[MaiMessage]) -> ChatResponse:
|
||||||
"""执行对话循环的一步 - 使用 tool_use 模型"""
|
"""执行对话循环的一步 - 使用 tool_use 模型"""
|
||||||
|
|
||||||
def message_factory(client) -> List:
|
def message_factory(client) -> list[Message]:
|
||||||
"""将 MaiSaka 的 chat_history 转换为主项目的 Message 格式"""
|
"""将 MaiSaka 的 chat_history 转换为主项目的 Message 格式"""
|
||||||
messages = []
|
messages: list[Message] = []
|
||||||
|
|
||||||
# 首先添加系统提示词
|
# 首先添加系统提示词
|
||||||
system_msg = MessageBuilder().set_role(RoleType.System)
|
system_msg = MessageBuilder().set_role(RoleType.System)
|
||||||
@@ -381,48 +345,9 @@ class MaiSakaLLMService:
|
|||||||
|
|
||||||
# 然后添加对话历史
|
# 然后添加对话历史
|
||||||
for msg in chat_history:
|
for msg in chat_history:
|
||||||
role = msg.get("role", "")
|
llm_message = to_llm_message(msg)
|
||||||
content = msg.get("content", "")
|
if llm_message is not None:
|
||||||
|
messages.append(llm_message)
|
||||||
# 跳过内部字段类型的消息和系统消息(已经有系统提示词了)
|
|
||||||
if role in ("perception", "system"):
|
|
||||||
continue
|
|
||||||
|
|
||||||
# 映射角色类型
|
|
||||||
if role == "user":
|
|
||||||
role_type = RoleType.User
|
|
||||||
elif role == "assistant":
|
|
||||||
role_type = RoleType.Assistant
|
|
||||||
elif role == "tool":
|
|
||||||
role_type = RoleType.Tool
|
|
||||||
else:
|
|
||||||
continue
|
|
||||||
|
|
||||||
builder = MessageBuilder().set_role(role_type)
|
|
||||||
|
|
||||||
# 处理工具调用
|
|
||||||
if role == "assistant" and "tool_calls" in msg:
|
|
||||||
# 转换 tool_calls 格式:从 MaiSaka 格式转为主项目格式
|
|
||||||
tool_calls_list = []
|
|
||||||
for tc in msg["tool_calls"]:
|
|
||||||
tc_func = tc.get("function", {})
|
|
||||||
# 主项目的 ToolCall: call_id, func_name, args
|
|
||||||
tool_calls_list.append(
|
|
||||||
ToolCallOption(
|
|
||||||
call_id=tc.get("id", ""),
|
|
||||||
func_name=tc_func.get("name", ""),
|
|
||||||
args=json.loads(tc_func.get("arguments", "{}")) if tc_func.get("arguments") else {},
|
|
||||||
)
|
|
||||||
)
|
|
||||||
builder.set_tool_calls(tool_calls_list)
|
|
||||||
elif role == "tool" and "tool_call_id" in msg:
|
|
||||||
builder.add_tool_call(msg["tool_call_id"])
|
|
||||||
|
|
||||||
# 添加文本内容
|
|
||||||
if content:
|
|
||||||
builder.add_text_content(content)
|
|
||||||
|
|
||||||
messages.append(builder.build())
|
|
||||||
|
|
||||||
return messages
|
return messages
|
||||||
|
|
||||||
@@ -435,33 +360,18 @@ class MaiSakaLLMService:
|
|||||||
# 打印消息列表
|
# 打印消息列表
|
||||||
built_messages = message_factory(None)
|
built_messages = message_factory(None)
|
||||||
|
|
||||||
# 将消息分为普通消息和 tool 消息
|
ordered_panels = [self._render_message_panel(msg, index + 1) for index, msg in enumerate(built_messages)]
|
||||||
non_tool_panels = []
|
|
||||||
tool_panels = []
|
|
||||||
|
|
||||||
for index, msg in enumerate(built_messages):
|
if config.SHOW_THINKING and ordered_panels:
|
||||||
panel = self._render_message_panel(msg, index + 1)
|
|
||||||
role = msg.role.value if hasattr(msg.role, "value") else str(msg.role)
|
|
||||||
|
|
||||||
if role == "tool":
|
|
||||||
tool_panels.append(panel)
|
|
||||||
else:
|
|
||||||
non_tool_panels.append(panel)
|
|
||||||
|
|
||||||
# 先显示普通消息(group 在一个 panel 内)
|
|
||||||
if non_tool_panels:
|
|
||||||
console.print(
|
console.print(
|
||||||
Panel(
|
Panel(
|
||||||
Group(*non_tool_panels),
|
Group(*ordered_panels),
|
||||||
title="MaiSaka LLM Request - chat_loop_step",
|
title="MaiSaka LLM Request - chat_loop_step",
|
||||||
border_style="cyan",
|
border_style="cyan",
|
||||||
padding=(0, 1),
|
padding=(0, 1),
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
# tool 消息作为单独的块展示
|
|
||||||
for panel in tool_panels:
|
|
||||||
console.print(panel)
|
|
||||||
|
|
||||||
response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async(
|
response, (reasoning, model, tool_calls) = await self._llm_chat.generate_response_with_message_async(
|
||||||
message_factory=message_factory,
|
message_factory=message_factory,
|
||||||
@@ -469,86 +379,60 @@ class MaiSakaLLMService:
|
|||||||
temperature=self._temperature,
|
temperature=self._temperature,
|
||||||
max_tokens=self._max_tokens,
|
max_tokens=self._max_tokens,
|
||||||
)
|
)
|
||||||
|
raw_message = build_message(
|
||||||
# 转换 tool_calls 格式:从主项目格式转为 MaiSaka 格式
|
role=RoleType.Assistant.value,
|
||||||
converted_tool_calls = []
|
content=response or "",
|
||||||
if tool_calls:
|
source="assistant",
|
||||||
for tc in tool_calls:
|
tool_calls=tool_calls or None,
|
||||||
# 主项目的 ToolCall 有 call_id, func_name, args
|
)
|
||||||
call_id = tc.call_id if hasattr(tc, "call_id") else ""
|
|
||||||
func_name = tc.func_name if hasattr(tc, "func_name") else ""
|
|
||||||
args = tc.args if hasattr(tc, "args") else {}
|
|
||||||
|
|
||||||
converted_tool_calls.append(
|
|
||||||
ToolCall(
|
|
||||||
id=call_id,
|
|
||||||
name=func_name,
|
|
||||||
arguments=args,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
# 构建原始消息格式(MaiSaka 风格)
|
|
||||||
raw_message = {
|
|
||||||
"role": "assistant",
|
|
||||||
"content": response,
|
|
||||||
"_time": datetime.now().strftime("%H:%M:%S"),
|
|
||||||
}
|
|
||||||
if converted_tool_calls:
|
|
||||||
raw_message["tool_calls"] = [
|
|
||||||
{
|
|
||||||
"id": tc.id,
|
|
||||||
"type": "function",
|
|
||||||
"function": {
|
|
||||||
"name": tc.name,
|
|
||||||
"arguments": json.dumps(tc.arguments),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for tc in converted_tool_calls
|
|
||||||
]
|
|
||||||
|
|
||||||
return ChatResponse(
|
return ChatResponse(
|
||||||
content=response,
|
content=response,
|
||||||
tool_calls=converted_tool_calls,
|
tool_calls=tool_calls or [],
|
||||||
raw_message=raw_message,
|
raw_message=raw_message,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _filter_for_api(self, chat_history: List[dict]) -> str:
|
def _filter_for_api(self, chat_history: list[MaiMessage]) -> str:
|
||||||
"""过滤对话历史为 API 格式"""
|
"""过滤对话历史为 API 格式"""
|
||||||
parts = []
|
parts = []
|
||||||
for msg in chat_history:
|
for msg in chat_history:
|
||||||
role = msg.get("role", "")
|
role = get_message_role(msg)
|
||||||
content = msg.get("content", "")
|
content = get_message_text(msg)
|
||||||
|
|
||||||
# 跳过内部字段
|
# 跳过内部字段
|
||||||
if role in ("perception", "tool"):
|
if get_message_kind(msg) == "perception" or role == RoleType.Tool.value:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if role == "system":
|
if role == RoleType.System.value:
|
||||||
parts.append(f"System: {content}")
|
parts.append(f"System: {content}")
|
||||||
elif role == "user":
|
elif role == RoleType.User.value:
|
||||||
parts.append(f"User: {content}")
|
parts.append(f"User: {content}")
|
||||||
elif role == "assistant":
|
elif role == RoleType.Assistant.value:
|
||||||
# 处理工具调用
|
# 处理工具调用
|
||||||
if "tool_calls" in msg:
|
tool_calls = get_tool_calls(msg)
|
||||||
tool_desc = ", ".join([tc.get("name", "") for tc in msg["tool_calls"]])
|
if tool_calls:
|
||||||
|
tool_desc = ", ".join([tc.func_name for tc in tool_calls if tc.func_name])
|
||||||
parts.append(f"Assistant (called tools: {tool_desc})")
|
parts.append(f"Assistant (called tools: {tool_desc})")
|
||||||
else:
|
else:
|
||||||
parts.append(f"Assistant: {content}")
|
parts.append(f"Assistant: {content}")
|
||||||
|
|
||||||
return "\n\n".join(parts)
|
return "\n\n".join(parts)
|
||||||
|
|
||||||
def build_chat_context(self, user_text: str) -> List[dict]:
|
def build_chat_context(self, user_text: str) -> list[MaiMessage]:
|
||||||
"""构建对话上下文"""
|
"""构建对话上下文"""
|
||||||
return [
|
return [
|
||||||
{"role": "system", "content": self._chat_system_prompt},
|
build_message(
|
||||||
{"role": "user", "content": user_text},
|
role=RoleType.User.value,
|
||||||
|
content=format_speaker_content(config.USER_NAME, user_text),
|
||||||
|
source="user",
|
||||||
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
# ──────── 分析模块(使用 utils 模型) ────────
|
# ──────── 分析模块(使用 utils 模型) ────────
|
||||||
|
|
||||||
async def analyze_emotion(self, chat_history: List[dict]) -> str:
|
async def analyze_emotion(self, chat_history: list[MaiMessage]) -> str:
|
||||||
"""情绪分析 - 使用 utils 模型"""
|
"""情绪分析 - 使用 utils 模型"""
|
||||||
filtered = [m for m in chat_history if m.get("_type") != "perception"]
|
filtered = [m for m in chat_history if get_message_kind(m) != "perception"]
|
||||||
recent = filtered[-10:] if len(filtered) > 10 else filtered
|
recent = filtered[-10:] if len(filtered) > 10 else filtered
|
||||||
|
|
||||||
# 使用加载的系统提示词
|
# 使用加载的系统提示词
|
||||||
@@ -556,17 +440,20 @@ class MaiSakaLLMService:
|
|||||||
|
|
||||||
prompt_parts = [f"{system_prompt}\n\n【对话内容】\n"]
|
prompt_parts = [f"{system_prompt}\n\n【对话内容】\n"]
|
||||||
for msg in recent:
|
for msg in recent:
|
||||||
if msg.get("role") == "user":
|
role = get_message_role(msg)
|
||||||
prompt_parts.append(f"用户: {msg.get('content', '')}")
|
content = get_message_text(msg)
|
||||||
elif msg.get("role") == "assistant":
|
if role == RoleType.User.value:
|
||||||
prompt_parts.append(f"助手: {msg.get('content', '')}")
|
prompt_parts.append(f"{config.USER_NAME}: {content}")
|
||||||
|
elif role == RoleType.Assistant.value:
|
||||||
|
prompt_parts.append(f"助手: {content}")
|
||||||
|
|
||||||
prompt = "\n".join(prompt_parts)
|
prompt = "\n".join(prompt_parts)
|
||||||
|
|
||||||
print("\n" + "=" * 60)
|
if config.SHOW_THINKING:
|
||||||
print("MaiSaka LLM Request - analyze_emotion:")
|
print("\n" + "=" * 60)
|
||||||
print(f" {prompt}")
|
print("MaiSaka LLM Request - analyze_emotion:")
|
||||||
print("=" * 60 + "\n")
|
print(f" {prompt}")
|
||||||
|
print("=" * 60 + "\n")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response, _ = await self._llm_utils.generate_response_async(
|
response, _ = await self._llm_utils.generate_response_async(
|
||||||
@@ -580,9 +467,9 @@ class MaiSakaLLMService:
|
|||||||
logger.error(f"情绪分析 LLM 调用出错: {e}")
|
logger.error(f"情绪分析 LLM 调用出错: {e}")
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
async def analyze_cognition(self, chat_history: List[dict]) -> str:
|
async def analyze_cognition(self, chat_history: list[MaiMessage]) -> str:
|
||||||
"""认知分析 - 使用 utils 模型"""
|
"""认知分析 - 使用 utils 模型"""
|
||||||
filtered = [m for m in chat_history if m.get("_type") != "perception"]
|
filtered = [m for m in chat_history if get_message_kind(m) != "perception"]
|
||||||
recent = filtered[-10:] if len(filtered) > 10 else filtered
|
recent = filtered[-10:] if len(filtered) > 10 else filtered
|
||||||
|
|
||||||
# 使用加载的系统提示词
|
# 使用加载的系统提示词
|
||||||
@@ -590,14 +477,16 @@ class MaiSakaLLMService:
|
|||||||
|
|
||||||
prompt_parts = [f"{system_prompt}\n\n【对话内容】\n"]
|
prompt_parts = [f"{system_prompt}\n\n【对话内容】\n"]
|
||||||
for msg in recent:
|
for msg in recent:
|
||||||
if msg.get("role") == "user":
|
role = get_message_role(msg)
|
||||||
prompt_parts.append(f"用户: {msg.get('content', '')}")
|
content = get_message_text(msg)
|
||||||
elif msg.get("role") == "assistant":
|
if role == RoleType.User.value:
|
||||||
prompt_parts.append(f"助手: {msg.get('content', '')}")
|
prompt_parts.append(f"{config.USER_NAME}: {content}")
|
||||||
|
elif role == RoleType.Assistant.value:
|
||||||
|
prompt_parts.append(f"助手: {content}")
|
||||||
|
|
||||||
prompt = "\n".join(prompt_parts)
|
prompt = "\n".join(prompt_parts)
|
||||||
|
|
||||||
if config.SHOW_ANALYZE_COGNITION_PROMPT:
|
if config.SHOW_THINKING and config.SHOW_ANALYZE_COGNITION_PROMPT:
|
||||||
print("\n" + "=" * 60)
|
print("\n" + "=" * 60)
|
||||||
print("MaiSaka LLM Request - analyze_cognition:")
|
print("MaiSaka LLM Request - analyze_cognition:")
|
||||||
print(f" {prompt}")
|
print(f" {prompt}")
|
||||||
@@ -615,25 +504,29 @@ class MaiSakaLLMService:
|
|||||||
logger.error(f"认知分析 LLM 调用出错: {e}")
|
logger.error(f"认知分析 LLM 调用出错: {e}")
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
async def analyze_timing(self, chat_history: List[dict], timing_info: str) -> str:
|
async def analyze_timing(self, chat_history: list[MaiMessage], timing_info: str) -> str:
|
||||||
"""时间分析 - 使用 utils 模型"""
|
"""时间分析 - 使用 utils 模型"""
|
||||||
filtered = [m for m in chat_history if m.get("_type") not in ("perception", "system")]
|
filtered = [
|
||||||
|
m
|
||||||
|
for m in chat_history
|
||||||
|
if get_message_kind(m) != "perception" and get_message_role(m) != RoleType.System.value
|
||||||
|
]
|
||||||
|
|
||||||
# 使用加载的系统提示词
|
# 使用加载的系统提示词
|
||||||
system_prompt = self._timing_prompt or "请分析以下对话的时间节奏和用户状态:"
|
system_prompt = self._timing_prompt or "请分析以下对话的时间节奏和用户状态:"
|
||||||
|
|
||||||
prompt_parts = [f"{system_prompt}\n\n【系统时间戳信息】\n{timing_info}\n\n【当前对话记录】\n"]
|
prompt_parts = [f"{system_prompt}\n\n【系统时间戳信息】\n{timing_info}\n\n【当前对话记录】\n"]
|
||||||
for msg in filtered:
|
for msg in filtered:
|
||||||
role = msg.get("role", "")
|
role = get_message_role(msg)
|
||||||
content = msg.get("content", "")
|
content = get_message_text(msg)
|
||||||
if role == "user":
|
if role == RoleType.User.value:
|
||||||
prompt_parts.append(f"用户: {content}")
|
prompt_parts.append(f"{config.USER_NAME}: {content}")
|
||||||
elif role == "assistant":
|
elif role == RoleType.Assistant.value:
|
||||||
prompt_parts.append(f"助手: {content}")
|
prompt_parts.append(f"助手: {content}")
|
||||||
|
|
||||||
prompt = "\n".join(prompt_parts)
|
prompt = "\n".join(prompt_parts)
|
||||||
|
|
||||||
if config.SHOW_ANALYZE_TIMING_PROMPT:
|
if config.SHOW_THINKING and config.SHOW_ANALYZE_TIMING_PROMPT:
|
||||||
print("\n" + "=" * 60)
|
print("\n" + "=" * 60)
|
||||||
print("MaiSaka LLM Request - analyze_timing:")
|
print("MaiSaka LLM Request - analyze_timing:")
|
||||||
print(f" {prompt}")
|
print(f" {prompt}")
|
||||||
@@ -651,44 +544,9 @@ class MaiSakaLLMService:
|
|||||||
logger.error(f"时间分析 LLM 调用出错: {e}")
|
logger.error(f"时间分析 LLM 调用出错: {e}")
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
async def summarize_context(self, context_messages: List[dict]) -> str:
|
|
||||||
"""上下文总结 - 使用 utils 模型"""
|
|
||||||
filtered = [m for m in context_messages if m.get("role") != "system"]
|
|
||||||
|
|
||||||
# 使用加载的系统提示词
|
|
||||||
system_prompt = self._context_summarize_prompt or "请对以下对话内容进行总结:"
|
|
||||||
|
|
||||||
prompt_parts = [f"{system_prompt}\n\n【对话内容】\n"]
|
|
||||||
for msg in filtered:
|
|
||||||
role = msg.get("role", "")
|
|
||||||
content = msg.get("content", "")
|
|
||||||
if role == "user":
|
|
||||||
prompt_parts.append(f"用户: {content}")
|
|
||||||
elif role == "assistant":
|
|
||||||
prompt_parts.append(f"助手: {content}")
|
|
||||||
|
|
||||||
prompt = "\n".join(prompt_parts)
|
|
||||||
|
|
||||||
print("\n" + "=" * 60)
|
|
||||||
print("MaiSaka LLM Request - summarize_context:")
|
|
||||||
print(f" {prompt}")
|
|
||||||
print("=" * 60 + "\n")
|
|
||||||
|
|
||||||
try:
|
|
||||||
response, _ = await self._llm_utils.generate_response_async(
|
|
||||||
prompt=prompt,
|
|
||||||
temperature=0.3,
|
|
||||||
max_tokens=1024,
|
|
||||||
)
|
|
||||||
|
|
||||||
return response
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"上下文总结 LLM 调用出错: {e}")
|
|
||||||
return ""
|
|
||||||
|
|
||||||
# ──────── 回复生成(使用 replyer 模型) ────────
|
# ──────── 回复生成(使用 replyer 模型) ────────
|
||||||
|
|
||||||
async def generate_reply(self, reason: str, chat_history: List[dict]) -> str:
|
async def generate_reply(self, reason: str, chat_history: list[MaiMessage]) -> str:
|
||||||
"""
|
"""
|
||||||
生成回复 - 使用 replyer 模型
|
生成回复 - 使用 replyer 模型
|
||||||
可供 Replyer 类直接调用
|
可供 Replyer 类直接调用
|
||||||
@@ -700,7 +558,9 @@ class MaiSakaLLMService:
|
|||||||
|
|
||||||
# 格式化对话历史
|
# 格式化对话历史
|
||||||
filtered_history = [
|
filtered_history = [
|
||||||
msg for msg in chat_history if msg.get("role") != "system" and msg.get("_type") != "perception"
|
msg
|
||||||
|
for msg in chat_history
|
||||||
|
if get_message_role(msg) != RoleType.System.value and get_message_kind(msg) != "perception"
|
||||||
]
|
]
|
||||||
formatted_history = format_chat_history(filtered_history)
|
formatted_history = format_chat_history(filtered_history)
|
||||||
|
|
||||||
@@ -717,10 +577,11 @@ class MaiSakaLLMService:
|
|||||||
|
|
||||||
messages = f"System: {system_prompt}\n\nUser: {user_prompt}"
|
messages = f"System: {system_prompt}\n\nUser: {user_prompt}"
|
||||||
|
|
||||||
print("\n" + "=" * 60)
|
if config.SHOW_THINKING:
|
||||||
print("MaiSaka LLM Request - generate_reply:")
|
print("\n" + "=" * 60)
|
||||||
print(f" {messages}")
|
print("MaiSaka LLM Request - generate_reply:")
|
||||||
print("=" * 60 + "\n")
|
print(f" {messages}")
|
||||||
|
print("=" * 60 + "\n")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response, _ = await self._llm_replyer.generate_response_async(
|
response, _ = await self._llm_replyer.generate_response_async(
|
||||||
|
|||||||
src/maisaka/message_adapter.py (new file, 181 lines)
@@ -0,0 +1,181 @@
+"""
+MaiSaka message adapters built on top of the main project's MaiMessage model.
+"""
+
+from datetime import datetime
+import re
+from typing import Optional
+from uuid import uuid4
+
+from src.common.data_models.mai_message_data_model import MaiMessage, MessageInfo, UserInfo
+from src.common.data_models.message_component_data_model import MessageSequence
+from src.config.config import global_config
+from src.llm_models.payload_content.message import Message, MessageBuilder, RoleType
+from src.llm_models.payload_content.tool_option import ToolCall
+
+from .config import USER_NAME
+
+MAISAKA_PLATFORM = "maisaka"
+MAISAKA_SESSION_ID = "maisaka_cli"
+MESSAGE_KIND_KEY = "maisaka_message_kind"
+SOURCE_KEY = "maisaka_source"
+LLM_ROLE_KEY = "maisaka_llm_role"
+TOOL_CALL_ID_KEY = "maisaka_tool_call_id"
+TOOL_CALLS_KEY = "maisaka_tool_calls"
+SPEAKER_PREFIX_PATTERN = re.compile(r"^\[(?P<speaker>[^\]]+)\](?P<content>.*)$", re.DOTALL)
+
+
+def _build_user_info_for_role(role: str) -> UserInfo:
+    if role == RoleType.User.value:
+        return UserInfo(user_id="maisaka_user", user_nickname=USER_NAME, user_cardname=None)
+    if role == RoleType.Tool.value:
+        return UserInfo(user_id="maisaka_tool", user_nickname="tool", user_cardname=None)
+    return UserInfo(
+        user_id="maisaka_assistant",
+        user_nickname=global_config.bot.nickname.strip() or "MaiSaka",
+        user_cardname=None,
+    )
+
+
+def _serialize_tool_call(tool_call: ToolCall) -> dict:
+    return {
+        "call_id": tool_call.call_id,
+        "func_name": tool_call.func_name,
+        "args": tool_call.args or {},
+    }
+
+
+def _deserialize_tool_call(data: dict) -> ToolCall:
+    return ToolCall(
+        call_id=str(data.get("call_id", "")),
+        func_name=str(data.get("func_name", "")),
+        args=data.get("args", {}) or {},
+    )
+
+
+def build_message(
+    role: str,
+    content: str,
+    *,
+    message_kind: str = "normal",
+    source: Optional[str] = None,
+    tool_call_id: Optional[str] = None,
+    tool_calls: Optional[list[ToolCall]] = None,
+    timestamp: Optional[datetime] = None,
+    message_id: Optional[str] = None,
+) -> MaiMessage:
+    """Build a MaiMessage for the Maisaka session history."""
+    resolved_timestamp = timestamp or datetime.now()
+    resolved_role = role.value if isinstance(role, RoleType) else role
+    message = MaiMessage(
+        message_id=message_id or f"maisaka_{uuid4().hex}",
+        timestamp=resolved_timestamp,
+        platform=MAISAKA_PLATFORM,
+    )
+    message.message_info = MessageInfo(
+        user_info=_build_user_info_for_role(resolved_role),
+        group_info=None,
+        additional_config={
+            LLM_ROLE_KEY: resolved_role,
+            MESSAGE_KIND_KEY: message_kind,
+            SOURCE_KEY: source or resolved_role,
+            TOOL_CALL_ID_KEY: tool_call_id,
+            TOOL_CALLS_KEY: [_serialize_tool_call(tool_call) for tool_call in (tool_calls or [])],
+        },
+    )
+    message.session_id = MAISAKA_SESSION_ID
+    message.raw_message = MessageSequence([])
+    if content:
+        message.raw_message.text(content)
+    message.processed_plain_text = content
+    message.display_message = content
+    return message
+
+
+def format_speaker_content(speaker_name: str, content: str) -> str:
+    """Format visible conversation content with an explicit speaker label."""
+    return f"[{speaker_name}]{content}"
+
+
+def parse_speaker_content(content: str) -> tuple[Optional[str], str]:
+    """Parse content formatted as [speaker]message."""
+    match = SPEAKER_PREFIX_PATTERN.match(content or "")
+    if not match:
+        return None, content or ""
+    return match.group("speaker"), match.group("content")
+
+
+def get_message_text(message: MaiMessage) -> str:
+    if message.processed_plain_text is not None:
+        return message.processed_plain_text
+    if message.display_message is not None:
+        return message.display_message
+
+    parts: list[str] = []
+    for component in message.raw_message.components:
+        text = getattr(component, "text", None)
+        if isinstance(text, str):
+            parts.append(text)
+    return "".join(parts)
+
+
+def get_message_role(message: MaiMessage) -> str:
+    return str(message.message_info.additional_config.get(LLM_ROLE_KEY, RoleType.User.value))
+
+
+def get_message_kind(message: MaiMessage) -> str:
+    return str(message.message_info.additional_config.get(MESSAGE_KIND_KEY, "normal"))
+
+
+def get_message_source(message: MaiMessage) -> str:
+    return str(message.message_info.additional_config.get(SOURCE_KEY, get_message_role(message)))
+
+
+def is_perception_message(message: MaiMessage) -> bool:
+    return get_message_kind(message) == "perception"
+
+
+def get_tool_call_id(message: MaiMessage) -> Optional[str]:
+    value = message.message_info.additional_config.get(TOOL_CALL_ID_KEY)
+    return str(value) if value else None
+
+
+def get_tool_calls(message: MaiMessage) -> list[ToolCall]:
+    raw_tool_calls = message.message_info.additional_config.get(TOOL_CALLS_KEY, [])
+    if not isinstance(raw_tool_calls, list):
+        return []
+    return [_deserialize_tool_call(item) for item in raw_tool_calls if isinstance(item, dict)]
+
+
+def remove_last_perception(messages: list[MaiMessage]) -> None:
+    for index in range(len(messages) - 1, -1, -1):
+        if is_perception_message(messages[index]):
+            messages.pop(index)
+            break
+
+
+def to_llm_message(message: MaiMessage) -> Optional[Message]:
+    role = get_message_role(message)
+    content = get_message_text(message)
+    tool_call_id = get_tool_call_id(message)
+    tool_calls = get_tool_calls(message)
+
+    if role == RoleType.System.value:
+        role_type = RoleType.System
+    elif role == RoleType.User.value:
+        role_type = RoleType.User
+    elif role == RoleType.Assistant.value:
+        role_type = RoleType.Assistant
+    elif role == RoleType.Tool.value:
+        role_type = RoleType.Tool
+    else:
+        return None
+
+    builder = MessageBuilder().set_role(role_type)
+    if role_type == RoleType.Assistant and tool_calls:
+        builder.set_tool_calls(tool_calls)
+    if role_type == RoleType.Tool and tool_call_id:
+        builder.add_tool_call(tool_call_id)
+    if content:
+        builder.add_text_content(content)
+    return builder.build()
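A short round-trip sketch using only the helpers defined in src/maisaka/message_adapter.py above (values are illustrative):

    from src.maisaka.message_adapter import (
        build_message,
        format_speaker_content,
        get_message_role,
        get_message_text,
        to_llm_message,
    )

    # Wrap raw CLI input in a MaiMessage with the speaker label baked into the text.
    msg = build_message(role="user", content=format_speaker_content("用户", "明天有空吗?"), source="user")

    print(get_message_role(msg))  # "user"
    print(get_message_text(msg))  # "[用户]明天有空吗?"

    # Convert to the main project's payload Message for an LLM request;
    # roles the adapter does not recognize come back as None.
    llm_message = to_llm_message(msg)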
@@ -2,14 +2,14 @@
 MaiSaka reply helper.
 """
 
-from datetime import datetime
-from typing import Any, Optional
+from typing import Optional
 
+from src.common.data_models.mai_message_data_model import MaiMessage
 from src.config.config import global_config
 
+from .config import USER_NAME
 from .llm_service import MaiSakaLLMService
-VISIBLE_REPLY_PREFIX = "\u3010\u9ea6\u9ea6\u7684\u53d1\u8a00\u3011"
+from .message_adapter import get_message_role, get_message_text, is_perception_message, parse_speaker_content
 
 
 def _normalize_content(content: str, limit: int = 500) -> str:
@@ -19,57 +19,49 @@ def _normalize_content(content: str, limit: int = 500) -> str:
     return normalized
 
 
-def _format_message_time(_: dict[str, Any]) -> str:
-    return datetime.now().strftime("%H:%M:%S")
+def _format_message_time(message: MaiMessage) -> str:
+    return message.timestamp.strftime("%H:%M:%S")
 
 
-def _extract_visible_assistant_reply(message: dict[str, Any]) -> str:
-    if message.get("_type") == "perception":
+def _extract_visible_assistant_reply(message: MaiMessage) -> str:
+    if is_perception_message(message):
         return ""
 
-    content = (message.get("content", "") or "").strip()
-    if not content:
-        return ""
-
-    marker = "[generated_reply]"
-    if marker in content:
-        _, visible_reply = content.rsplit(marker, 1)
-        return _normalize_content(visible_reply)
-
     return ""
 
 
-def _extract_guided_bot_reply(message: dict[str, Any]) -> str:
-    content = (message.get("content", "") or "").strip()
-    if content.startswith(VISIBLE_REPLY_PREFIX):
-        return _normalize_content(content[len(VISIBLE_REPLY_PREFIX) :].strip())
+def _extract_guided_bot_reply(message: MaiMessage) -> str:
+    speaker_name, body = parse_speaker_content(get_message_text(message).strip())
+    bot_nickname = global_config.bot.nickname.strip() or "Bot"
+    if speaker_name == bot_nickname:
+        return _normalize_content(body.strip())
     return ""
 
 
-def format_chat_history(messages: list[dict[str, Any]]) -> str:
+def format_chat_history(messages: list[MaiMessage]) -> str:
     """Format visible chat history for reply generation."""
     bot_nickname = global_config.bot.nickname.strip() or "Bot"
     parts: list[str] = []
 
     for message in messages:
-        role = message.get("role", "")
+        role = get_message_role(message)
         timestamp = _format_message_time(message)
 
         if role == "user":
            guided_reply = _extract_guided_bot_reply(message)
            if guided_reply:
-                parts.append(f"{timestamp} {bot_nickname}(分析器指导的麦麦发言):{guided_reply}")
+                parts.append(f"{timestamp} {bot_nickname}(you): {guided_reply}")
                continue
 
-            content = _normalize_content(message.get("content", "") or "")
+            _, content_body = parse_speaker_content(get_message_text(message))
+            content = _normalize_content(content_body)
            if content:
-                parts.append(f"{timestamp} 用户:{content}")
+                parts.append(f"{timestamp} {USER_NAME}: {content}")
            continue
 
        if role == "assistant":
            visible_reply = _extract_visible_assistant_reply(message)
            if visible_reply:
-                parts.append(f"{timestamp} {bot_nickname}(你):{visible_reply}")
+                parts.append(f"{timestamp} {bot_nickname}(you): {visible_reply}")
 
     return "\n".join(parts)
 
@@ -87,7 +79,7 @@ class Replyer:
     def set_enabled(self, enabled: bool) -> None:
         self._enabled = enabled
 
-    async def reply(self, reason: str, chat_history: list[dict[str, Any]]) -> str:
+    async def reply(self, reason: str, chat_history: list[MaiMessage]) -> str:
         if not self._enabled or not reason or self._llm_service is None:
             return "..."
 
@@ -1,75 +1,67 @@
|
|||||||
"""
|
"""
|
||||||
MaiSaka - Timing 模块(含自我反思功能)
|
MaiSaka timing helpers.
|
||||||
构建对话时间戳信息,供 Timing 分析模块使用。
|
|
||||||
该模块同时负责分析对话的时间维度和进行自我反思分析。
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
|
|
||||||
|
def _format_duration(total_seconds: int) -> str:
|
||||||
|
hours, remainder = divmod(total_seconds, 3600)
|
||||||
|
minutes, seconds = divmod(remainder, 60)
|
||||||
|
if hours > 0:
|
||||||
|
return f"{hours}h {minutes}m {seconds}s"
|
||||||
|
if minutes > 0:
|
||||||
|
return f"{minutes}m {seconds}s"
|
||||||
|
return f"{seconds}s"
|
||||||
|
|
||||||
|
|
||||||
|
def _get_time_period_label(hour: int) -> str:
|
||||||
|
if 0 <= hour < 6:
|
||||||
|
return "late_night"
|
||||||
|
if 6 <= hour < 9:
|
||||||
|
return "morning"
|
||||||
|
if 9 <= hour < 12:
|
||||||
|
return "late_morning"
|
||||||
|
if 12 <= hour < 14:
|
+        return "noon"
+    if 14 <= hour < 18:
+        return "afternoon"
+    if 18 <= hour < 22:
+        return "evening"
+    return "night"
 
 
 def build_timing_info(
     chat_start_time: Optional[datetime],
     last_user_input_time: Optional[datetime],
     last_assistant_response_time: Optional[datetime],
     user_input_times: list[datetime],
 ) -> str:
-    """
-    构建当前时间戳信息文本,供 Timing 模块分析。
-
-    Args:
-        chat_start_time: 对话开始时间
-        last_user_input_time: 用户上次输入时间
-        last_assistant_response_time: 助手上次回复时间
-        user_input_times: 所有用户输入时间戳列表
-    """
+    """Build readable timing context for the timing analysis prompt."""
     now = datetime.now()
-    parts: list[str] = []
-
-    parts.append(f"当前时间: {now.strftime('%Y-%m-%d %H:%M:%S')}")
+    parts: list[str] = [f"Current time: {now.strftime('%Y-%m-%d %H:%M:%S')}"]
 
     if chat_start_time:
-        elapsed = now - chat_start_time
-        minutes, seconds = divmod(int(elapsed.total_seconds()), 60)
-        hours, minutes = divmod(minutes, 60)
-        if hours > 0:
-            parts.append(f"对话已持续: {hours}小时{minutes}分{seconds}秒")
-        elif minutes > 0:
-            parts.append(f"对话已持续: {minutes}分{seconds}秒")
-        else:
-            parts.append(f"对话已持续: {seconds}秒")
+        elapsed_seconds = int((now - chat_start_time).total_seconds())
+        parts.append(f"Conversation duration: {_format_duration(elapsed_seconds)}")
 
     if last_user_input_time:
-        since_user = now - last_user_input_time
-        parts.append(f"距用户上次输入: {int(since_user.total_seconds())}秒")
+        since_user_seconds = int((now - last_user_input_time).total_seconds())
+        parts.append(f"Seconds since last user input: {since_user_seconds}")
 
     if last_assistant_response_time:
-        since_assistant = now - last_assistant_response_time
-        parts.append(f"距助手上次回复: {int(since_assistant.total_seconds())}秒")
+        since_assistant_seconds = int((now - last_assistant_response_time).total_seconds())
+        parts.append(f"Seconds since last Maisaka reply: {since_assistant_seconds}")
 
     if len(user_input_times) >= 2:
         intervals = [
-            (user_input_times[i] - user_input_times[i - 1]).total_seconds() for i in range(1, len(user_input_times))
+            int((user_input_times[index] - user_input_times[index - 1]).total_seconds())
+            for index in range(1, len(user_input_times))
         ]
-        avg_interval = sum(intervals) / len(intervals)
-        parts.append(f"用户平均回复间隔: {int(avg_interval)}秒")
-        parts.append(f"用户总共发言次数: {len(user_input_times)}")
+        average_interval = sum(intervals) / len(intervals)
+        parts.append(f"Average user input interval: {int(average_interval)}s")
+        parts.append(f"Total user input count: {len(user_input_times)}")
 
-    # 时段判断
-    hour = now.hour
-    if 0 <= hour < 6:
-        parts.append("当前时段: 深夜/凌晨")
-    elif 6 <= hour < 9:
-        parts.append("当前时段: 早晨")
-    elif 9 <= hour < 12:
-        parts.append("当前时段: 上午")
-    elif 12 <= hour < 14:
-        parts.append("当前时段: 中午")
-    elif 14 <= hour < 18:
-        parts.append("当前时段: 下午")
-    elif 18 <= hour < 22:
-        parts.append("当前时段: 晚上")
-    else:
-        parts.append("当前时段: 深夜")
-
+    parts.append(f"Current time period: {_get_time_period_label(now.hour)}")
     return "\n".join(parts)
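Note: the rewritten build_timing_info above delegates to _format_duration and _get_time_period_label, whose definitions sit earlier in the file; only the tail of _get_time_period_label (the noon/afternoon/evening/night returns) is visible at the top of this hunk. A minimal sketch of what those helpers could look like, inferred from the call sites and from the hour thresholds of the removed Chinese branch; the English labels for the periods before noon are assumptions:

def _format_duration(total_seconds: int) -> str:
    # Render a second count as hours/minutes/seconds, dropping leading zero units.
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours > 0:
        return f"{hours}h {minutes}m {seconds}s"
    if minutes > 0:
        return f"{minutes}m {seconds}s"
    return f"{seconds}s"

def _get_time_period_label(hour: int) -> str:
    # Map an hour (0-23) to a coarse time-of-day label; thresholds mirror the removed branch.
    if 0 <= hour < 6:
        return "late night"
    if 6 <= hour < 9:
        return "early morning"
    if 9 <= hour < 12:
        return "morning"
    if 12 <= hour < 14:
        return "noon"
    if 14 <= hour < 18:
        return "afternoon"
    if 18 <= hour < 22:
        return "evening"
    return "night"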
@@ -9,13 +9,15 @@ from typing import TYPE_CHECKING, Any, Optional
 import json as _json
 import os
 
-from rich.markdown import Markdown
 from rich.panel import Panel
 
+from src.common.data_models.mai_message_data_model import MaiMessage
+from src.llm_models.payload_content.tool_option import ToolCall
 
 from .config import console
 from .input_reader import InputReader
 from .llm_service import MaiSakaLLMService
-from .replyer import Replyer
+from .message_adapter import build_message
 
 if TYPE_CHECKING:
     from .mcp_client import MCPManager
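Note: the handlers in the hunks below repeatedly call build_message(role=..., content=..., tool_call_id=...) from .message_adapter, which is not part of this diff. A rough sketch of what such an adapter could look like, assuming MaiMessage accepts role, content and tool_call_id fields (the real constructor signature may differ):

from typing import Optional

from src.common.data_models.mai_message_data_model import MaiMessage

def build_message(role: str, content: str, tool_call_id: Optional[str] = None) -> MaiMessage:
    # Wrap plain values into a MaiMessage so chat_history stays list[MaiMessage]
    # instead of a list of ad-hoc role/content dicts.
    return MaiMessage(role=role, content=content, tool_call_id=tool_call_id)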
@@ -23,18 +25,6 @@ if TYPE_CHECKING:
 
 MAI_FILES_DIR = Path(os.path.join(os.path.dirname(os.path.abspath(__file__)), "mai_files"))
 
-_replyer: Optional[Replyer] = None
-
-
-def get_replyer(llm_service: MaiSakaLLMService) -> Replyer:
-    """Return a shared replyer instance."""
-    global _replyer
-    if _replyer is None:
-        _replyer = Replyer(llm_service)
-    elif _replyer._llm_service is None:
-        _replyer.set_llm_service(llm_service)
-    return _replyer
-
 
 class ToolHandlerContext:
     """Shared context for tool handlers."""
@@ -51,78 +41,22 @@ class ToolHandlerContext:
         self.last_user_input_time: Optional[datetime] = None
 
 
-async def handle_send_message(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> None:
-    """Backward-compatible handler for legacy send-message style tools."""
-    reason = tc.arguments.get("reason", "")
-    console.print("[accent]Calling tool: send_message(...)[/accent]")
-
-    if not reason:
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": "Missing required argument: reason",
-            }
-        )
-        return
-
-    console.print(
-        Panel(
-            Markdown(reason),
-            title="Reply Reason",
-            border_style="dim",
-            padding=(0, 1),
-            style="dim",
-        )
-    )
-
-    with console.status("[info]Generating visible reply...[/info]", spinner="dots"):
-        replyer = get_replyer(ctx.llm_service)
-        reply = await replyer.reply(reason, chat_history)
-
-    console.print(
-        Panel(
-            Markdown(reply),
-            title="MaiSaka",
-            border_style="magenta",
-            padding=(1, 2),
-        )
-    )
-    chat_history.append(
-        {
-            "role": "tool",
-            "tool_call_id": tc.id,
-            "content": f"Visible reply generated:\n{reply}",
-        }
-    )
-
-
-async def handle_stop(tc: Any, chat_history: list[dict[str, Any]]) -> None:
+async def handle_stop(tc: ToolCall, chat_history: list[MaiMessage]) -> None:
     """Handle the stop tool."""
     console.print("[accent]Calling tool: stop()[/accent]")
     chat_history.append(
-        {
-            "role": "tool",
-            "tool_call_id": tc.id,
-            "content": "Conversation loop will stop after this round.",
-        }
+        build_message(role="tool", content="Conversation loop will stop after this round.", tool_call_id=tc.call_id)
     )
 
 
-async def handle_wait(tc: Any, chat_history: list[dict[str, Any]], ctx: ToolHandlerContext) -> str:
+async def handle_wait(tc: ToolCall, chat_history: list[MaiMessage], ctx: ToolHandlerContext) -> str:
     """Handle the wait tool."""
-    seconds = tc.arguments.get("seconds", 30)
+    seconds = (tc.args or {}).get("seconds", 30)
     seconds = max(5, min(seconds, 300))
     console.print(f"[accent]Calling tool: wait({seconds})[/accent]")
 
     tool_result = await _do_wait(seconds, ctx)
-    chat_history.append(
-        {
-            "role": "tool",
-            "tool_call_id": tc.id,
-            "content": tool_result,
-        }
-    )
+    chat_history.append(build_message(role="tool", content=tool_result, tool_call_id=tc.call_id))
     return tool_result
 
 
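Note: the refactored handlers read tc.func_name, tc.args and tc.call_id where the old code used tc.name, tc.arguments and tc.id. The (tc.args or {}) guard exists because a tool call may arrive with no arguments, in which case args can be None and a bare .get() would raise AttributeError. A small illustration of the pattern; the field names are taken from the call sites in this diff, while the full ToolCall definition lives in src.llm_models.payload_content.tool_option and is not shown here:

# args may be None when the model omits arguments entirely; fall back to an empty dict.
args = tc.args or {}
seconds = args.get("seconds", 30)
# Clamp to the same 5 to 300 second window that handle_wait enforces above.
seconds = max(5, min(seconds, 300))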
@@ -152,49 +86,37 @@ async def _do_wait(seconds: int, ctx: ToolHandlerContext) -> str:
     return f"User input received: {user_input}"
 
 
-async def handle_mcp_tool(tc: Any, chat_history: list[dict[str, Any]], mcp_manager: "MCPManager") -> None:
+async def handle_mcp_tool(tc: ToolCall, chat_history: list[MaiMessage], mcp_manager: "MCPManager") -> None:
     """Handle an MCP tool call."""
-    args_str = _json.dumps(tc.arguments, ensure_ascii=False)
+    args_str = _json.dumps(tc.args or {}, ensure_ascii=False)
     args_preview = args_str if len(args_str) <= 120 else args_str[:120] + "..."
-    console.print(f"[accent]Calling MCP tool: {tc.name}({args_preview})[/accent]")
+    console.print(f"[accent]Calling MCP tool: {tc.func_name}({args_preview})[/accent]")
 
-    with console.status(f"[info]Running MCP tool {tc.name}...[/info]", spinner="dots"):
-        result = await mcp_manager.call_tool(tc.name, tc.arguments)
+    with console.status(f"[info]Running MCP tool {tc.func_name}...[/info]", spinner="dots"):
+        result = await mcp_manager.call_tool(tc.func_name, tc.args or {})
 
     display_text = result if len(result) <= 800 else result[:800] + "\n... (truncated)"
     console.print(
         Panel(
             display_text,
-            title=f"MCP: {tc.name}",
+            title=f"MCP: {tc.func_name}",
             border_style="bright_green",
             padding=(0, 1),
         )
     )
-    chat_history.append(
-        {
-            "role": "tool",
-            "tool_call_id": tc.id,
-            "content": result,
-        }
-    )
+    chat_history.append(build_message(role="tool", content=result, tool_call_id=tc.call_id))
 
 
-async def handle_unknown_tool(tc: Any, chat_history: list[dict[str, Any]]) -> None:
+async def handle_unknown_tool(tc: ToolCall, chat_history: list[MaiMessage]) -> None:
     """Handle an unknown tool call."""
-    console.print(f"[accent]Calling unknown tool: {tc.name}({tc.arguments})[/accent]")
-    chat_history.append(
-        {
-            "role": "tool",
-            "tool_call_id": tc.id,
-            "content": f"Unknown tool: {tc.name}",
-        }
-    )
+    console.print(f"[accent]Calling unknown tool: {tc.func_name}({tc.args})[/accent]")
+    chat_history.append(build_message(role="tool", content=f"Unknown tool: {tc.func_name}", tool_call_id=tc.call_id))
 
 
-async def handle_write_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
+async def handle_write_file(tc: ToolCall, chat_history: list[MaiMessage]) -> None:
     """Write a file under the local mai_files workspace."""
-    filename = tc.arguments.get("filename", "")
-    content = tc.arguments.get("content", "")
+    filename = (tc.args or {}).get("filename", "")
+    content = (tc.args or {}).get("content", "")
     console.print(f'[accent]Calling tool: write_file("{filename}")[/accent]')
 
     MAI_FILES_DIR.mkdir(parents=True, exist_ok=True)
@@ -215,27 +137,21 @@ async def handle_write_file(tc: Any, chat_history: list[dict[str, Any]]) -> None
             )
         )
         chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": f"File written successfully: {filename} ({file_size} bytes)",
-            }
+            build_message(
+                role="tool",
+                content=f"File written successfully: {filename} ({file_size} bytes)",
+                tool_call_id=tc.call_id,
+            )
         )
     except Exception as exc:
         error_msg = f"Failed to write file: {exc}"
         console.print(f"[error]{error_msg}[/error]")
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": error_msg,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=error_msg, tool_call_id=tc.call_id))
 
 
-async def handle_read_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
+async def handle_read_file(tc: ToolCall, chat_history: list[MaiMessage]) -> None:
     """Read a file from the local mai_files workspace."""
-    filename = tc.arguments.get("filename", "")
+    filename = (tc.args or {}).get("filename", "")
     console.print(f'[accent]Calling tool: read_file("{filename}")[/accent]')
 
     file_path = MAI_FILES_DIR / filename
@@ -244,25 +160,13 @@ async def handle_read_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
     if not file_path.exists():
         error_msg = f"File does not exist: {filename}"
         console.print(f"[warning]{error_msg}[/warning]")
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": error_msg,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=error_msg, tool_call_id=tc.call_id))
         return
 
     if not file_path.is_file():
         error_msg = f"Path is not a file: {filename}"
         console.print(f"[warning]{error_msg}[/warning]")
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": error_msg,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=error_msg, tool_call_id=tc.call_id))
         return
 
     with open(file_path, "r", encoding="utf-8") as file:
@@ -278,25 +182,15 @@ async def handle_read_file(tc: Any, chat_history: list[dict[str, Any]]) -> None:
             )
         )
         chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": f"File content of {filename}:\n{file_content}",
-            }
+            build_message(role="tool", content=f"File content of {filename}:\n{file_content}", tool_call_id=tc.call_id)
         )
     except Exception as exc:
         error_msg = f"Failed to read file: {exc}"
         console.print(f"[error]{error_msg}[/error]")
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": error_msg,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=error_msg, tool_call_id=tc.call_id))
 
 
-async def handle_list_files(tc: Any, chat_history: list[dict[str, Any]]) -> None:
+async def handle_list_files(tc: ToolCall, chat_history: list[MaiMessage]) -> None:
     """List files under the local mai_files workspace."""
     console.print("[accent]Calling tool: list_files()[/accent]")
 
@@ -332,23 +226,11 @@ async def handle_list_files(tc: Any, chat_history: list[dict[str, Any]]) -> None
             padding=(0, 1),
         )
     )
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": result_text,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=result_text, tool_call_id=tc.call_id))
     except Exception as exc:
         error_msg = f"Failed to list files: {exc}"
         console.print(f"[error]{error_msg}[/error]")
-        chat_history.append(
-            {
-                "role": "tool",
-                "tool_call_id": tc.id,
-                "content": error_msg,
-            }
-        )
+        chat_history.append(build_message(role="tool", content=error_msg, tool_call_id=tc.call_id))
 
 
 try:
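Note: with this change every handler records its outcome through the same one-liner, chat_history.append(build_message(...)), so downstream code only ever sees MaiMessage objects. A rough usage illustration only; the SimpleNamespace stand-in is hypothetical and merely mimics the ToolCall fields the handlers read, and handle_stop would be imported from the handlers module patched above:

import asyncio
from types import SimpleNamespace

# Hypothetical stand-in with the three fields the handlers access.
fake_call = SimpleNamespace(func_name="stop", args={}, call_id="call-1")

chat_history = []  # list[MaiMessage] in the real code
asyncio.run(handle_stop(fake_call, chat_history))
# chat_history now ends with a tool message telling the loop to stop after this round.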