remove:两个弃用模块

This commit is contained in:
SengokuCola
2026-03-27 13:11:51 +08:00
parent 414f7ad9a0
commit 0959193808
5 changed files with 2 additions and 230 deletions

View File

@@ -17,8 +17,6 @@ from src.common.data_models.mai_message_data_model import MaiMessage
from src.config.config import global_config
from .config import (
ENABLE_COGNITION_MODULE,
ENABLE_EMOTION_MODULE,
ENABLE_KNOWLEDGE_MODULE,
ENABLE_MCP,
SHOW_THINKING,
@@ -135,7 +133,7 @@ class BufferCLI:
- stop(): stop the current inner loop and return to idle
Per round:
1. Run enabled analysis modules in parallel when the previous round used tools.
1. Run enabled perception modules in parallel when the previous round used tools.
2. Call the planner model with the current history.
3. Append the assistant thought and execute any requested tools.
"""
@@ -147,12 +145,6 @@ class BufferCLI:
tasks = []
status_text_parts = []
if ENABLE_EMOTION_MODULE:
tasks.append(("eq", self.llm_service.analyze_emotion(chat_history)))
status_text_parts.append("emotion")
if ENABLE_COGNITION_MODULE:
tasks.append(("cognition", self.llm_service.analyze_cognition(chat_history)))
status_text_parts.append("cognition")
if ENABLE_KNOWLEDGE_MODULE:
tasks.append(("knowledge", retrieve_relevant_knowledge(self.llm_service, chat_history)))
status_text_parts.append("knowledge")
@@ -163,54 +155,9 @@ class BufferCLI:
):
results = await asyncio.gather(*[task for _, task in tasks], return_exceptions=True)
eq_result, cognition_result, knowledge_result = None, None, None
result_idx = 0
if ENABLE_EMOTION_MODULE:
eq_result = results[result_idx]
result_idx += 1
if ENABLE_COGNITION_MODULE:
cognition_result = results[result_idx]
result_idx += 1
if ENABLE_KNOWLEDGE_MODULE:
knowledge_result = results[result_idx]
result_idx += 1
eq_analysis = ""
if ENABLE_EMOTION_MODULE:
if isinstance(eq_result, Exception):
console.print(f"[warning]Emotion analysis failed: {eq_result}[/warning]")
elif eq_result:
eq_analysis = eq_result
if SHOW_THINKING:
console.print(
Panel(
Markdown(eq_analysis),
title="Emotion",
border_style="bright_yellow",
padding=(0, 1),
style="dim",
)
)
cognition_analysis = ""
if ENABLE_COGNITION_MODULE:
if isinstance(cognition_result, Exception):
console.print(f"[warning]Cognition analysis failed: {cognition_result}[/warning]")
elif cognition_result:
cognition_analysis = cognition_result
if SHOW_THINKING:
console.print(
Panel(
Markdown(cognition_analysis),
title="Cognition",
border_style="bright_cyan",
padding=(0, 1),
style="dim",
)
)
knowledge_analysis = ""
if ENABLE_KNOWLEDGE_MODULE:
knowledge_result = results[0] if results else None
if isinstance(knowledge_result, Exception):
console.print(f"[warning]Knowledge analysis failed: {knowledge_result}[/warning]")
elif knowledge_result:
@@ -229,10 +176,6 @@ class BufferCLI:
remove_last_perception(chat_history)
perception_parts = []
if eq_analysis:
perception_parts.append(f"Emotion\n{eq_analysis}")
if cognition_analysis:
perception_parts.append(f"Cognition\n{cognition_analysis}")
if knowledge_analysis:
perception_parts.append(f"Knowledge\n{knowledge_analysis}")

View File

@@ -17,14 +17,11 @@ if str(_root) not in sys.path:
sys.path.insert(0, str(_root))
# ──────────────────── 模块开关配置 ────────────────────
ENABLE_EMOTION_MODULE = global_config.maisaka.enable_emotion_module
ENABLE_COGNITION_MODULE = global_config.maisaka.enable_cognition_module
ENABLE_KNOWLEDGE_MODULE = global_config.maisaka.enable_knowledge_module
ENABLE_MCP = global_config.maisaka.enable_mcp
ENABLE_WRITE_FILE = global_config.maisaka.enable_write_file
ENABLE_READ_FILE = global_config.maisaka.enable_read_file
ENABLE_LIST_FILES = global_config.maisaka.enable_list_files
SHOW_ANALYZE_COGNITION_PROMPT = global_config.maisaka.show_analyze_cognition_prompt
SHOW_THINKING = global_config.maisaka.show_thinking
USER_NAME = global_config.maisaka.user_name.strip() or "用户"
DIRECT_IMAGE_INPUT = global_config.maisaka.direct_image_input

View File

@@ -1,59 +0,0 @@
"""
MaiSaka - Emotion 模块
情绪感知分析,分析用户的情绪状态和言语态度。
注意emotion.prompt 已迁移至主项目 prompts/ 目录
使用 prompt_manager.get_prompt("maidairy_emotion") 加载。
"""
from typing import List, Optional
from src.common.data_models.mai_message_data_model import MaiMessage
from .config import USER_NAME
from .message_adapter import get_message_role, get_message_text
def extract_user_messages(chat_history: List[MaiMessage], limit: Optional[int] = None) -> List[MaiMessage]:
    """Collect the user-authored messages from a conversation history.

    Args:
        chat_history: Full conversation history.
        limit: Maximum number of user messages to keep (``None`` means no
            limit; when trimming, the most recent messages are kept).

    Returns:
        A list containing only the user messages.
    """
    picked = [entry for entry in chat_history if get_message_role(entry) == "user"]
    # A falsy limit (None or 0) means "no trimming".
    if limit and limit < len(picked):
        return picked[-limit:]
    return picked
def build_emotion_context(chat_history: List[MaiMessage]) -> str:
    """Render the recent conversation as a plain-text transcript for emotion analysis.

    Args:
        chat_history: Full conversation history.

    Returns:
        The formatted dialogue text (roughly the last 8-10 messages).
    """
    # Only the tail of the conversation matters for mood estimation.
    transcript: List[str] = []
    for message in chat_history[-10:]:
        speaker = get_message_role(message)
        text = get_message_text(message)
        if speaker == "user":
            transcript.append(f"{USER_NAME}: {text}")
        elif speaker == "assistant" and "【AI 感知】" not in text:
            # Keep only what the assistant actually said; drop perception notes.
            transcript.append(f"助手: {text}")
    return "\n".join(transcript)

View File

@@ -112,10 +112,6 @@ class MaiSakaLLMService:
else:
self._chat_system_prompt = chat_system_prompt
# 子模块提示词同样采用懒加载
self._emotion_prompt: Optional[str] = None
self._cognition_prompt: Optional[str] = None
def get_current_model_name(self) -> str:
"""获取当前 Maisaka 对话主模型名称。
@@ -230,13 +226,6 @@ class MaiSakaLLMService:
logger.error(f"加载系统提示词失败: {e}")
self._chat_system_prompt = f"{self._personality_prompt}\n\n你是一个友好的 AI 助手。"
try:
self._emotion_prompt = load_prompt("maidairy_emotion")
self._cognition_prompt = load_prompt("maidairy_cognition")
logger.info("成功加载 MaiSaka 子模块提示词")
except Exception as e:
logger.warning(f"加载子模块提示词失败,将使用默认提示词: {e}")
self._prompts_loaded = True
@staticmethod
@@ -583,98 +572,6 @@ class MaiSakaLLMService:
)
]
# ──────── 分析模块(使用 utils 模型) ────────
async def analyze_emotion(self, chat_history: List[MaiMessage]) -> str:
    """Run the emotion-analysis sub-module over the recent dialogue.

    Args:
        chat_history: Current conversation history.

    Returns:
        str: The emotion-analysis text, or an empty string when the LLM call fails.
    """
    await self._ensure_prompts_loaded()

    # Perception snapshots are derived data — analyze only real dialogue turns.
    dialogue = [entry for entry in chat_history if get_message_kind(entry) != "perception"]
    window = dialogue[-10:]

    # Fall back to a generic instruction if the prompt file was not loaded.
    header = self._emotion_prompt or "请分析以下对话中用户的情绪状态和言语态度:"
    lines = [f"{header}\n\n【对话内容】\n"]
    for message in window:
        speaker = get_message_role(message)
        text = get_message_text(message)
        if speaker == RoleType.User.value:
            lines.append(f"{config.USER_NAME}: {text}")
        elif speaker == RoleType.Assistant.value:
            lines.append(f"助手: {text}")
    prompt = "\n".join(lines)

    if config.SHOW_THINKING:
        divider = "=" * 60
        print("\n" + divider)
        print("MaiSaka LLM Request - analyze_emotion:")
        print(f" {prompt}")
        print(divider + "\n")

    try:
        result = await self._llm_utils.generate_response(
            prompt=prompt,
            options=LLMGenerationOptions(temperature=0.3, max_tokens=512),
        )
        return result.response
    except Exception as e:
        logger.error(f"情绪分析 LLM 调用出错: {e}")
        return ""
async def analyze_cognition(self, chat_history: List[MaiMessage]) -> str:
    """Run the cognition-analysis sub-module over the recent dialogue.

    Args:
        chat_history: Current conversation history.

    Returns:
        str: The cognition-analysis text, or an empty string when the LLM call fails.
    """
    await self._ensure_prompts_loaded()

    # Perception snapshots are derived data — analyze only real dialogue turns.
    dialogue = [entry for entry in chat_history if get_message_kind(entry) != "perception"]
    window = dialogue[-10:]

    # Fall back to a generic instruction if the prompt file was not loaded.
    header = self._cognition_prompt or "请分析以下对话中用户的意图、认知状态和目的:"
    lines = [f"{header}\n\n【对话内容】\n"]
    for message in window:
        speaker = get_message_role(message)
        text = get_message_text(message)
        if speaker == RoleType.User.value:
            lines.append(f"{config.USER_NAME}: {text}")
        elif speaker == RoleType.Assistant.value:
            lines.append(f"助手: {text}")
    prompt = "\n".join(lines)

    # Cognition prompt dumping is gated behind an extra, more specific flag.
    if config.SHOW_THINKING and config.SHOW_ANALYZE_COGNITION_PROMPT:
        divider = "=" * 60
        print("\n" + divider)
        print("MaiSaka LLM Request - analyze_cognition:")
        print(f" {prompt}")
        print(divider + "\n")

    try:
        result = await self._llm_utils.generate_response(
            prompt=prompt,
            options=LLMGenerationOptions(temperature=0.3, max_tokens=512),
        )
        return result.response
    except Exception as e:
        logger.error(f"认知分析 LLM 调用出错: {e}")
        return ""
async def _removed_analyze_timing(self, chat_history: List[MaiMessage], timing_info: str) -> str:
"""执行时间节奏分析。

View File

@@ -19,8 +19,6 @@ from src.services import send_service
from .config import (
DIRECT_IMAGE_INPUT,
ENABLE_COGNITION_MODULE,
ENABLE_EMOTION_MODULE,
ENABLE_KNOWLEDGE_MODULE,
ENABLE_LIST_FILES,
ENABLE_MCP,
@@ -331,10 +329,6 @@ class MaisakaHeartFlowChatting:
async def _append_perception_snapshot(self) -> None:
tasks = []
if ENABLE_EMOTION_MODULE:
tasks.append(("emotion", self._llm_service.analyze_emotion(self._chat_history)))
if ENABLE_COGNITION_MODULE:
tasks.append(("cognition", self._llm_service.analyze_cognition(self._chat_history)))
if ENABLE_KNOWLEDGE_MODULE:
tasks.append(("knowledge", retrieve_relevant_knowledge(self._llm_service, self._chat_history)))