feat: Enhance OpenAI compatibility and introduce unified LLM service data models

- Refactored model fetching logic to support various authentication methods for OpenAI-compatible APIs.
- Introduced new data models for LLM service requests and responses to standardize interactions across layers.
- Added an adapter base class for unified request execution across different providers.
- Implemented utility functions for building OpenAI-compatible client configurations and request overrides.
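The new surface is visible at the call sites in the diff below: components construct `LLMServiceClient(task_name=..., request_type=...)` instead of `LLMRequest(model_set=...)`, and `await client.generate_response(prompt)` returns a result object read via `.response` and `.reasoning`. A minimal sketch of that contract, inferred from the call sites only (the dataclass name, field defaults, and constructor internals are assumptions, not the actual implementation):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class GenerationResult:
    """Unified response shape read by the call sites in this diff."""

    response: str                    # the model's text output
    reasoning: Optional[str] = None  # reasoning trace, when the provider returns one


class LLMServiceClient:
    """Resolves a logical task name to a configured model and runs requests."""

    def __init__(self, task_name: str, request_type: str = "") -> None:
        self.task_name = task_name        # e.g. "planner", "replyer", "utils"
        self.request_type = request_type  # tag used for logging/statistics

    async def generate_response(self, prompt: str) -> GenerationResult:
        # Dispatch to the provider adapter selected for task_name; elided here.
        raise NotImplementedError
```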
Author: DrSmoothl
Date: 2026-03-26 16:15:42 +08:00
parent 6e7daae55d
commit 777d4cb0d2
48 changed files with 5443 additions and 2945 deletions

View File

@@ -2,8 +2,8 @@ import time
 from typing import Tuple, Optional  # Added Optional
 from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config, model_config
+from src.services.llm_service import LLMServiceClient
+from src.config.config import global_config
 import random
 from .chat_observer import ChatObserver
 from .pfc_utils import get_items_from_json
@@ -109,8 +109,8 @@ class ActionPlanner:
"""行动规划器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(
model_set=model_config.model_task_config.planner,
self.llm = LLMServiceClient(
task_name="planner",
request_type="action_planning",
)
self.personality_info = self._get_personality_prompt()
@@ -398,7 +398,8 @@ class ActionPlanner:
logger.debug(f"[私聊][{self.private_name}]发送到LLM的最终提示词:\n------\n{prompt}\n------")
try:
content, _ = await self.llm.generate_response_async(prompt)
generation_result = await self.llm.generate_response(prompt)
content = generation_result.response
logger.debug(f"[私聊][{self.private_name}]LLM (行动规划) 原始返回内容: {content}")
# --- 初始行动规划解析 ---
@@ -427,7 +428,8 @@ class ActionPlanner:
f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------"
)
try:
end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM
end_generation_result = await self.llm.generate_response(end_decision_prompt)
end_content = end_generation_result.response # 再次调用LLM
logger.debug(f"[私聊][{self.private_name}]LLM (结束决策) 原始返回内容: {end_content}")
# 解析结束决策的JSON
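Every file below repeats the same two-part migration shown above: construction moves from an explicit model set to a logical task name, and the tuple-unpacking call becomes a structured result. Side by side, as a standalone illustration (not part of the diff; `llm` stands for the per-component client attribute):

```python
async def old_call(llm, prompt: str) -> str:
    # LLMRequest era: the call site owns the model choice and unpacks a positional tuple.
    content, _ = await llm.generate_response_async(prompt)
    return content


async def new_call(llm, prompt: str) -> str:
    # LLMServiceClient era: the call site names a task; the result object carries
    # the text (and optionally reasoning) as named fields.
    generation_result = await llm.generate_response(prompt)
    return generation_result.response
```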

View File

@@ -1,7 +1,7 @@
 from typing import List, Tuple, TYPE_CHECKING
 from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config, model_config
+from src.services.llm_service import LLMServiceClient
+from src.config.config import global_config
 import random
 from .chat_observer import ChatObserver
 from .pfc_utils import get_items_from_json
@@ -43,7 +43,9 @@ class GoalAnalyzer:
"""对话目标分析器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(model_set=model_config.model_task_config.planner, request_type="conversation_goal")
self.llm = LLMServiceClient(
task_name="planner", request_type="conversation_goal"
)
self.personality_info = self._get_personality_prompt()
self.name = global_config.bot.nickname
@@ -157,7 +159,8 @@ class GoalAnalyzer:
logger.debug(f"[私聊][{self.private_name}]发送到LLM的提示词: {prompt}")
try:
content, _ = await self.llm.generate_response_async(prompt)
generation_result = await self.llm.generate_response(prompt)
content = generation_result.response
logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}")
except Exception as e:
logger.error(f"[私聊][{self.private_name}]分析对话目标时出错: {str(e)}")
@@ -271,7 +274,8 @@ class GoalAnalyzer:
}}"""
try:
content, _ = await self.llm.generate_response_async(prompt)
generation_result = await self.llm.generate_response(prompt)
content = generation_result.response
logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}")
# 尝试解析JSON

View File

@@ -3,8 +3,7 @@ from src.common.logger import get_logger
 # NOTE: HippocampusManager doesn't exist in v0.12.2 - memory system was redesigned
 # from src.plugins.memory_system.Hippocampus import HippocampusManager
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import model_config
+from src.services.llm_service import LLMServiceClient
 from src.chat.knowledge import qa_manager

 logger = get_logger("knowledge_fetcher")
@@ -14,7 +13,7 @@ class KnowledgeFetcher:
"""知识调取器"""
def __init__(self, private_name: str):
self.llm = LLMRequest(model_set=model_config.model_task_config.utils)
self.llm = LLMServiceClient(task_name="utils")
self.private_name = private_name
def _lpmm_get_knowledge(self, query: str) -> str:

View File

@@ -2,8 +2,8 @@ import json
 import random
 from typing import Tuple, List, Dict, Any
 from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config, model_config
+from src.services.llm_service import LLMServiceClient
+from src.config.config import global_config
 from .chat_observer import ChatObserver
 from maim_message import UserInfo
@@ -14,7 +14,7 @@ class ReplyChecker:
"""回复检查器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(model_set=model_config.model_task_config.utils, request_type="reply_check")
self.llm = LLMServiceClient(task_name="utils", request_type="reply_check")
self.personality_info = self._get_personality_prompt()
self.name = global_config.bot.nickname
self.private_name = private_name
@@ -137,7 +137,8 @@ class ReplyChecker:
         Note: output strictly in JSON format and do not include anything else."""
         try:
-            content, _ = await self.llm.generate_response_async(prompt)
+            generation_result = await self.llm.generate_response(prompt)
+            content = generation_result.response
             logger.debug(f"[Private][{self.private_name}] Raw response from reply check: {content}")
             # Clean the content and try to extract the JSON part

View File

@@ -1,7 +1,7 @@
 from typing import Tuple, List, Dict, Any
 from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config, model_config
+from src.services.llm_service import LLMServiceClient
+from src.config.config import global_config
 import random
 from .chat_observer import ChatObserver
 from .reply_checker import ReplyChecker
@@ -87,8 +87,8 @@ class ReplyGenerator:
"""回复生成器"""
def __init__(self, stream_id: str, private_name: str):
self.llm = LLMRequest(
model_set=model_config.model_task_config.replyer,
self.llm = LLMServiceClient(
task_name="replyer",
request_type="reply_generation",
)
self.personality_info = self._get_personality_prompt()
@@ -223,7 +223,8 @@ class ReplyGenerator:
         # --- Call the LLM to generate ---
         logger.debug(f"[Private][{self.private_name}] Generation prompt sent to LLM:\n------\n{prompt}\n------")
         try:
-            content, _ = await self.llm.generate_response_async(prompt)
+            generation_result = await self.llm.generate_response(prompt)
+            content = generation_result.response
             logger.debug(f"[Private][{self.private_name}] Generated reply: {content}")
             # Removed the old check-for-new-messages logic; the conversation control flow should handle this
             return content

View File

@@ -17,9 +17,9 @@ from src.chat.utils.utils import get_chat_type_and_target_info
 from src.common.data_models.info_data_model import ActionPlannerInfo
 from src.common.logger import get_logger
 from src.common.utils.utils_action import ActionUtils
-from src.config.config import global_config, model_config
+from src.config.config import global_config
 from src.core.types import ActionActivationType, ActionInfo, ComponentType
-from src.llm_models.utils_model import LLMRequest
+from src.services.llm_service import LLMServiceClient
 from src.plugin_runtime.component_query import component_query_service
 from src.prompt.prompt_manager import prompt_manager
 from src.services.message_service import (
@@ -43,8 +43,8 @@ class BrainPlanner:
         self.log_prefix = f"[{_chat_manager.get_session_name(chat_id) or chat_id}]"
         self.action_manager = action_manager
         # LLM planner configuration
-        self.planner_llm = LLMRequest(
-            model_set=model_config.model_task_config.planner, request_type="planner"
+        self.planner_llm = LLMServiceClient(
+            task_name="planner", request_type="planner"
         )  # Used for action planning
         self.last_obs_time_mark = 0.0
@@ -412,7 +412,9 @@ class BrainPlanner:
         try:
             # Call the LLM
             llm_start = time.perf_counter()
-            llm_content, (reasoning_content, _, _) = await self.planner_llm.generate_response_async(prompt=prompt)
+            generation_result = await self.planner_llm.generate_response(prompt=prompt)
+            llm_content = generation_result.response
+            reasoning_content = generation_result.reasoning
             llm_duration_ms = (time.perf_counter() - llm_start) * 1000

             llm_reasoning = reasoning_content
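Of the call sites touched in this commit, only the planner reads `generation_result.reasoning` in addition to `generation_result.response`, which is what grows this hunk from 7 to 9 lines. That asymmetry is the point of the result object: callers that need only text read one field, while the old tuple API forced every caller to unpack `(reasoning_content, _, _)` positionally even when it discarded the extras.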