feat: Enhance OpenAI compatibility and introduce unified LLM service data models

- Refactored model fetching logic to support various authentication methods for OpenAI-compatible APIs.
- Introduced new data models for LLM service requests and responses to standardize interactions across layers.
- Added an adapter base class for unified request execution across different providers.
- Implemented utility functions for building OpenAI-compatible client configurations and request overrides.
DrSmoothl
2026-03-26 16:15:42 +08:00
parent 6e7daae55d
commit 777d4cb0d2
48 changed files with 5443 additions and 2945 deletions
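
A rough sketch of the unified call shape this commit moves plugin capabilities to, using only names that appear in the diff below (llm_service, LLMServiceRequest, resolve_task_name, generate, to_capability_payload); the concrete fields that to_capability_payload() returns are not shown in this commit, so treat them as assumed:

    from src.services import llm_service as llm_api

    async def ask(plugin_id: str, prompt: str) -> dict:
        # Resolve a model alias (or "" for the default task, assumed) to a configured
        # task name, then issue one standardized request instead of hand-picking a
        # model config as the old code did.
        task_name = llm_api.resolve_task_name("")
        result = await llm_api.generate(
            llm_api.LLMServiceRequest(
                task_name=task_name,
                request_type=f"plugin.{plugin_id}",  # same convention as the capabilities below
                prompt=prompt,
                temperature=0.7,  # optional; None presumably falls back to the task default
                max_tokens=512,   # optional
            )
        )
        # Normalized response structure; assumed to carry at least a "success" flag.
        return result.to_capability_payload()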

View File

@@ -1,33 +1,80 @@
-from typing import Any, Dict
+from typing import Any, Dict, List
from src.common.logger import get_logger
from src.config.config import global_config
from src.llm_models.payload_content.tool_option import ToolCall
logger = get_logger("plugin_runtime.integration")
def _get_nested_config_value(source: Any, key: str, default: Any = None) -> Any:
"""从嵌套对象或字典中读取配置值。
Args:
source: 配置对象或字典。
key: 以点号分隔的路径。
default: 未命中时返回的默认值。
Returns:
Any: 命中的值;读取失败时返回默认值。
"""
current = source
try:
for part in key.split("."):
if isinstance(current, dict) and part in current:
    current = current[part]
-elif hasattr(current, part):
+    continue
+if hasattr(current, part):
    current = getattr(current, part)
-else:
-    raise KeyError(part)
+    continue
+raise KeyError(part)
return current
except Exception:
return default
def _normalize_prompt_arg(prompt: Any) -> str | List[Dict[str, Any]]:
"""校验并规范化插件传入的提示参数。
Args:
prompt: 原始提示参数。
Returns:
str | List[Dict[str, Any]]: 规范化后的提示输入。
Raises:
ValueError: 提示参数缺失或结构不受支持时抛出。
"""
if isinstance(prompt, str):
    if not prompt.strip():
        raise ValueError("Missing required parameter: prompt")
    return prompt
if isinstance(prompt, list) and prompt:
    for index, prompt_message in enumerate(prompt, start=1):
        if not isinstance(prompt_message, dict):
            raise ValueError(f"prompt item {index} must be a dict")
    return prompt
raise ValueError("Missing required parameter: prompt")
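
For context, the two prompt shapes this helper accepts (a plain string, or an OpenAI-style message list; the message keys themselves are not validated here beyond each item being a dict):

    _normalize_prompt_arg("hello")                                 # -> "hello"
    _normalize_prompt_arg([{"role": "user", "content": "hello"}])  # -> list returned unchanged
    _normalize_prompt_arg([])                                      # raises ValueError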
class RuntimeCoreCapabilityMixin:
"""插件运行时的核心能力混入。"""
async def _cap_send_text(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送文本消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
-text: str = args.get("text", "")
-stream_id: str = args.get("stream_id", "")
+text = str(args.get("text", ""))
+stream_id = str(args.get("stream_id", ""))
if not text or not stream_id:
return {"success": False, "error": "缺少必要参数 text 或 stream_id"}
@@ -35,20 +82,31 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.text_to_stream(
text=text,
stream_id=stream_id,
-    typing=args.get("typing", False),
-    set_reply=args.get("set_reply", False),
-    storage_message=args.get("storage_message", True),
+    typing=bool(args.get("typing", False)),
+    set_reply=bool(args.get("set_reply", False)),
+    storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
-except Exception as e:
-    logger.error(f"[cap.send.text] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.send.text] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_send_emoji(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送表情图片。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
-emoji_base64: str = args.get("emoji_base64", "")
-stream_id: str = args.get("stream_id", "")
+emoji_base64 = str(args.get("emoji_base64", ""))
+stream_id = str(args.get("stream_id", ""))
if not emoji_base64 or not stream_id:
return {"success": False, "error": "缺少必要参数 emoji_base64 或 stream_id"}
@@ -56,18 +114,29 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.emoji_to_stream(
emoji_base64=emoji_base64,
stream_id=stream_id,
-    storage_message=args.get("storage_message", True),
+    storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
-except Exception as e:
-    logger.error(f"[cap.send.emoji] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.send.emoji] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_send_image(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送图片。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
-image_base64: str = args.get("image_base64", "")
-stream_id: str = args.get("stream_id", "")
+image_base64 = str(args.get("image_base64", ""))
+stream_id = str(args.get("stream_id", ""))
if not image_base64 or not stream_id:
return {"success": False, "error": "缺少必要参数 image_base64 或 stream_id"}
@@ -75,18 +144,29 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.image_to_stream(
image_base64=image_base64,
stream_id=stream_id,
-    storage_message=args.get("storage_message", True),
+    storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
-except Exception as e:
-    logger.error(f"[cap.send.image] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.send.image] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_send_command(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送命令消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
-command = args.get("command", "")
-stream_id: str = args.get("stream_id", "")
+command = str(args.get("command", ""))
+stream_id = str(args.get("stream_id", ""))
if not command or not stream_id:
return {"success": False, "error": "缺少必要参数 command 或 stream_id"}
@@ -95,22 +175,33 @@ class RuntimeCoreCapabilityMixin:
message_type="command",
content=command,
stream_id=stream_id,
-    storage_message=args.get("storage_message", True),
-    display_message=args.get("display_message", ""),
+    storage_message=bool(args.get("storage_message", True)),
+    display_message=str(args.get("display_message", "")),
)
return {"success": result}
-except Exception as e:
-    logger.error(f"[cap.send.command] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.send.command] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_send_custom(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送自定义消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
-message_type: str = args.get("message_type", "") or args.get("custom_type", "")
+message_type = str(args.get("message_type", "") or args.get("custom_type", ""))
content = args.get("content")
if content is None:
content = args.get("data", "")
-stream_id: str = args.get("stream_id", "")
+stream_id = str(args.get("stream_id", ""))
if not message_type or not stream_id:
return {"success": False, "error": "缺少必要参数 message_type 或 stream_id"}
@@ -119,114 +210,116 @@ class RuntimeCoreCapabilityMixin:
message_type=message_type,
content=content,
stream_id=stream_id,
-    display_message=args.get("display_message", ""),
-    typing=args.get("typing", False),
-    storage_message=args.get("storage_message", True),
+    display_message=str(args.get("display_message", "")),
+    typing=bool(args.get("typing", False)),
+    storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
-except Exception as e:
-    logger.error(f"[cap.send.custom] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.send.custom] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_llm_generate(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""执行无工具的 LLM 生成能力。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 标准化后的 LLM 响应结构。
"""
del capability
from src.services import llm_service as llm_api
-prompt: str = args.get("prompt", "")
-if not prompt:
-    return {"success": False, "error": "Missing required parameter: prompt"}
-model_name: str = args.get("model", "") or args.get("model_name", "")
-temperature = args.get("temperature")
-max_tokens = args.get("max_tokens")
try:
-    models = llm_api.get_available_models()
-    if model_name and model_name in models:
-        model_config = models[model_name]
-    else:
-        if not models:
-            return {"success": False, "error": "No model configuration is available"}
-        model_config = next(iter(models.values()))
-    success, response, reasoning, used_model = await llm_api.generate_with_model(
-        prompt=prompt,
-        model_config=model_config,
-        request_type=f"plugin.{plugin_id}",
-        temperature=temperature,
-        max_tokens=max_tokens,
+    prompt = _normalize_prompt_arg(args.get("prompt"))
+    task_name = llm_api.resolve_task_name(str(args.get("model", "") or args.get("model_name", "")))
+    result = await llm_api.generate(
+        llm_api.LLMServiceRequest(
+            task_name=task_name,
+            request_type=f"plugin.{plugin_id}",
+            prompt=prompt,
+            temperature=args.get("temperature"),
+            max_tokens=args.get("max_tokens"),
+        )
    )
-    return {
-        "success": success,
-        "response": response,
-        "reasoning": reasoning,
-        "model_name": used_model,
-    }
-except Exception as e:
-    logger.error(f"[cap.llm.generate] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+    return result.to_capability_payload()
+except Exception as exc:
+    logger.error(f"[cap.llm.generate] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_llm_generate_with_tools(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""执行带工具的 LLM 生成能力。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 标准化后的 LLM 响应结构。
"""
del capability
from src.services import llm_service as llm_api
-prompt: str = args.get("prompt", "")
-if not prompt:
-    return {"success": False, "error": "Missing required parameter: prompt"}
-model_name: str = args.get("model", "") or args.get("model_name", "")
tool_options = args.get("tools") or args.get("tool_options")
-temperature = args.get("temperature")
-max_tokens = args.get("max_tokens")
if tool_options is not None and not isinstance(tool_options, list):
    return {"success": False, "error": "tools must be a list"}
try:
-    models = llm_api.get_available_models()
-    if model_name and model_name in models:
-        model_config = models[model_name]
-    else:
-        if not models:
-            return {"success": False, "error": "No model configuration is available"}
-        model_config = next(iter(models.values()))
-    success, response, reasoning, used_model, tool_calls = await llm_api.generate_with_model_with_tools(
-        prompt=prompt,
-        model_config=model_config,
-        tool_options=tool_options,
-        request_type=f"plugin.{plugin_id}",
-        temperature=temperature,
-        max_tokens=max_tokens,
+    prompt = _normalize_prompt_arg(args.get("prompt"))
+    task_name = llm_api.resolve_task_name(str(args.get("model", "") or args.get("model_name", "")))
+    result = await llm_api.generate(
+        llm_api.LLMServiceRequest(
+            task_name=task_name,
+            request_type=f"plugin.{plugin_id}",
+            prompt=prompt,
+            tool_options=tool_options,
+            temperature=args.get("temperature"),
+            max_tokens=args.get("max_tokens"),
+        )
    )
-    serialized_tool_calls = None
-    if tool_calls:
-        serialized_tool_calls = [
-            {
-                "id": tool_call.call_id,
-                "function": {"name": tool_call.func_name, "arguments": tool_call.args or {}},
-            }
-            for tool_call in tool_calls
-            if isinstance(tool_call, ToolCall)
-        ]
-    return {
-        "success": success,
-        "response": response,
-        "reasoning": reasoning,
-        "model_name": used_model,
-        "tool_calls": serialized_tool_calls,
-    }
-except Exception as e:
-    logger.error(f"[cap.llm.generate_with_tools] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+    return result.to_capability_payload()
+except Exception as exc:
+    logger.error(f"[cap.llm.generate_with_tools] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_llm_get_available_models(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""获取当前宿主可用的模型任务列表。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 可用模型列表。
"""
del plugin_id, capability, args
from src.services import llm_service as llm_api
try:
models = llm_api.get_available_models()
return {"success": True, "models": list(models.keys())}
-except Exception as e:
-    logger.error(f"[cap.llm.get_available_models] execution failed: {e}", exc_info=True)
-    return {"success": False, "error": str(e)}
+except Exception as exc:
+    logger.error(f"[cap.llm.get_available_models] execution failed: {exc}", exc_info=True)
+    return {"success": False, "error": str(exc)}
async def _cap_config_get(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
-key: str = args.get("key", "")
+"""Read a single field from the host's global config.
+
+Args:
+    plugin_id: Plugin identifier.
+    capability: Capability name.
+    args: Capability call arguments.
+
+Returns:
+    Any: The config read result.
+"""
+del plugin_id, capability
+key = str(args.get("key", ""))
default = args.get("default")
if not key:
return {"success": False, "value": None, "error": "缺少必要参数 key"}
@@ -234,37 +327,57 @@ class RuntimeCoreCapabilityMixin:
try:
value = _get_nested_config_value(global_config, key, default)
return {"success": True, "value": value}
-except Exception as e:
-    return {"success": False, "value": None, "error": str(e)}
+except Exception as exc:
+    return {"success": False, "value": None, "error": str(exc)}
async def _cap_config_get_plugin(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""读取指定插件的配置。
Args:
plugin_id: 当前插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 配置读取结果。
"""
del capability
from src.plugin_runtime.component_query import component_query_service
-plugin_name: str = args.get("plugin_name", plugin_id)
-key: str = args.get("key", "")
+plugin_name = str(args.get("plugin_name", plugin_id))
+key = str(args.get("key", ""))
default = args.get("default")
try:
config = component_query_service.get_plugin_config(plugin_name)
if config is None:
return {"success": False, "value": default, "error": f"未找到插件 {plugin_name} 的配置"}
if key:
value = _get_nested_config_value(config, key, default)
return {"success": True, "value": value}
return {"success": True, "value": config}
-except Exception as e:
-    return {"success": False, "value": default, "error": str(e)}
+except Exception as exc:
+    return {"success": False, "value": default, "error": str(exc)}
async def _cap_config_get_all(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""读取指定插件的全部配置。
Args:
plugin_id: 当前插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 配置读取结果。
"""
del capability
from src.plugin_runtime.component_query import component_query_service
-plugin_name: str = args.get("plugin_name", plugin_id)
+plugin_name = str(args.get("plugin_name", plugin_id))
try:
config = component_query_service.get_plugin_config(plugin_name)
if config is None:
return {"success": True, "value": {}}
return {"success": True, "value": config}
-except Exception as e:
-    return {"success": False, "value": {}, "error": str(e)}
+except Exception as exc:
+    return {"success": False, "value": {}, "error": str(exc)}

View File

@@ -10,7 +10,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl
from src.common.logger import get_logger
from src.core.types import ActionActivationType, ActionInfo, CommandInfo, ComponentInfo, ComponentType, ToolInfo
-from src.llm_models.payload_content.tool_option import ToolParamType
+from src.llm_models.payload_content.tool_option import normalize_tool_option
if TYPE_CHECKING:
from src.plugin_runtime.host.component_registry import ActionEntry, CommandEntry, ComponentEntry, ToolEntry
@@ -28,13 +28,6 @@ _HOST_COMPONENT_TYPE_MAP: Dict[ComponentType, str] = {
ComponentType.COMMAND: "COMMAND",
ComponentType.TOOL: "TOOL",
}
-_TOOL_PARAM_TYPE_MAP: Dict[str, ToolParamType] = {
-    "string": ToolParamType.STRING,
-    "integer": ToolParamType.INTEGER,
-    "float": ToolParamType.FLOAT,
-    "boolean": ToolParamType.BOOLEAN,
-    "bool": ToolParamType.BOOLEAN,
-}
class ComponentQueryService:
@@ -171,11 +164,9 @@ class ComponentQueryService:
return ActionInfo(
name=entry.name,
component_type=ComponentType.ACTION,
-description=str(metadata.get("description", "") or ""),
enabled=bool(entry.enabled),
plugin_name=entry.plugin_id,
-metadata=metadata,
action_parameters=action_parameters,
action_require=action_require,
associated_types=associated_types,
@@ -202,72 +193,48 @@ class ComponentQueryService:
metadata = dict(entry.metadata)
return CommandInfo(
name=entry.name,
component_type=ComponentType.COMMAND,
-description=str(metadata.get("description", "") or ""),
enabled=bool(entry.enabled),
plugin_name=entry.plugin_id,
-metadata=metadata,
command_pattern=str(metadata.get("command_pattern", "") or ""),
)
-@staticmethod
-def _coerce_tool_param_type(raw_value: Any) -> ToolParamType:
-    """Normalize a tool parameter type.
-
-    Args:
-        raw_value: Raw tool parameter type value.
-
-    Returns:
-        ToolParamType: The normalized tool parameter type.
-    """
-    normalized_value = str(raw_value or "").strip().lower()
-    return _TOOL_PARAM_TYPE_MAP.get(normalized_value, ToolParamType.STRING)
-
-@staticmethod
-def _build_tool_parameters(entry: "ToolEntry") -> list[tuple[str, ToolParamType, str, bool, list[str] | None]]:
-    """Convert runtime tool parameter metadata into the core ToolInfo parameter list.
-
-    Args:
-        entry: Tool entry in the plugin runtime.
-
-    Returns:
-        list[tuple[str, ToolParamType, str, bool, list[str] | None]]: The converted parameter list.
-    """
-    structured_parameters = entry.parameters if isinstance(entry.parameters, list) else []
-    if not structured_parameters and isinstance(entry.parameters_raw, dict):
-        structured_parameters = [
-            {"name": key, **value}
-            for key, value in entry.parameters_raw.items()
-            if isinstance(value, dict)
-        ]
-    normalized_parameters: list[tuple[str, ToolParamType, str, bool, list[str] | None]] = []
-    for parameter in structured_parameters:
-        if not isinstance(parameter, dict):
-            continue
-        parameter_name = str(parameter.get("name", "") or "").strip()
-        if not parameter_name:
-            continue
-        enum_values = parameter.get("enum")
-        normalized_enum_values = (
-            [str(item) for item in enum_values if item is not None]
-            if isinstance(enum_values, list)
-            else None
-        )
-        normalized_parameters.append(
-            (
-                parameter_name,
-                ComponentQueryService._coerce_tool_param_type(parameter.get("param_type") or parameter.get("type")),
-                str(parameter.get("description", "") or ""),
-                bool(parameter.get("required", True)),
-                normalized_enum_values,
-            )
-        )
-    return normalized_parameters
+@staticmethod
+def _build_tool_definition(entry: "ToolEntry") -> dict[str, Any]:
+    """Convert a runtime Tool entry into a raw tool definition dict.
+
+    Args:
+        entry: Tool entry in the plugin runtime.
+
+    Returns:
+        dict[str, Any]: A raw tool definition that can be handed to `normalize_tool_option()`.
+    """
+    raw_definition: dict[str, Any] = {
+        "name": entry.name,
+        "description": entry.description,
+    }
+    if isinstance(entry.parameters_raw, dict) and entry.parameters_raw:
+        raw_definition["parameters_schema"] = entry.parameters_raw
+        return raw_definition
+    if isinstance(entry.parameters, list) and entry.parameters:
+        raw_definition["parameters"] = entry.parameters
+        return raw_definition
+    if isinstance(entry.parameters_raw, list) and entry.parameters_raw:
+        raw_definition["parameters"] = entry.parameters_raw
+        return raw_definition
+    return raw_definition
+
+@staticmethod
+def _build_tool_parameters_schema(entry: "ToolEntry") -> dict[str, Any] | None:
+    """Convert a runtime Tool entry into an object-level parameter schema.
+
+    Args:
+        entry: Tool entry in the plugin runtime.
+
+    Returns:
+        dict[str, Any] | None: The normalized object-level parameter schema.
+    """
+    normalized_option = normalize_tool_option(ComponentQueryService._build_tool_definition(entry))
+    return normalized_option.parameters_schema
@staticmethod
def _build_tool_info(entry: "ToolEntry") -> ToolInfo:
@@ -282,13 +249,10 @@ class ComponentQueryService:
return ToolInfo(
name=entry.name,
component_type=ComponentType.TOOL,
-description=entry.description,
enabled=bool(entry.enabled),
plugin_name=entry.plugin_id,
-metadata=dict(entry.metadata),
-tool_parameters=ComponentQueryService._build_tool_parameters(entry),
+tool_description=entry.description,
+parameters_schema=ComponentQueryService._build_tool_parameters_schema(entry),
)
@staticmethod

View File

@@ -91,7 +91,7 @@ class ToolEntry(ComponentEntry):
def __init__(self, name: str, component_type: str, plugin_id: str, metadata: Dict[str, Any]) -> None:
self.description: str = metadata.get("description", "")
self.parameters: List[Dict[str, Any]] = metadata.get("parameters", [])
-self.parameters_raw: List[Dict[str, Any]] = metadata.get("parameters_raw", [])
+self.parameters_raw: Dict[str, Any] | List[Dict[str, Any]] = metadata.get("parameters_raw", {})
super().__init__(name, component_type, plugin_id, metadata)
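
Taken together with the component_query changes above, this widened type means a ToolEntry can now carry either a flat parameter list or a ready-made object-level schema, and both funnel through normalize_tool_option() into a single parameters_schema. A hedged illustration follows; the two input shapes mirror _build_tool_definition above, while the exact normalized output is an assumption:

    # Shape A: parameters_raw already holds an object-level schema (checked first).
    tool_metadata_a = {
        "description": "Query the weather",
        "parameters_raw": {
            "type": "object",
            "properties": {"city": {"type": "string", "description": "City name"}},
            "required": ["city"],
        },
    }

    # Shape B: the legacy flat parameter list.
    tool_metadata_b = {
        "description": "Query the weather",
        "parameters": [
            {"name": "city", "type": "string", "description": "City name", "required": True},
        ],
    }

    # Either entry is expected to normalize to one dict shaped like Shape A's
    # parameters_raw, as returned by ComponentQueryService._build_tool_parameters_schema().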