Merge remote-tracking branch 'upstream/r-dev' into sync/pr-1564-upstream-20260331

# Conflicts:
#	src/chat/brain_chat/PFC/conversation.py
#	src/chat/brain_chat/PFC/pfc_KnowledgeFetcher.py
#	src/chat/knowledge/lpmm_ops.py
This commit is contained in:
A-Dawn
2026-03-31 10:43:55 +08:00
179 changed files with 21829 additions and 20118 deletions

View File

@@ -29,6 +29,19 @@ class _RuntimeComponentManagerProtocol(Protocol):
def _build_api_unavailable_error(self, entry: "APIEntry") -> str: ...
def _collect_api_reference_matches(
self,
caller_plugin_id: str,
normalized_api_name: str,
normalized_version: str,
) -> tuple[List[tuple["PluginSupervisor", "APIEntry"]], List[tuple["PluginSupervisor", "APIEntry"]], bool]: ...
def _collect_api_toggle_reference_matches(
self,
normalized_name: str,
normalized_version: str,
) -> List[tuple["PluginSupervisor", "APIEntry"]]: ...
def _get_supervisor_for_plugin(self, plugin_id: str) -> Optional["PluginSupervisor"]: ...
def _resolve_api_target(
@@ -58,6 +71,73 @@ class _RuntimeComponentManagerProtocol(Protocol):
class RuntimeComponentCapabilityMixin:
def _collect_api_reference_matches(
    self: "_RuntimeComponentManagerProtocol",
    caller_plugin_id: str,
    normalized_api_name: str,
    normalized_version: str,
) -> tuple[List[tuple["PluginSupervisor", "APIEntry"]], List[tuple["PluginSupervisor", "APIEntry"]], bool]:
    """Collect exact API matches by full name or short name.

    This helper exists to support API names that themselves contain ``.``.
    Such names cannot simply be split on the last dot into
    ``plugin_id.api_name``, so matching is done against both the short name
    and the full name of every registered entry.

    Args:
        caller_plugin_id: ID of the calling plugin.
        normalized_api_name: Normalized API name to match.
        normalized_version: Normalized version string.

    Returns:
        tuple: Visible-and-enabled matches, visible-but-disabled matches,
        and a flag indicating whether at least one non-visible match exists.
    """
    enabled_hits: List[tuple["PluginSupervisor", "APIEntry"]] = []
    disabled_hits: List[tuple["PluginSupervisor", "APIEntry"]] = []
    saw_hidden = False
    for sup in self.supervisors:
        candidates = sup.api_registry.get_apis(
            version=normalized_version,
            enabled_only=False,
        )
        for api_entry in candidates:
            # Match either the short or the fully-qualified API name exactly.
            if normalized_api_name not in (api_entry.name, api_entry.full_name):
                continue
            if not self._is_api_visible_to_plugin(api_entry, caller_plugin_id):
                saw_hidden = True
                continue
            bucket = enabled_hits if api_entry.enabled else disabled_hits
            bucket.append((sup, api_entry))
    return enabled_hits, disabled_hits, saw_hidden
def _collect_api_toggle_reference_matches(
    self: "_RuntimeComponentManagerProtocol",
    normalized_name: str,
    normalized_version: str,
) -> List[tuple["PluginSupervisor", "APIEntry"]]:
    """Collect exact enable/disable-operation matches by full or short API name.

    Args:
        normalized_name: Normalized API name to match.
        normalized_version: Normalized version string.

    Returns:
        List[tuple[PluginSupervisor, APIEntry]]: All matching API entries,
        regardless of their enabled state.
    """
    # Disabled entries are included on purpose: a toggle operation must be
    # able to locate an API that is currently switched off.
    return [
        (sup, api_entry)
        for sup in self.supervisors
        for api_entry in sup.api_registry.get_apis(
            version=normalized_version,
            enabled_only=False,
        )
        if normalized_name in (api_entry.name, api_entry.full_name)
    ]
@staticmethod
def _normalize_component_type(component_type: str) -> str:
"""规范化组件类型名称。
@@ -69,7 +149,10 @@ class RuntimeComponentCapabilityMixin:
str: 统一转为大写后的组件类型名。
"""
return str(component_type or "").strip().upper()
normalized_component_type = str(component_type or "").strip().upper()
if normalized_component_type == "ACTION":
return "TOOL"
return normalized_component_type
@classmethod
def _is_api_component_type(cls, component_type: str) -> bool:
@@ -190,6 +273,20 @@ class RuntimeComponentCapabilityMixin:
if not normalized_api_name:
return None, None, "缺少必要参数 api_name"
exact_visible_enabled_matches, exact_visible_disabled_matches, exact_hidden_match_exists = (
self._collect_api_reference_matches(caller_plugin_id, normalized_api_name, normalized_version)
)
if len(exact_visible_enabled_matches) == 1:
return exact_visible_enabled_matches[0][0], exact_visible_enabled_matches[0][1], None
if len(exact_visible_enabled_matches) > 1:
return None, None, f"API 名称不唯一: {normalized_api_name},请显式指定 version"
if exact_visible_disabled_matches:
if len(exact_visible_disabled_matches) == 1:
return None, None, self._build_api_unavailable_error(exact_visible_disabled_matches[0][1])
return None, None, f"API {normalized_api_name} 存在多个已下线版本,请显式指定 version"
if exact_hidden_match_exists:
return None, None, f"API {normalized_api_name} 未公开,禁止跨插件调用"
if "." in normalized_api_name:
target_plugin_id, target_api_name = normalized_api_name.rsplit(".", 1)
try:
@@ -207,9 +304,7 @@ class RuntimeComponentCapabilityMixin:
enabled_only=False,
)
visible_enabled_entries = [
entry
for entry in entries
if self._is_api_visible_to_plugin(entry, caller_plugin_id) and entry.enabled
entry for entry in entries if self._is_api_visible_to_plugin(entry, caller_plugin_id) and entry.enabled
]
visible_disabled_entries = [
entry
@@ -281,6 +376,12 @@ class RuntimeComponentCapabilityMixin:
if not normalized_name:
return None, None, "缺少必要参数 name"
exact_matches = self._collect_api_toggle_reference_matches(normalized_name, normalized_version)
if len(exact_matches) == 1:
return exact_matches[0][0], exact_matches[0][1], None
if len(exact_matches) > 1:
return None, None, f"API 名称不唯一: {normalized_name},请显式指定 version"
if "." in normalized_name:
plugin_id, api_name = normalized_name.rsplit(".", 1)
try:

View File

@@ -1,33 +1,80 @@
from typing import Any, Dict
from typing import Any, Dict, List
from src.common.logger import get_logger
from src.config.config import global_config
from src.llm_models.payload_content.tool_option import ToolCall
logger = get_logger("plugin_runtime.integration")
def _get_nested_config_value(source: Any, key: str, default: Any = None) -> Any:
"""从嵌套对象或字典中读取配置值。
Args:
source: 配置对象或字典。
key: 以点号分隔的路径。
default: 未命中时返回的默认值。
Returns:
Any: 命中的值;读取失败时返回默认值。
"""
current = source
try:
for part in key.split("."):
if isinstance(current, dict) and part in current:
current = current[part]
elif hasattr(current, part):
continue
if hasattr(current, part):
current = getattr(current, part)
else:
raise KeyError(part)
continue
raise KeyError(part)
return current
except Exception:
return default
def _normalize_prompt_arg(prompt: Any) -> str | List[Dict[str, Any]]:
"""校验并规范化插件传入的提示参数。
Args:
prompt: 原始提示参数。
Returns:
str | List[Dict[str, Any]]: 规范化后的提示输入。
Raises:
ValueError: 提示参数缺失或结构不受支持时抛出。
"""
if isinstance(prompt, str):
if not prompt.strip():
raise ValueError("缺少必要参数 prompt")
return prompt
if isinstance(prompt, list) and prompt:
for index, prompt_message in enumerate(prompt, start=1):
if not isinstance(prompt_message, dict):
raise ValueError(f"prompt 第 {index} 项必须为字典")
return prompt
raise ValueError("缺少必要参数 prompt")
class RuntimeCoreCapabilityMixin:
"""插件运行时的核心能力混入。"""
async def _cap_send_text(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送文本消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
text: str = args.get("text", "")
stream_id: str = args.get("stream_id", "")
text = str(args.get("text", ""))
stream_id = str(args.get("stream_id", ""))
if not text or not stream_id:
return {"success": False, "error": "缺少必要参数 text 或 stream_id"}
@@ -35,20 +82,31 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.text_to_stream(
text=text,
stream_id=stream_id,
typing=args.get("typing", False),
set_reply=args.get("set_reply", False),
storage_message=args.get("storage_message", True),
typing=bool(args.get("typing", False)),
set_reply=bool(args.get("set_reply", False)),
storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
except Exception as e:
logger.error(f"[cap.send.text] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.send.text] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_send_emoji(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送表情图片。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
emoji_base64: str = args.get("emoji_base64", "")
stream_id: str = args.get("stream_id", "")
emoji_base64 = str(args.get("emoji_base64", ""))
stream_id = str(args.get("stream_id", ""))
if not emoji_base64 or not stream_id:
return {"success": False, "error": "缺少必要参数 emoji_base64 或 stream_id"}
@@ -56,18 +114,29 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.emoji_to_stream(
emoji_base64=emoji_base64,
stream_id=stream_id,
storage_message=args.get("storage_message", True),
storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
except Exception as e:
logger.error(f"[cap.send.emoji] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.send.emoji] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_send_image(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送图片。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
image_base64: str = args.get("image_base64", "")
stream_id: str = args.get("stream_id", "")
image_base64 = str(args.get("image_base64", ""))
stream_id = str(args.get("stream_id", ""))
if not image_base64 or not stream_id:
return {"success": False, "error": "缺少必要参数 image_base64 或 stream_id"}
@@ -75,18 +144,29 @@ class RuntimeCoreCapabilityMixin:
result = await send_api.image_to_stream(
image_base64=image_base64,
stream_id=stream_id,
storage_message=args.get("storage_message", True),
storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
except Exception as e:
logger.error(f"[cap.send.image] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.send.image] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_send_command(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送命令消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
command = args.get("command", "")
stream_id: str = args.get("stream_id", "")
command = str(args.get("command", ""))
stream_id = str(args.get("stream_id", ""))
if not command or not stream_id:
return {"success": False, "error": "缺少必要参数 command 或 stream_id"}
@@ -95,22 +175,33 @@ class RuntimeCoreCapabilityMixin:
message_type="command",
content=command,
stream_id=stream_id,
storage_message=args.get("storage_message", True),
display_message=args.get("display_message", ""),
storage_message=bool(args.get("storage_message", True)),
display_message=str(args.get("display_message", "")),
)
return {"success": result}
except Exception as e:
logger.error(f"[cap.send.command] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.send.command] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_send_custom(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""向指定流发送自定义消息。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 能力执行结果。
"""
del plugin_id, capability
from src.services import send_service as send_api
message_type: str = args.get("message_type", "") or args.get("custom_type", "")
message_type = str(args.get("message_type", "") or args.get("custom_type", ""))
content = args.get("content")
if content is None:
content = args.get("data", "")
stream_id: str = args.get("stream_id", "")
stream_id = str(args.get("stream_id", ""))
if not message_type or not stream_id:
return {"success": False, "error": "缺少必要参数 message_type 或 stream_id"}
@@ -119,114 +210,116 @@ class RuntimeCoreCapabilityMixin:
message_type=message_type,
content=content,
stream_id=stream_id,
display_message=args.get("display_message", ""),
typing=args.get("typing", False),
storage_message=args.get("storage_message", True),
display_message=str(args.get("display_message", "")),
typing=bool(args.get("typing", False)),
storage_message=bool(args.get("storage_message", True)),
)
return {"success": result}
except Exception as e:
logger.error(f"[cap.send.custom] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.send.custom] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_llm_generate(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""执行无工具的 LLM 生成能力。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 标准化后的 LLM 响应结构。
"""
del capability
from src.services import llm_service as llm_api
prompt: str = args.get("prompt", "")
if not prompt:
return {"success": False, "error": "缺少必要参数 prompt"}
model_name: str = args.get("model", "") or args.get("model_name", "")
temperature = args.get("temperature")
max_tokens = args.get("max_tokens")
try:
models = llm_api.get_available_models()
if model_name and model_name in models:
model_config = models[model_name]
else:
if not models:
return {"success": False, "error": "没有可用的模型配置"}
model_config = next(iter(models.values()))
success, response, reasoning, used_model = await llm_api.generate_with_model(
prompt=prompt,
model_config=model_config,
request_type=f"plugin.{plugin_id}",
temperature=temperature,
max_tokens=max_tokens,
prompt = _normalize_prompt_arg(args.get("prompt"))
task_name = llm_api.resolve_task_name(str(args.get("model", "") or args.get("model_name", "")))
result = await llm_api.generate(
llm_api.LLMServiceRequest(
task_name=task_name,
request_type=f"plugin.{plugin_id}",
prompt=prompt,
temperature=args.get("temperature"),
max_tokens=args.get("max_tokens"),
)
)
return {
"success": success,
"response": response,
"reasoning": reasoning,
"model_name": used_model,
}
except Exception as e:
logger.error(f"[cap.llm.generate] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
return result.to_capability_payload()
except Exception as exc:
logger.error(f"[cap.llm.generate] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_llm_generate_with_tools(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""执行带工具的 LLM 生成能力。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 标准化后的 LLM 响应结构。
"""
del capability
from src.services import llm_service as llm_api
prompt: str = args.get("prompt", "")
if not prompt:
return {"success": False, "error": "缺少必要参数 prompt"}
model_name: str = args.get("model", "") or args.get("model_name", "")
tool_options = args.get("tools") or args.get("tool_options")
temperature = args.get("temperature")
max_tokens = args.get("max_tokens")
if tool_options is not None and not isinstance(tool_options, list):
return {"success": False, "error": "tools 必须为列表"}
try:
models = llm_api.get_available_models()
if model_name and model_name in models:
model_config = models[model_name]
else:
if not models:
return {"success": False, "error": "没有可用的模型配置"}
model_config = next(iter(models.values()))
success, response, reasoning, used_model, tool_calls = await llm_api.generate_with_model_with_tools(
prompt=prompt,
model_config=model_config,
tool_options=tool_options,
request_type=f"plugin.{plugin_id}",
temperature=temperature,
max_tokens=max_tokens,
prompt = _normalize_prompt_arg(args.get("prompt"))
task_name = llm_api.resolve_task_name(str(args.get("model", "") or args.get("model_name", "")))
result = await llm_api.generate(
llm_api.LLMServiceRequest(
task_name=task_name,
request_type=f"plugin.{plugin_id}",
prompt=prompt,
tool_options=tool_options,
temperature=args.get("temperature"),
max_tokens=args.get("max_tokens"),
)
)
serialized_tool_calls = None
if tool_calls:
serialized_tool_calls = [
{
"id": tool_call.call_id,
"function": {"name": tool_call.func_name, "arguments": tool_call.args or {}},
}
for tool_call in tool_calls
if isinstance(tool_call, ToolCall)
]
return {
"success": success,
"response": response,
"reasoning": reasoning,
"model_name": used_model,
"tool_calls": serialized_tool_calls,
}
except Exception as e:
logger.error(f"[cap.llm.generate_with_tools] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
return result.to_capability_payload()
except Exception as exc:
logger.error(f"[cap.llm.generate_with_tools] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_llm_get_available_models(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""获取当前宿主可用的模型任务列表。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 可用模型列表。
"""
del plugin_id, capability, args
from src.services import llm_service as llm_api
try:
models = llm_api.get_available_models()
return {"success": True, "models": list(models.keys())}
except Exception as e:
logger.error(f"[cap.llm.get_available_models] 执行失败: {e}", exc_info=True)
return {"success": False, "error": str(e)}
except Exception as exc:
logger.error(f"[cap.llm.get_available_models] 执行失败: {exc}", exc_info=True)
return {"success": False, "error": str(exc)}
async def _cap_config_get(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
key: str = args.get("key", "")
"""读取宿主全局配置中的单个字段。
Args:
plugin_id: 插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 配置读取结果。
"""
del plugin_id, capability
key = str(args.get("key", ""))
default = args.get("default")
if not key:
return {"success": False, "value": None, "error": "缺少必要参数 key"}
@@ -234,37 +327,57 @@ class RuntimeCoreCapabilityMixin:
try:
value = _get_nested_config_value(global_config, key, default)
return {"success": True, "value": value}
except Exception as e:
return {"success": False, "value": None, "error": str(e)}
except Exception as exc:
return {"success": False, "value": None, "error": str(exc)}
async def _cap_config_get_plugin(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""读取指定插件的配置。
Args:
plugin_id: 当前插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 配置读取结果。
"""
del capability
from src.plugin_runtime.component_query import component_query_service
plugin_name: str = args.get("plugin_name", plugin_id)
key: str = args.get("key", "")
plugin_name = str(args.get("plugin_name", plugin_id))
key = str(args.get("key", ""))
default = args.get("default")
try:
config = component_query_service.get_plugin_config(plugin_name)
if config is None:
return {"success": False, "value": default, "error": f"未找到插件 {plugin_name} 的配置"}
if key:
value = _get_nested_config_value(config, key, default)
return {"success": True, "value": value}
return {"success": True, "value": config}
except Exception as e:
return {"success": False, "value": default, "error": str(e)}
except Exception as exc:
return {"success": False, "value": default, "error": str(exc)}
async def _cap_config_get_all(self, plugin_id: str, capability: str, args: Dict[str, Any]) -> Any:
"""读取指定插件的全部配置。
Args:
plugin_id: 当前插件标识。
capability: 能力名称。
args: 能力调用参数。
Returns:
Any: 配置读取结果。
"""
del capability
from src.plugin_runtime.component_query import component_query_service
plugin_name: str = args.get("plugin_name", plugin_id)
plugin_name = str(args.get("plugin_name", plugin_id))
try:
config = component_query_service.get_plugin_config(plugin_name)
if config is None:
return {"success": True, "value": {}}
return {"success": True, "value": config}
except Exception as e:
return {"success": False, "value": {}, "error": str(e)}
except Exception as exc:
return {"success": False, "value": {}, "error": str(exc)}