perf:优化webui界面,增加prompt模板元信息

This commit is contained in:
SengokuCola
2026-05-05 17:57:19 +08:00
parent 0d43d3ec05
commit a5e4ac8531
42 changed files with 826 additions and 410 deletions

View File

@@ -1,8 +1,12 @@
from __future__ import annotations
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path
from typing import Any
from tomlkit import parse as parse_toml
import json
import logging
import os
import re
@@ -22,6 +26,19 @@ STRICT_ENV_VALUES = {"1", "true", "yes", "on"}
extract_prompt_placeholders = extract_placeholders
@dataclass(frozen=True)
class PromptMetadata:
    """Display metadata attached to a prompt template (consumed by the WebUI)."""

    # Human-readable name to show instead of the file stem; empty means no override.
    display_name: str = ""
    # When True the template is considered "advanced" and UIs may hide it by default.
    advanced: bool = False
    # Free-form description of what the template does.
    description: str = ""
@dataclass(frozen=True)
class PromptTemplateInfo:
    """A resolved prompt template: its file path plus its display metadata."""

    # Absolute path of the .prompt file on disk.
    path: Path
    # Metadata loaded from sibling .meta.toml/.meta.json files (defaults if none).
    metadata: PromptMetadata
def get_prompts_root(prompts_root: Path | None = None) -> Path:
    """Return the resolved prompts root directory.

    Falls back to the module-level ``PROMPTS_ROOT`` when *prompts_root* is
    not provided.
    """
    chosen_root = prompts_root if prompts_root else PROMPTS_ROOT
    return chosen_root.resolve()
@@ -80,24 +97,86 @@ def iter_prompt_files(directory: Path, recursive: bool = True) -> list[Path]:
def _raise_duplicate_prompt_name(name: str, first_path: Path, second_path: Path, prompts_root: Path) -> None:
    """Raise ValueError for two prompt files sharing the same template name.

    The diff residue left duplicate ``path_a``/``path_b`` keyword arguments
    (old and new lines both present), which is a SyntaxError; this is the
    reconstructed post-change version. Paths are rendered relative to
    *prompts_root* with POSIX separators so the message is platform-stable.

    Raises:
        ValueError: always; message comes from the i18n ``t()`` helper.
    """
    path_a = first_path.relative_to(prompts_root).as_posix()
    path_b = second_path.relative_to(prompts_root).as_posix()
    raise ValueError(
        t(
            "prompt.duplicate_template_name",
            name=name,
            path_a=path_a,
            path_b=path_b,
        )
    )
def _scan_prompt_directory(directory: Path, prompts_root: Path) -> dict[str, Path]:
prompt_paths: dict[str, Path] = {}
def _coerce_metadata(raw_metadata: Any) -> PromptMetadata:
    """Build a PromptMetadata from an untrusted mapping.

    Non-dict input yields default metadata; wrongly-typed individual values
    silently fall back to their field defaults.
    """
    if not isinstance(raw_metadata, dict):
        return PromptMetadata()

    def _typed(key: str, expected: type, fallback):
        # Keep the value only when it has the expected type; else use the default.
        value = raw_metadata.get(key, fallback)
        return value if isinstance(value, expected) else fallback

    return PromptMetadata(
        display_name=_typed("display_name", str, ""),
        advanced=_typed("advanced", bool, False),
        description=_typed("description", str, ""),
    )
def _read_metadata_file(metadata_path: Path) -> dict[str, Any]:
if not metadata_path.is_file():
return {}
try:
if metadata_path.suffix == ".json":
metadata = json.loads(metadata_path.read_text(encoding="utf-8"))
else:
metadata = parse_toml(metadata_path.read_text(encoding="utf-8"))
except Exception as exc:
logger.warning("读取 Prompt 元信息文件 %s 失败:%s", metadata_path, exc)
return {}
return dict(metadata) if isinstance(metadata, dict) else {}
def _extract_template_metadata(metadata: dict[str, Any], prompt_name: str) -> dict[str, Any]:
templates = metadata.get("templates")
if isinstance(templates, dict) and isinstance(templates.get(prompt_name), dict):
return dict(templates[prompt_name])
prompt_metadata = metadata.get(prompt_name)
if isinstance(prompt_metadata, dict):
return dict(prompt_metadata)
return metadata if any(key in metadata for key in ("display_name", "advanced", "description")) else {}
def _load_prompt_metadata(prompt_path: Path) -> PromptMetadata:
    """Collect the merged metadata for a single prompt file.

    Candidate files are merged lowest-precedence first, so per-prompt
    ``<name>.meta.*`` entries override directory-wide ``.meta.*`` entries,
    and ``.toml`` overrides ``.json`` at the same level.
    """
    prompt_name = prompt_path.stem
    # Ordered lowest precedence first; later dict.update() calls win.
    candidate_files = [
        prompt_path.parent / ".meta.json",
        prompt_path.parent / ".meta.toml",
        prompt_path.with_name(f"{prompt_name}.meta.json"),
        prompt_path.with_name(f"{prompt_name}.meta.toml"),
    ]
    merged: dict[str, Any] = {}
    for candidate in candidate_files:
        merged.update(_extract_template_metadata(_read_metadata_file(candidate), prompt_name))
    return _coerce_metadata(merged)
def _scan_prompt_directory(directory: Path, prompts_root: Path) -> dict[str, PromptTemplateInfo]:
    """Index every prompt file under *directory* by template name (file stem).

    The rendered diff left the old body (Path values) interleaved with the
    new one (PromptTemplateInfo values), producing contradictory duplicate
    statements; this is the reconstructed new version.

    Raises:
        ValueError: via _raise_duplicate_prompt_name when two files in the
            same scan share a stem.
    """
    prompt_paths: dict[str, PromptTemplateInfo] = {}
    for prompt_path in iter_prompt_files(directory):
        prompt_name = prompt_path.stem
        existing_info = prompt_paths.get(prompt_name)
        if existing_info is not None:
            _raise_duplicate_prompt_name(prompt_name, existing_info.path, prompt_path, prompts_root)
        prompt_paths[prompt_name] = PromptTemplateInfo(path=prompt_path, metadata=_load_prompt_metadata(prompt_path))
    return prompt_paths
@@ -115,11 +194,11 @@ def _iter_locale_candidates(requested_locale: str) -> list[str]:
return locale_candidates
def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = None) -> dict[str, Path]:
def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = None) -> dict[str, PromptTemplateInfo]:
resolved_prompts_root = get_prompts_root(prompts_root)
requested_locale = normalize_locale(locale or get_locale())
prompt_paths: dict[str, Path] = {}
prompt_paths: dict[str, PromptTemplateInfo] = {}
for directory in _iter_prompt_template_layers(resolved_prompts_root, requested_locale):
prompt_paths.update(_scan_prompt_directory(directory, resolved_prompts_root))
@@ -149,7 +228,7 @@ def resolve_prompt_path(
else:
prompt_paths = list_prompt_templates(locale=requested_locale, prompts_root=resolved_prompts_root)
if normalized_name in prompt_paths:
return prompt_paths[normalized_name]
return prompt_paths[normalized_name].path
raise FileNotFoundError(t("prompt.template_not_found", locale=requested_locale, name=normalized_name))

View File

@@ -57,7 +57,7 @@ MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute(
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0-pre.11"
CONFIG_VERSION: str = "8.10.6"
CONFIG_VERSION: str = "8.10.7"
MODEL_CONFIG_VERSION: str = "1.15.3"
logger = get_logger("config")

View File

@@ -30,7 +30,7 @@ class ExampleConfig(ConfigBase):
class BotConfig(ConfigBase):
"""机器人配置类"""
__ui_label__ = "本信息"
__ui_label__ = ""
__ui_icon__ = "bot"
platform: str = Field(
@@ -87,6 +87,7 @@ class BotConfig(ConfigBase):
class PersonalityConfig(ConfigBase):
"""人格配置类"""
__ui_parent__ = "bot"
__ui_label__ = "人格"
__ui_icon__ = "user-circle"
@@ -1299,16 +1300,6 @@ class EmojiConfig(ConfigBase):
)
"""是否启用表情包过滤,只有符合该要求的表情包才会被保存"""
filtration_prompt: str = Field(
default="符合公序良俗",
json_schema_extra={
"advanced": True,
"x-widget": "input",
"x-icon": "shield",
},
)
"""表情包过滤要求,只有符合该要求的表情包才会被保存"""
class KeywordRuleConfig(ConfigBase):
"""关键词规则配置类"""

View File

@@ -915,11 +915,10 @@ class EmojiManager:
# 表情包审查
if global_config.emoji.content_filtration:
try:
filtration_prompt_template = prompt_manager.get_prompt("emoji_content_filtration")
filtration_prompt_template.add_context("demand", global_config.emoji.filtration_prompt)
filtration_prompt = await prompt_manager.render_prompt(filtration_prompt_template)
review_prompt_template = prompt_manager.get_prompt("emoji_content_filtration")
review_prompt = await prompt_manager.render_prompt(review_prompt_template)
filtration_result = await emoji_manager_vlm.generate_response_for_image(
filtration_prompt,
review_prompt,
image_base64,
image_format,
)

View File

@@ -13,7 +13,7 @@ from PIL import Image as PILImage
from PIL import ImageDraw, ImageFont
from pydantic import BaseModel, Field as PydanticField
from src.emoji_system.emoji_manager import emoji_manager
from src.emoji_system.emoji_manager import _is_vlm_task_configured, emoji_manager
from src.emoji_system.maisaka_tool import send_emoji_for_maisaka
from src.common.data_models.image_data_model import MaiEmoji
from src.common.data_models.message_component_data_model import ImageComponent, MessageSequence, TextComponent
@@ -38,6 +38,7 @@ _EMOJI_SUB_AGENT_MAX_TOKENS = 240
_EMOJI_MAX_CANDIDATE_COUNT = 64
_EMOJI_CANDIDATE_TILE_SIZE = 256
_EMOJI_SUCCESS_MESSAGE = "表情包发送成功"
_EMOJI_VLM_NOT_CONFIGURED_MESSAGE = "错误,没有配置视觉模型,无法使用表情包功能"
class EmojiSelectionResult(BaseModel):
@@ -298,6 +299,13 @@ def _resolve_emoji_selector_model_task_name() -> str:
return "vlm"
def _is_missing_visual_model_error(exc: Exception) -> bool:
    """Return True when *exc* indicates the visual (VLM) model is not configured."""
    message = str(exc)
    markers = (_EMOJI_VLM_NOT_CONFIGURED_MESSAGE, "未找到名为 '' 的模型")
    return any(marker in message for marker in markers)
async def _select_emoji_with_sub_agent(
tool_ctx: BuiltinToolRuntimeContext,
reasoning: str,
@@ -351,13 +359,17 @@ async def _select_emoji_with_sub_agent(
request_messages.append(candidate_llm_message)
serialized_request_messages = serialize_prompt_messages(request_messages)
model_task_name = _resolve_emoji_selector_model_task_name()
if model_task_name == "vlm" and not _is_vlm_task_configured():
raise RuntimeError(_EMOJI_VLM_NOT_CONFIGURED_MESSAGE)
selection_started_at = datetime.now()
response = await tool_ctx.runtime.run_sub_agent(
context_message_limit=_EMOJI_SUB_AGENT_CONTEXT_LIMIT,
system_prompt=system_prompt,
extra_messages=[prompt_message, candidate_message],
max_tokens=_EMOJI_SUB_AGENT_MAX_TOKENS,
model_task_name=_resolve_emoji_selector_model_task_name(),
model_task_name=model_task_name,
)
selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2)
@@ -448,7 +460,10 @@ async def handle_tool(
)
except Exception as exc:
logger.exception(f"{tool_ctx.runtime.log_prefix} 发送表情包时发生异常: {exc}")
structured_result["message"] = f"发送表情包时发生异常:{exc}"
if _is_missing_visual_model_error(exc):
structured_result["message"] = _EMOJI_VLM_NOT_CONFIGURED_MESSAGE
else:
structured_result["message"] = f"发送表情包时发生异常:{exc}"
return tool_ctx.build_failure_result(
invocation.tool_name,
structured_result["message"],

View File

@@ -274,12 +274,12 @@ class PromptManager:
Exception: 如果在加载过程中出现任何文件操作错误则引发该异常
"""
prompt_templates = list_prompt_templates(prompts_root=PROMPTS_DIR)
for prompt_name, prompt_file in prompt_templates.items():
for prompt_name, prompt_template in prompt_templates.items():
try:
template, need_save = self._load_prompt_template(prompt_name)
self.add_prompt(Prompt(prompt_name=prompt_name, template=template), need_save=need_save)
except Exception as exc:
logger.error(f"加载 Prompt 文件 '{prompt_file}' 时出错,错误信息: {exc}")
logger.error(f"加载 Prompt 文件 '{prompt_template.path}' 时出错,错误信息: {exc}")
raise
for prompt_file in CUSTOM_PROMPTS_DIR.glob(f"*{SUFFIX_PROMPT}"):
if prompt_file.stem in prompt_templates:

View File

@@ -14,6 +14,7 @@ from pydantic import BaseModel, Field
import tomlkit
from src.common.logger import get_logger
from src.common.prompt_i18n import list_prompt_templates
from src.config.config import CONFIG_DIR, PROJECT_ROOT, Config, ModelConfig
from src.config.config_base import AttributeData, ConfigBase
from src.config.model_configs import (
@@ -64,6 +65,9 @@ class PromptFileInfo(BaseModel):
name: str = Field(..., description="Prompt 文件名")
size: int = Field(..., description="文件大小")
modified_at: float = Field(..., description="最后修改时间戳")
display_name: str = Field(default="", description="Prompt 展示名称")
advanced: bool = Field(default=False, description="是否为高级 Prompt")
description: str = Field(default="", description="Prompt 描述")
class PromptCatalogResponse(BaseModel):
@@ -213,14 +217,20 @@ async def list_prompt_files():
continue
language = language_dir.name
prompt_template_infos = list_prompt_templates(locale=language, prompts_root=PROMPTS_DIR)
prompt_files: List[PromptFileInfo] = []
for prompt_file in sorted(language_dir.glob("*.prompt"), key=lambda item: item.name):
stat = prompt_file.stat()
template_info = prompt_template_infos.get(prompt_file.stem)
metadata = template_info.metadata if template_info and template_info.path == prompt_file else None
prompt_files.append(
PromptFileInfo(
name=prompt_file.name,
size=stat.st_size,
modified_at=stat.st_mtime,
display_name=metadata.display_name if metadata else "",
advanced=metadata.advanced if metadata else False,
description=metadata.description if metadata else "",
)
)