This commit is contained in:
Dreamwxz
2026-05-05 18:49:35 +08:00
55 changed files with 1133 additions and 555 deletions

View File

@@ -78,7 +78,7 @@ class ChatManager:
"""初始化聊天管理器"""
try:
await self.load_all_sessions_from_db()
logger.info(f"已加载 {len(self.sessions)} 个会话记录到内存中")
logger.debug(f"已加载 {len(self.sessions)} 个会话记录到内存中")
except Exception as e:
logger.error(f"初始化聊天管理器出现错误: {e}")

View File

@@ -829,20 +829,19 @@ def initialize_logging(verbose: bool = True):
reconfigure_existing_loggers()
# 启动日志清理任务
start_log_cleanup_task(verbose=verbose)
start_log_cleanup_task()
# 只在 verbose=True 时输出详细的初始化信息
if verbose:
logger = get_logger("logger")
console_level = LOG_CONFIG.get("console_log_level", LOG_CONFIG.get("log_level", "INFO"))
file_level = LOG_CONFIG.get("file_log_level", LOG_CONFIG.get("log_level", "INFO"))
logger.info("日志系统已初始化:")
logger.info(f" - 控制台级别: {console_level}")
logger.info(f" - 文件级别: {file_level}")
max_log_files = max(1, int(LOG_CONFIG.get("max_log_files", 30) or 30))
log_cleanup_days = max(1, int(LOG_CONFIG.get("log_cleanup_days", 30) or 30))
logger.info(f" - 轮转份数: {max_log_files}个文件|自动清理: {log_cleanup_days}天前的日志")
logger.info(
f"日志系统已初始化:控制台={console_level},文件={file_level}"
f"轮转={max_log_files}个文件,清理={log_cleanup_days}天前"
)
def cleanup_old_logs():
@@ -875,12 +874,8 @@ def cleanup_old_logs():
logger.error(f"清理旧日志文件时出错: {e}")
def start_log_cleanup_task(verbose: bool = True):
"""启动日志清理任务
Args:
verbose: 是否输出启动信息。默认为 True。
"""
def start_log_cleanup_task():
"""启动日志清理任务"""
global _cleanup_task_started
# 防止重复启动清理任务
@@ -897,12 +892,6 @@ def start_log_cleanup_task(verbose: bool = True):
cleanup_thread = threading.Thread(target=cleanup_task, daemon=True)
cleanup_thread.start()
if verbose:
logger = get_logger("logger")
max_log_files = max(1, int(LOG_CONFIG.get("max_log_files", 30) or 30))
log_cleanup_days = max(1, int(LOG_CONFIG.get("log_cleanup_days", 30) or 30))
logger.info(f"已启动日志清理任务,将自动清理{log_cleanup_days}天前的日志文件(轮转份数限制: {max_log_files}个文件)")
def shutdown_logging():
"""优雅关闭日志系统,释放所有文件句柄"""

View File

@@ -1,8 +1,12 @@
from __future__ import annotations
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path
from typing import Any
from tomlkit import parse as parse_toml
import json
import logging
import os
import re
@@ -22,6 +26,19 @@ STRICT_ENV_VALUES = {"1", "true", "yes", "on"}
extract_prompt_placeholders = extract_placeholders
@dataclass(frozen=True)
class PromptMetadata:
    """Display metadata attached to a single prompt template file."""

    # Human-readable name shown in the UI; empty string when not provided.
    display_name: str = ""
    # Marks the prompt as an advanced/expert option (hidden by default in simple views).
    advanced: bool = False
    # Free-form description of the prompt's purpose; empty string when not provided.
    description: str = ""
@dataclass(frozen=True)
class PromptTemplateInfo:
    """A resolved prompt template: the template file's path plus its parsed metadata."""

    # Filesystem path of the .prompt template file.
    path: Path
    # Metadata loaded from sibling *.meta.toml / *.meta.json / .meta.* files.
    metadata: PromptMetadata
def get_prompts_root(prompts_root: Path | None = None) -> Path:
    """Return the resolved prompts root directory.

    Args:
        prompts_root: Explicit root to use; when omitted (or falsy), the
            module-level ``PROMPTS_ROOT`` default is used instead.

    Returns:
        The chosen root as an absolute, resolved ``Path``.
    """
    if prompts_root:
        return prompts_root.resolve()
    return PROMPTS_ROOT.resolve()
@@ -80,24 +97,86 @@ def iter_prompt_files(directory: Path, recursive: bool = True) -> list[Path]:
def _raise_duplicate_prompt_name(name: str, first_path: Path, second_path: Path, prompts_root: Path) -> None:
path_a = first_path.relative_to(prompts_root).as_posix()
path_b = second_path.relative_to(prompts_root).as_posix()
raise ValueError(
t(
"prompt.duplicate_template_name",
name=name,
path_a=first_path.relative_to(prompts_root),
path_b=second_path.relative_to(prompts_root),
path_a=path_a,
path_b=path_b,
)
)
def _scan_prompt_directory(directory: Path, prompts_root: Path) -> dict[str, Path]:
prompt_paths: dict[str, Path] = {}
def _coerce_metadata(raw_metadata: Any) -> PromptMetadata:
    """Convert an untyped metadata mapping into a ``PromptMetadata``.

    Fields with unexpected types are silently replaced by their defaults, so a
    malformed metadata file can never break template loading.

    Args:
        raw_metadata: Parsed metadata of any shape; anything that is not a
            ``dict`` yields an all-defaults ``PromptMetadata``.

    Returns:
        A ``PromptMetadata`` with only well-typed values carried over.
    """
    if not isinstance(raw_metadata, dict):
        return PromptMetadata()

    def _string_of(key: str) -> str:
        # Accept only genuine strings; fall back to the empty-string default.
        value = raw_metadata.get(key, "")
        return value if isinstance(value, str) else ""

    advanced_value = raw_metadata.get("advanced", False)
    return PromptMetadata(
        display_name=_string_of("display_name"),
        advanced=advanced_value if isinstance(advanced_value, bool) else False,
        description=_string_of("description"),
    )
def _read_metadata_file(metadata_path: Path) -> dict[str, Any]:
if not metadata_path.is_file():
return {}
try:
if metadata_path.suffix == ".json":
metadata = json.loads(metadata_path.read_text(encoding="utf-8"))
else:
metadata = parse_toml(metadata_path.read_text(encoding="utf-8"))
except Exception as exc:
logger.warning("读取 Prompt 元信息文件 %s 失败:%s", metadata_path, exc)
return {}
return dict(metadata) if isinstance(metadata, dict) else {}
def _extract_template_metadata(metadata: dict[str, Any], prompt_name: str) -> dict[str, Any]:
templates = metadata.get("templates")
if isinstance(templates, dict) and isinstance(templates.get(prompt_name), dict):
return dict(templates[prompt_name])
prompt_metadata = metadata.get(prompt_name)
if isinstance(prompt_metadata, dict):
return dict(prompt_metadata)
return metadata if any(key in metadata for key in ("display_name", "advanced", "description")) else {}
def _load_prompt_metadata(prompt_path: Path) -> PromptMetadata:
    """Load merged metadata for a single prompt template file.

    Sources are applied lowest-precedence first, so later (more specific)
    files override earlier ones: directory ``.meta.json`` < directory
    ``.meta.toml`` < per-file ``<name>.meta.json`` < per-file
    ``<name>.meta.toml``.

    Args:
        prompt_path: Path to the ``.prompt`` template file.

    Returns:
        The coerced, merged ``PromptMetadata`` for the template.
    """
    stem = prompt_path.stem
    # Ordered lowest precedence -> highest precedence.
    candidates = [
        prompt_path.parent / ".meta.json",
        prompt_path.parent / ".meta.toml",
        prompt_path.with_name(f"{stem}.meta.json"),
        prompt_path.with_name(f"{stem}.meta.toml"),
    ]
    merged: dict[str, Any] = {}
    for candidate in candidates:
        merged.update(_extract_template_metadata(_read_metadata_file(candidate), stem))
    return _coerce_metadata(merged)
def _scan_prompt_directory(directory: Path, prompts_root: Path) -> dict[str, PromptTemplateInfo]:
prompt_paths: dict[str, PromptTemplateInfo] = {}
for prompt_path in iter_prompt_files(directory):
prompt_name = prompt_path.stem
existing_path = prompt_paths.get(prompt_name)
if existing_path is not None:
_raise_duplicate_prompt_name(prompt_name, existing_path, prompt_path, prompts_root)
prompt_paths[prompt_name] = prompt_path
existing_info = prompt_paths.get(prompt_name)
if existing_info is not None:
_raise_duplicate_prompt_name(prompt_name, existing_info.path, prompt_path, prompts_root)
prompt_paths[prompt_name] = PromptTemplateInfo(path=prompt_path, metadata=_load_prompt_metadata(prompt_path))
return prompt_paths
@@ -115,11 +194,11 @@ def _iter_locale_candidates(requested_locale: str) -> list[str]:
return locale_candidates
def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = None) -> dict[str, Path]:
def list_prompt_templates(locale: str | None = None, prompts_root: Path | None = None) -> dict[str, PromptTemplateInfo]:
resolved_prompts_root = get_prompts_root(prompts_root)
requested_locale = normalize_locale(locale or get_locale())
prompt_paths: dict[str, Path] = {}
prompt_paths: dict[str, PromptTemplateInfo] = {}
for directory in _iter_prompt_template_layers(resolved_prompts_root, requested_locale):
prompt_paths.update(_scan_prompt_directory(directory, resolved_prompts_root))
@@ -149,7 +228,7 @@ def resolve_prompt_path(
else:
prompt_paths = list_prompt_templates(locale=requested_locale, prompts_root=resolved_prompts_root)
if normalized_name in prompt_paths:
return prompt_paths[normalized_name]
return prompt_paths[normalized_name].path
raise FileNotFoundError(t("prompt.template_not_found", locale=requested_locale, name=normalized_name))

View File

@@ -57,7 +57,7 @@ MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute(
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0-pre.11"
CONFIG_VERSION: str = "8.10.6"
CONFIG_VERSION: str = "8.10.7"
MODEL_CONFIG_VERSION: str = "1.15.3"
logger = get_logger("config")

View File

@@ -30,7 +30,7 @@ class ExampleConfig(ConfigBase):
class BotConfig(ConfigBase):
"""机器人配置类"""
__ui_label__ = "本信息"
__ui_label__ = ""
__ui_icon__ = "bot"
platform: str = Field(
@@ -40,6 +40,7 @@ class BotConfig(ConfigBase):
"x-icon": "wifi",
"x-layout": "inline-right",
"x-input-width": "12rem",
"x-row": "bot-platform-account",
},
)
"""平台"""
@@ -51,6 +52,7 @@ class BotConfig(ConfigBase):
"x-icon": "user",
"x-layout": "inline-right",
"x-input-width": "12rem",
"x-row": "bot-platform-account",
},
)
"""QQ账号"""
@@ -87,6 +89,7 @@ class BotConfig(ConfigBase):
class PersonalityConfig(ConfigBase):
"""人格配置类"""
__ui_parent__ = "bot"
__ui_label__ = "人格"
__ui_icon__ = "user-circle"
@@ -1299,16 +1302,6 @@ class EmojiConfig(ConfigBase):
)
"""是否启用表情包过滤,只有符合该要求的表情包才会被保存"""
filtration_prompt: str = Field(
default="符合公序良俗",
json_schema_extra={
"advanced": True,
"x-widget": "input",
"x-icon": "shield",
},
)
"""表情包过滤要求,只有符合该要求的表情包才会被保存"""
class KeywordRuleConfig(ConfigBase):
"""关键词规则配置类"""

View File

@@ -915,11 +915,10 @@ class EmojiManager:
# 表情包审查
if global_config.emoji.content_filtration:
try:
filtration_prompt_template = prompt_manager.get_prompt("emoji_content_filtration")
filtration_prompt_template.add_context("demand", global_config.emoji.filtration_prompt)
filtration_prompt = await prompt_manager.render_prompt(filtration_prompt_template)
review_prompt_template = prompt_manager.get_prompt("emoji_content_filtration")
review_prompt = await prompt_manager.render_prompt(review_prompt_template)
filtration_result = await emoji_manager_vlm.generate_response_for_image(
filtration_prompt,
review_prompt,
image_base64,
image_format,
)

View File

@@ -13,7 +13,7 @@ from PIL import Image as PILImage
from PIL import ImageDraw, ImageFont
from pydantic import BaseModel, Field as PydanticField
from src.emoji_system.emoji_manager import emoji_manager
from src.emoji_system.emoji_manager import _is_vlm_task_configured, emoji_manager
from src.emoji_system.maisaka_tool import send_emoji_for_maisaka
from src.common.data_models.image_data_model import MaiEmoji
from src.common.data_models.message_component_data_model import ImageComponent, MessageSequence, TextComponent
@@ -38,6 +38,7 @@ _EMOJI_SUB_AGENT_MAX_TOKENS = 240
_EMOJI_MAX_CANDIDATE_COUNT = 64
_EMOJI_CANDIDATE_TILE_SIZE = 256
_EMOJI_SUCCESS_MESSAGE = "表情包发送成功"
_EMOJI_VLM_NOT_CONFIGURED_MESSAGE = "错误,没有配置视觉模型,无法使用表情包功能"
class EmojiSelectionResult(BaseModel):
@@ -298,6 +299,13 @@ def _resolve_emoji_selector_model_task_name() -> str:
return "vlm"
def _is_missing_visual_model_error(exc: Exception) -> bool:
    """Return True when *exc* indicates the visual (VLM) model is not configured.

    Detection is substring-based: either the module's own not-configured
    message or the upstream "未找到名为 '' 的模型" error text.
    """
    error_text = str(exc)
    markers = (_EMOJI_VLM_NOT_CONFIGURED_MESSAGE, "未找到名为 '' 的模型")
    return any(marker in error_text for marker in markers)
async def _select_emoji_with_sub_agent(
tool_ctx: BuiltinToolRuntimeContext,
reasoning: str,
@@ -351,13 +359,17 @@ async def _select_emoji_with_sub_agent(
request_messages.append(candidate_llm_message)
serialized_request_messages = serialize_prompt_messages(request_messages)
model_task_name = _resolve_emoji_selector_model_task_name()
if model_task_name == "vlm" and not _is_vlm_task_configured():
raise RuntimeError(_EMOJI_VLM_NOT_CONFIGURED_MESSAGE)
selection_started_at = datetime.now()
response = await tool_ctx.runtime.run_sub_agent(
context_message_limit=_EMOJI_SUB_AGENT_CONTEXT_LIMIT,
system_prompt=system_prompt,
extra_messages=[prompt_message, candidate_message],
max_tokens=_EMOJI_SUB_AGENT_MAX_TOKENS,
model_task_name=_resolve_emoji_selector_model_task_name(),
model_task_name=model_task_name,
)
selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2)
@@ -448,7 +460,10 @@ async def handle_tool(
)
except Exception as exc:
logger.exception(f"{tool_ctx.runtime.log_prefix} 发送表情包时发生异常: {exc}")
structured_result["message"] = f"发送表情包时发生异常:{exc}"
if _is_missing_visual_model_error(exc):
structured_result["message"] = _EMOJI_VLM_NOT_CONFIGURED_MESSAGE
else:
structured_result["message"] = f"发送表情包时发生异常:{exc}"
return tool_ctx.build_failure_result(
invocation.tool_name,
structured_result["message"],

View File

@@ -274,12 +274,12 @@ class PromptManager:
Exception: 如果在加载过程中出现任何文件操作错误则引发该异常
"""
prompt_templates = list_prompt_templates(prompts_root=PROMPTS_DIR)
for prompt_name, prompt_file in prompt_templates.items():
for prompt_name, prompt_template in prompt_templates.items():
try:
template, need_save = self._load_prompt_template(prompt_name)
self.add_prompt(Prompt(prompt_name=prompt_name, template=template), need_save=need_save)
except Exception as exc:
logger.error(f"加载 Prompt 文件 '{prompt_file}' 时出错,错误信息: {exc}")
logger.error(f"加载 Prompt 文件 '{prompt_template.path}' 时出错,错误信息: {exc}")
raise
for prompt_file in CUSTOM_PROMPTS_DIR.glob(f"*{SUFFIX_PROMPT}"):
if prompt_file.stem in prompt_templates:

View File

@@ -134,7 +134,7 @@ def _setup_anti_crawler(app: FastAPI):
"basic": t("startup.webui_anti_crawler_mode_basic"),
}
mode_desc = mode_descriptions.get(anti_crawler_mode, t("startup.webui_anti_crawler_mode_basic"))
logger.info(t("startup.webui_anti_crawler_configured", mode_desc=mode_desc))
logger.debug(t("startup.webui_anti_crawler_configured", mode_desc=mode_desc))
except Exception as e:
logger.error(t("startup.webui_anti_crawler_config_failed", error=e), exc_info=True)
@@ -159,7 +159,7 @@ def _register_api_routes(app: FastAPI):
for router in get_all_routers():
app.include_router(router)
logger.info(t("startup.webui_api_routes_registered"))
logger.debug(t("startup.webui_api_routes_registered"))
except Exception as e:
logger.error(t("startup.webui_api_routes_register_failed", error=e), exc_info=True)
@@ -217,7 +217,7 @@ def _setup_static_files(app: FastAPI):
response.headers["X-Robots-Tag"] = "noindex, nofollow, noarchive"
return response
logger.info(t("startup.webui_static_files_configured", static_path=static_path))
logger.debug(t("startup.webui_static_files_configured", static_path=static_path))
def _resolve_static_path() -> Path | None:
@@ -247,6 +247,5 @@ def show_access_token():
token_manager = get_token_manager()
current_token = token_manager.get_token()
logger.info(t("startup.webui_access_token", token=current_token))
logger.info(t("startup.webui_access_token_login_hint"))
except Exception as e:
logger.error(t("startup.webui_access_token_failed", error=e))

View File

@@ -14,6 +14,7 @@ from pydantic import BaseModel, Field
import tomlkit
from src.common.logger import get_logger
from src.common.prompt_i18n import list_prompt_templates
from src.config.config import CONFIG_DIR, PROJECT_ROOT, Config, ModelConfig
from src.config.config_base import AttributeData, ConfigBase
from src.config.model_configs import (
@@ -64,6 +65,9 @@ class PromptFileInfo(BaseModel):
name: str = Field(..., description="Prompt 文件名")
size: int = Field(..., description="文件大小")
modified_at: float = Field(..., description="最后修改时间戳")
display_name: str = Field(default="", description="Prompt 展示名称")
advanced: bool = Field(default=False, description="是否为高级 Prompt")
description: str = Field(default="", description="Prompt 描述")
class PromptCatalogResponse(BaseModel):
@@ -213,14 +217,20 @@ async def list_prompt_files():
continue
language = language_dir.name
prompt_template_infos = list_prompt_templates(locale=language, prompts_root=PROMPTS_DIR)
prompt_files: List[PromptFileInfo] = []
for prompt_file in sorted(language_dir.glob("*.prompt"), key=lambda item: item.name):
stat = prompt_file.stat()
template_info = prompt_template_infos.get(prompt_file.stem)
metadata = template_info.metadata if template_info and template_info.path == prompt_file else None
prompt_files.append(
PromptFileInfo(
name=prompt_file.name,
size=stat.st_size,
modified_at=stat.st_mtime,
display_name=metadata.display_name if metadata else "",
advanced=metadata.advanced if metadata else False,
description=metadata.description if metadata else "",
)
)

View File

@@ -15,6 +15,7 @@ from src.common.logger import get_logger
from src.webui.dependencies import require_auth
logger = get_logger("webui.expression")
EXCLUDE_IDS_QUERY = Query(None, description="需要排除的表达方式 ID")
# 创建路由器
router = APIRouter(prefix="/expression", tags=["Expression"], dependencies=[Depends(require_auth)])
@@ -660,8 +661,10 @@ async def get_review_list(
page: int = Query(1, ge=1, description="页码"),
page_size: int = Query(20, ge=1, le=100, description="每页数量"),
filter_type: str = Query("unchecked", description="筛选类型: unchecked/passed/rejected/all"),
order: str = Query("latest", description="排序方式: latest/random"),
search: Optional[str] = Query(None, description="搜索关键词"),
chat_id: Optional[str] = Query(None, description="聊天ID筛选"),
exclude_ids: Optional[List[int]] = EXCLUDE_IDS_QUERY,
) -> ReviewListResponse:
"""获取待审核或已审核的表达方式列表。
@@ -669,8 +672,10 @@ async def get_review_list(
page: 页码。
page_size: 每页数量。
filter_type: 筛选类型,可选 unchecked、passed、rejected 或 all。
order: 排序方式,可选 latest 或 random。
search: 搜索关键词。
chat_id: 聊天 ID 筛选条件。
exclude_ids: 需要排除的表达方式 ID。
Returns:
ReviewListResponse: 审核列表响应。
@@ -689,11 +694,17 @@ async def get_review_list(
if chat_id:
statement = statement.where(col(Expression.session_id) == chat_id)
# 排序:创建时间倒序
statement = statement.order_by(
case((col(Expression.create_time).is_(None), 1), else_=0),
col(Expression.create_time).desc(),
)
if exclude_ids:
statement = statement.where(~col(Expression.id).in_(exclude_ids))
if order == "random":
statement = statement.order_by(func.random())
else:
# 排序:创建时间倒序
statement = statement.order_by(
case((col(Expression.create_time).is_(None), 1), else_=0),
col(Expression.create_time).desc(),
)
offset = (page - 1) * page_size
statement = statement.offset(offset).limit(page_size)
@@ -731,7 +742,7 @@ class BatchReviewItem(BaseModel):
id: int
rejected: bool
require_unchecked: bool = True # 默认要求未检查状态
require_unchecked: bool = True # 前端保留的来源标记,人工审核提交时不再阻断覆盖
class BatchReviewRequest(BaseModel):
@@ -790,14 +801,6 @@ async def batch_review_expressions(
failed += 1
continue
# 冲突检测:未审核列表发起的操作只允许处理仍处于未审核状态的条目。
if item.require_unchecked and expression.checked:
results.append(
BatchReviewResultItem(id=item.id, success=False, message="该表达方式已被审核,请刷新列表后重试")
)
failed += 1
continue
# 更新状态
with get_db_session() as session:
db_expression = session.exec(