-
-
- {renderSectionIcon(nestedSchema.uiIcon)}
-
{sectionTitle}
+
+
+
+
+
{sectionTitle}
+
+ {sectionDescription && (
+
+ {sectionDescription}
+
+ )}
- {sectionDescription && (
-
- {sectionDescription}
-
- )}
= ({
return (
- {/* Label with icon */}
-
+
+ {/* Label with icon */}
+
+
+ {/* Description */}
+ {schema.description && (
+
{schema.description}
+ )}
+
{/* Input component */}
{renderInputComponent()}
-
- {/* Description */}
- {schema.description && (
-
{schema.description}
- )}
)
}
diff --git a/dashboard/src/components/layout/constants.ts b/dashboard/src/components/layout/constants.ts
index 7de21c3d..f908fd0f 100644
--- a/dashboard/src/components/layout/constants.ts
+++ b/dashboard/src/components/layout/constants.ts
@@ -1,4 +1,4 @@
-import { Activity, Boxes, Database, FileSearch, FileText, Hash, Home, LayoutGrid, MessageSquare, Network, Package, Server, Settings, Sliders, Smile, UserCircle } from 'lucide-react'
+import { Activity, Boxes, Database, FileSearch, FileText, Hash, Home, MessageSquare, Network, Package, Server, Settings, Sliders, Smile, UserCircle } from 'lucide-react'
import type { MenuSection } from './types'
@@ -15,7 +15,6 @@ export const menuSections: MenuSection[] = [
{ icon: FileText, label: 'sidebar.menu.botMainConfig', path: '/config/bot', searchDescription: 'search.items.botConfigDesc' },
{ icon: Server, label: 'sidebar.menu.aiModelProvider', path: '/config/modelProvider', searchDescription: 'search.items.modelProviderDesc', tourId: 'sidebar-model-provider' },
{ icon: Boxes, label: 'sidebar.menu.modelManagement', path: '/config/model', searchDescription: 'search.items.modelDesc', tourId: 'sidebar-model-management' },
- { icon: Sliders, label: 'sidebar.menu.adapterConfig', path: '/config/adapter' },
],
},
{
@@ -33,7 +32,6 @@ export const menuSections: MenuSection[] = [
title: 'sidebar.groups.extensionsMonitor',
items: [
{ icon: Package, label: 'sidebar.menu.pluginMarket', path: '/plugins', searchDescription: 'search.items.pluginsDesc' },
- { icon: LayoutGrid, label: 'sidebar.menu.configTemplate', path: '/config/pack-market' },
{ icon: Sliders, label: 'sidebar.menu.pluginConfig', path: '/plugin-config' },
{ icon: FileSearch, label: 'sidebar.menu.logViewer', path: '/logs', searchDescription: 'search.items.logsDesc' },
{ icon: Activity, label: 'sidebar.menu.maisakaMonitor', path: '/planner-monitor' },
diff --git a/dashboard/src/i18n/locales/en.json b/dashboard/src/i18n/locales/en.json
index a3bc03cf..3a7c43a9 100644
--- a/dashboard/src/i18n/locales/en.json
+++ b/dashboard/src/i18n/locales/en.json
@@ -500,17 +500,13 @@
"title": "Personality",
"description": "Define the bot's personality and speaking style"
},
- "emoji": {
- "title": "Emoji",
- "description": "Configure emoji-related settings"
- },
- "other": {
- "title": "Other Settings",
- "description": "Configure global slang and other basic options"
- },
- "siliconFlow": {
+ "apiProvider": {
"title": "API Setup",
- "description": "Configure the SiliconFlow API key"
+ "description": "Configure the API provider"
+ },
+ "modelSetup": {
+ "title": "Model Setup",
+ "description": "Configure planner and replyer models"
}
},
"loading": {
@@ -528,7 +524,12 @@
"selectPlatform": "Please select a platform",
"enterNickname": "Please enter a nickname",
"enterQqAccount": "Please enter a QQ account",
- "enterAccountId": "Please enter an account ID"
+ "enterAccountId": "Please enter an account ID",
+ "enterProviderName": "Please enter an API provider name",
+ "enterBaseUrl": "Please enter the API base URL",
+ "enterApiKey": "Please enter the API key",
+ "enterPlannerModelIdentifier": "Please enter the planner model identifier",
+ "enterReplyerModelIdentifier": "Please enter the replyer model identifier"
},
"toast": {
"loadFailedTitle": "Failed to load configuration",
@@ -667,33 +668,43 @@
"description": "Allow the bot to learn and use group-specific slang"
}
},
- "siliconFlow": {
- "about": {
- "title": "About SiliconFlow",
- "description": "SiliconFlow provides broad model coverage, including DeepSeek V3, Qwen, vision models, speech recognition, and embedding models. A single API key unlocks all MaiBot features.",
- "link": "Get an API key from SiliconFlow"
+ "apiProvider": {
+ "providerName": {
+ "label": "API Provider Name *",
+ "placeholder": "e.g. OpenAI, DeepSeek, or a self-hosted service",
+ "description": "This name is written to model_config.toml and referenced by the models below"
+ },
+ "baseUrl": {
+ "label": "API Base URL *",
+ "description": "Enter an OpenAI-compatible endpoint, for example https://api.example.com/v1"
},
"apiKey": {
- "label": "SiliconFlow API Key *",
- "description": "Enter your SiliconFlow API key. Once provided, MaiBot will automatically configure all required models.",
+ "label": "API Key *",
+ "description": "Enter the API key for this provider",
"show": "Show API key",
"hide": "Hide API key"
- },
- "autoConfig": {
- "title": "The following models will be configured automatically:",
- "items": {
- "deepseek": "DeepSeek V3 - primary chat and tool model",
- "qwen3": "Qwen3 30B - frequent small tasks and tool calls",
- "qwen3Vl": "Qwen3 VL 30B - image recognition",
- "senseVoice": "SenseVoice - speech recognition",
- "bgeM3": "BGE-M3 - text embeddings",
- "lpmm": "Knowledge-base-related models (LPMM)"
+ }
+ },
+ "modelSetup": {
+ "planner": {
+ "identifier": {
+ "label": "Planner Model Identifier *",
+ "description": "The real model ID provided by the API service; the model name will be initialized from it"
+ },
+ "visual": {
+ "label": "Enable vision"
}
},
- "hint": {
- "title": "Tip: ",
- "description": "After finishing the wizard, you can add more API providers and models in \"System Settings -> Model Config\"."
- }
+ "replyer": {
+ "identifier": {
+ "label": "Replyer Model Identifier *",
+ "description": "The real model ID provided by the API service; the model name will be initialized from it"
+ },
+ "visual": {
+ "label": "Enable vision"
+ }
+ },
+ "saveHint": "You can configure more detailed task assignment later."
}
}
},
diff --git a/dashboard/src/i18n/locales/ja.json b/dashboard/src/i18n/locales/ja.json
index 3dd08798..c011e655 100644
--- a/dashboard/src/i18n/locales/ja.json
+++ b/dashboard/src/i18n/locales/ja.json
@@ -500,17 +500,13 @@
"title": "人格設定",
"description": "ボットの性格や話し方を定義します"
},
- "emoji": {
- "title": "絵文字パック",
- "description": "絵文字パック関連の設定を行います"
- },
- "other": {
- "title": "その他の設定",
- "description": "グローバルスラングなどの基本オプションを設定します"
- },
- "siliconFlow": {
+ "apiProvider": {
"title": "API設定",
- "description": "SiliconFlow API キーを設定します"
+ "description": "APIプロバイダーを設定します"
+ },
+ "modelSetup": {
+ "title": "モデル設定",
+ "description": "planner と replyer モデルを設定します"
}
},
"loading": {
@@ -528,7 +524,12 @@
"selectPlatform": "プラットフォームを選択してください",
"enterNickname": "ニックネームを入力してください",
"enterQqAccount": "QQ アカウントを入力してください",
- "enterAccountId": "アカウント ID を入力してください"
+ "enterAccountId": "アカウント ID を入力してください",
+ "enterProviderName": "APIプロバイダー名を入力してください",
+ "enterBaseUrl": "API Base URL を入力してください",
+ "enterApiKey": "API Key を入力してください",
+ "enterPlannerModelIdentifier": "planner モデル識別子を入力してください",
+ "enterReplyerModelIdentifier": "replyer モデル識別子を入力してください"
},
"toast": {
"loadFailedTitle": "設定の読み込みに失敗しました",
@@ -667,33 +668,43 @@
"description": "グループ内のスラングを学習して使えるようにします"
}
},
- "siliconFlow": {
- "about": {
- "title": "SiliconFlow について",
- "description": "SiliconFlow は DeepSeek V3、Qwen、ビジョンモデル、音声認識、埋め込みモデルなど幅広いモデルを提供します。API Key が1つあれば MaiBot の全機能を利用できます。",
- "link": "SiliconFlow で API Key を取得する"
+ "apiProvider": {
+ "providerName": {
+ "label": "APIプロバイダー名 *",
+ "placeholder": "例: OpenAI、DeepSeek、自ホストサービス",
+ "description": "この名前は model_config.toml に保存され、下のモデルから参照されます"
+ },
+ "baseUrl": {
+ "label": "API Base URL *",
+ "description": "OpenAI互換エンドポイントを入力してください。例: https://api.example.com/v1"
},
"apiKey": {
- "label": "SiliconFlow API Key *",
- "description": "SiliconFlow の API Key を入力してください。入力後、MaiBot が必要なモデルを自動設定します。",
+ "label": "API Key *",
+ "description": "このプロバイダーの API Key を入力してください",
"show": "API Key を表示",
- "hide": "API Key を隠す"
- },
- "autoConfig": {
- "title": "以下のモデルが自動設定されます:",
- "items": {
- "deepseek": "DeepSeek V3 - メインの会話・ツールモデル",
- "qwen3": "Qwen3 30B - 頻繁な小タスクとツール呼び出し",
- "qwen3Vl": "Qwen3 VL 30B - 画像認識",
- "senseVoice": "SenseVoice - 音声認識",
- "bgeM3": "BGE-M3 - テキスト埋め込み",
- "lpmm": "知識ベース関連モデル (LPMM)"
+ "hide": "API Key を非表示"
+ }
+ },
+ "modelSetup": {
+ "planner": {
+ "identifier": {
+ "label": "planner モデル識別子 *",
+ "description": "APIサービスが提供する実際のモデルID。モデル名はこの識別子で初期化されます"
+ },
+ "visual": {
+ "label": "ビジョンを有効化"
}
},
- "hint": {
- "title": "ヒント:",
- "description": "ウィザード完了後は、「システム設定 -> モデル設定」でさらに API プロバイダーやモデルを追加できます。"
- }
+ "replyer": {
+ "identifier": {
+ "label": "replyer モデル識別子 *",
+ "description": "APIサービスが提供する実際のモデルID。モデル名はこの識別子で初期化されます"
+ },
+ "visual": {
+ "label": "ビジョンを有効化"
+ }
+ },
+ "saveHint": "より詳細なタスク割り当ては後で設定できます。"
}
}
},
diff --git a/dashboard/src/i18n/locales/ko.json b/dashboard/src/i18n/locales/ko.json
index 5a6f294d..7bb1487a 100644
--- a/dashboard/src/i18n/locales/ko.json
+++ b/dashboard/src/i18n/locales/ko.json
@@ -500,17 +500,13 @@
"title": "성격 설정",
"description": "봇의 성격과 말투를 정의합니다"
},
- "emoji": {
- "title": "이모지 팩",
- "description": "이모지 관련 설정을 구성합니다"
- },
- "other": {
- "title": "기타 설정",
- "description": "전역 슬랭 등 기본 옵션을 설정합니다"
- },
- "siliconFlow": {
+ "apiProvider": {
"title": "API 설정",
- "description": "SiliconFlow API 키를 설정합니다"
+ "description": "API 제공자를 설정합니다"
+ },
+ "modelSetup": {
+ "title": "모델 설정",
+ "description": "planner와 replyer 모델을 설정합니다"
}
},
"loading": {
@@ -528,7 +524,12 @@
"selectPlatform": "플랫폼을 선택해 주세요",
"enterNickname": "닉네임을 입력해 주세요",
"enterQqAccount": "QQ 계정을 입력해 주세요",
- "enterAccountId": "계정 ID를 입력해 주세요"
+ "enterAccountId": "계정 ID를 입력해 주세요",
+ "enterProviderName": "API 제공자 이름을 입력해 주세요",
+ "enterBaseUrl": "API Base URL을 입력해 주세요",
+ "enterApiKey": "API Key를 입력해 주세요",
+ "enterPlannerModelIdentifier": "planner 모델 식별자를 입력해 주세요",
+ "enterReplyerModelIdentifier": "replyer 모델 식별자를 입력해 주세요"
},
"toast": {
"loadFailedTitle": "설정 불러오기에 실패했습니다",
@@ -667,33 +668,43 @@
"description": "봇이 그룹 슬랭을 학습하고 사용할 수 있게 합니다"
}
},
- "siliconFlow": {
- "about": {
- "title": "SiliconFlow 소개",
- "description": "SiliconFlow 는 DeepSeek V3, Qwen, 비전 모델, 음성 인식, 임베딩 모델 등 폭넓은 모델을 제공합니다. API Key 하나로 MaiBot 의 모든 기능을 사용할 수 있습니다.",
- "link": "SiliconFlow 에서 API Key 받기"
+ "apiProvider": {
+ "providerName": {
+ "label": "API 제공자 이름 *",
+ "placeholder": "예: OpenAI, DeepSeek, 자체 호스팅",
+ "description": "이 이름은 model_config.toml에 저장되며 아래 모델에서 참조됩니다"
+ },
+ "baseUrl": {
+ "label": "API Base URL *",
+ "description": "OpenAI 호환 엔드포인트를 입력해 주세요. 예: https://api.example.com/v1"
},
"apiKey": {
- "label": "SiliconFlow API Key *",
- "description": "SiliconFlow API Key를 입력해 주세요. 입력하면 MaiBot 이 필요한 모델을 자동으로 구성합니다.",
+ "label": "API Key *",
+ "description": "이 제공자의 API Key를 입력해 주세요",
"show": "API Key 표시",
"hide": "API Key 숨기기"
- },
- "autoConfig": {
- "title": "다음 모델이 자동으로 구성됩니다:",
- "items": {
- "deepseek": "DeepSeek V3 - 주요 대화 및 도구 모델",
- "qwen3": "Qwen3 30B - 잦은 소규모 작업과 도구 호출",
- "qwen3Vl": "Qwen3 VL 30B - 이미지 인식",
- "senseVoice": "SenseVoice - 음성 인식",
- "bgeM3": "BGE-M3 - 텍스트 임베딩",
- "lpmm": "지식 베이스 관련 모델 (LPMM)"
+ }
+ },
+ "modelSetup": {
+ "planner": {
+ "identifier": {
+ "label": "planner 모델 식별자 *",
+ "description": "API 서비스가 제공하는 실제 모델 ID입니다. 모델 이름은 이 식별자로 초기화됩니다"
+ },
+ "visual": {
+ "label": "비전 사용"
}
},
- "hint": {
- "title": "팁: ",
- "description": "마법사를 마친 뒤에는 \"시스템 설정 -> 모델 설정\"에서 더 많은 API 제공자와 모델을 추가할 수 있습니다."
- }
+ "replyer": {
+ "identifier": {
+ "label": "replyer 모델 식별자 *",
+ "description": "API 서비스가 제공하는 실제 모델 ID입니다. 모델 이름은 이 식별자로 초기화됩니다"
+ },
+ "visual": {
+ "label": "비전 사용"
+ }
+ },
+ "saveHint": "더 자세한 작업 할당은 나중에 설정할 수 있습니다."
}
}
},
diff --git a/dashboard/src/i18n/locales/zh.json b/dashboard/src/i18n/locales/zh.json
index 4325f589..d15fc01a 100644
--- a/dashboard/src/i18n/locales/zh.json
+++ b/dashboard/src/i18n/locales/zh.json
@@ -500,17 +500,13 @@
"title": "人格配置",
"description": "定义机器人的性格和说话风格"
},
- "emoji": {
- "title": "表情包",
- "description": "配置表情包相关设置"
- },
- "other": {
- "title": "其他设置",
- "description": "配置全局黑话等基础选项"
- },
- "siliconFlow": {
+ "apiProvider": {
"title": "API配置",
- "description": "配置硅基流动 API 密钥"
+ "description": "配置 API 提供商"
+ },
+ "modelSetup": {
+ "title": "模型配置",
+ "description": "配置 planner 和 replyer 模型"
}
},
"loading": {
@@ -528,7 +524,12 @@
"selectPlatform": "请选择平台",
"enterNickname": "请输入昵称",
"enterQqAccount": "请输入 QQ 账号",
- "enterAccountId": "请输入账号 ID"
+ "enterAccountId": "请输入账号 ID",
+ "enterProviderName": "请输入 API 提供商名称",
+ "enterBaseUrl": "请输入 API Base URL",
+ "enterApiKey": "请输入 API Key",
+ "enterPlannerModelIdentifier": "请输入 planner 模型标识符",
+ "enterReplyerModelIdentifier": "请输入 replyer 模型标识符"
},
"toast": {
"loadFailedTitle": "加载配置失败",
@@ -559,8 +560,8 @@
"footer": "您可以随时在设置中修改这些配置",
"defaults": {
"personality": {
- "personality": "是一个女大学生,现在在读大二,会刷贴吧。",
- "replyStyle": "请回复得平淡一些,简短一些,说中文,不要刻意突出自身学科背景。可以参考贴吧、知乎和微博的回复风格。",
+ "personality": "是一个大二女大学生,现在正在上网和群友聊天。有轻微攻击性",
+ "replyStyle": "你的风格平淡简短。可以参考贴吧,知乎和微博的回复风格。不浮夸不长篇大论,不要过分修辞和复杂句。",
"multipleReplyStyles": {
"plain": "你的风格平淡但不失讽刺,很简短,很白话。可以参考贴吧、微博的回复风格。",
"shortText": "用1-2个字进行回复",
@@ -577,7 +578,7 @@
"platform": {
"label": "平台 *",
"placeholder": "请选择平台",
- "description": "选择机器人运行的平台",
+ "description": "选择麦麦Bot运行的平台",
"options": {
"custom": "其他平台"
}
@@ -589,7 +590,7 @@
"qqAccount": {
"label": "QQ账号 *",
"placeholder": "请输入机器人的 QQ 账号",
- "description": "机器人登录使用的 QQ 账号"
+ "description": "运行麦麦Bot的 QQ 账号"
},
"primaryAccount": {
"label": "账号 ID *",
@@ -599,7 +600,7 @@
"nickname": {
"label": "昵称 *",
"placeholder": "请输入机器人的昵称",
- "description": "机器人的主要称呼名称"
+ "description": "麦麦Bot的名称"
},
"alias": {
"label": "别名",
@@ -667,33 +668,43 @@
"description": "允许机器人学习和使用群组黑话"
}
},
- "siliconFlow": {
- "about": {
- "title": "关于硅基流动 (SiliconFlow)",
- "description": "硅基流动提供了完整的模型覆盖,包括 DeepSeek V3、Qwen、视觉模型、语音识别和嵌入模型。只需一个 API Key 即可使用麦麦的所有功能!",
- "link": "前往硅基流动获取 API Key"
+ "apiProvider": {
+ "providerName": {
+ "label": "API 提供商名称 *",
+ "placeholder": "例如 OpenAI、DeepSeek、自建服务",
+ "description": "为 API 提供商命名,该名称会写入 model_config.toml 并被下方模型引用"
+ },
+ "baseUrl": {
+ "label": "API Base URL *",
+ "description": "请填写 OpenAI 兼容接口地址,例如 https://api.example.com/v1"
},
"apiKey": {
- "label": "SiliconFlow API Key *",
- "description": "请输入您的硅基流动 API 密钥。获取后,麦麦将自动配置所有必需的模型。",
+ "label": "API Key *",
+ "description": "请填写该提供商的 API Key",
"show": "显示 API Key",
"hide": "隐藏 API Key"
- },
- "autoConfig": {
- "title": "将自动配置以下模型:",
- "items": {
- "deepseek": "DeepSeek V3 - 主要对话和工具模型",
- "qwen3": "Qwen3 30B - 高频小任务和工具调用",
- "qwen3Vl": "Qwen3 VL 30B - 图像识别",
- "senseVoice": "SenseVoice - 语音识别",
- "bgeM3": "BGE-M3 - 文本嵌入",
- "lpmm": "知识库相关模型 (LPMM)"
+ }
+ },
+ "modelSetup": {
+ "planner": {
+ "identifier": {
+ "label": "planner 模型标识符 *",
+ "description": "API 服务商提供的真实模型 ID,模型名称会自动初始化为该标识符"
+ },
+ "visual": {
+ "label": "启用视觉"
}
},
- "hint": {
- "title": "💡 提示:",
- "description": "完成向导后,您可以在“系统设置 -> 模型配置”中添加更多 API 提供商和模型。"
- }
+ "replyer": {
+ "identifier": {
+ "label": "replyer 模型标识符 *",
+ "description": "API 服务商提供的真实模型 ID,模型名称会自动初始化为该标识符"
+ },
+ "visual": {
+ "label": "启用视觉"
+ }
+ },
+ "saveHint": "你可以稍后配置更详细的任务分配。"
}
}
},
diff --git a/dashboard/src/lib/maisaka-monitor-client.ts b/dashboard/src/lib/maisaka-monitor-client.ts
index 76d3f972..065cf97a 100644
--- a/dashboard/src/lib/maisaka-monitor-client.ts
+++ b/dashboard/src/lib/maisaka-monitor-client.ts
@@ -26,6 +26,10 @@ export interface MaisakaToolCall {
export interface SessionStartEvent {
session_id: string
session_name: string
+ is_group_chat?: boolean
+ group_id?: string | null
+ user_id?: string | null
+ platform?: string
timestamp: number
}
diff --git a/dashboard/src/lib/plugin-api/marketplace.ts b/dashboard/src/lib/plugin-api/marketplace.ts
index a7054088..0842a91a 100644
--- a/dashboard/src/lib/plugin-api/marketplace.ts
+++ b/dashboard/src/lib/plugin-api/marketplace.ts
@@ -35,6 +35,12 @@ interface PluginApiResponse {
}
homepage_url?: string
repository_url?: string
+ urls?: {
+ repository?: string
+ homepage?: string
+ documentation?: string
+ issues?: string
+ }
keywords: string[]
categories?: string[]
default_locale: string
@@ -44,6 +50,28 @@ interface PluginApiResponse {
[key: string]: unknown
}
+function normalizePluginManifest(manifest: PluginApiResponse['manifest']): PluginInfo['manifest'] {
+ const repositoryUrl = manifest.repository_url || manifest.urls?.repository
+ const homepageUrl = manifest.homepage_url || manifest.urls?.homepage
+
+ return {
+ manifest_version: manifest.manifest_version || 1,
+ name: manifest.name,
+ version: manifest.version,
+ description: manifest.description || '',
+ author: manifest.author || { name: 'Unknown' },
+ license: manifest.license || 'Unknown',
+ host_application: manifest.host_application || { min_version: '0.0.0' },
+ homepage_url: homepageUrl,
+ repository_url: repositoryUrl,
+ urls: manifest.urls,
+ keywords: manifest.keywords || [],
+ categories: manifest.categories || [],
+ default_locale: manifest.default_locale || 'zh-CN',
+ locales_path: manifest.locales_path,
+ }
+}
+
/**
* 从远程获取插件列表(通过后端代理避免 CORS)
*/
@@ -88,21 +116,7 @@ export async function fetchPluginList(): Promise> {
})
.map((item) => ({
id: item.id,
- manifest: {
- manifest_version: item.manifest.manifest_version || 1,
- name: item.manifest.name,
- version: item.manifest.version,
- description: item.manifest.description || '',
- author: item.manifest.author || { name: 'Unknown' },
- license: item.manifest.license || 'Unknown',
- host_application: item.manifest.host_application || { min_version: '0.0.0' },
- homepage_url: item.manifest.homepage_url,
- repository_url: item.manifest.repository_url,
- keywords: item.manifest.keywords || [],
- categories: item.manifest.categories || [],
- default_locale: item.manifest.default_locale || 'zh-CN',
- locales_path: item.manifest.locales_path,
- },
+ manifest: normalizePluginManifest(item.manifest),
downloads: 0,
rating: 0,
review_count: 0,
diff --git a/dashboard/src/routes/index.tsx b/dashboard/src/routes/index.tsx
index 30a88c3d..86555e85 100644
--- a/dashboard/src/routes/index.tsx
+++ b/dashboard/src/routes/index.tsx
@@ -29,6 +29,7 @@ import {
} from 'recharts'
import {
Activity,
+ BarChart3,
TrendingUp,
DollarSign,
Clock,
@@ -45,6 +46,7 @@ import {
AlertCircle,
ClipboardList,
ClipboardCheck,
+ ExternalLink,
} from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Badge } from '@/components/ui/badge'
@@ -566,6 +568,13 @@ function IndexPageContent() {
{t('home.quickActions.systemSettings')}
+
diff --git a/dashboard/src/routes/monitor/maisaka-monitor.tsx b/dashboard/src/routes/monitor/maisaka-monitor.tsx
index 8591c3eb..84acfd24 100644
--- a/dashboard/src/routes/monitor/maisaka-monitor.tsx
+++ b/dashboard/src/routes/monitor/maisaka-monitor.tsx
@@ -104,10 +104,17 @@ function SessionSidebar({
)}
>
-
- {session.sessionName}
-
-
+
+ {session.isGroupChat !== undefined && (
+
+ {session.isGroupChat ? '群' : '私'}
+
+ )}
+
+ {session.sessionName}
+
+
+
{session.eventCount}
diff --git a/dashboard/src/routes/monitor/use-maisaka-monitor.ts b/dashboard/src/routes/monitor/use-maisaka-monitor.ts
index 3ccf705d..bba94207 100644
--- a/dashboard/src/routes/monitor/use-maisaka-monitor.ts
+++ b/dashboard/src/routes/monitor/use-maisaka-monitor.ts
@@ -26,6 +26,10 @@ export interface TimelineEntry {
export interface SessionInfo {
sessionId: string
sessionName: string
+ isGroupChat?: boolean
+ groupId?: string | null
+ userId?: string | null
+ platform?: string
lastActivity: number
eventCount: number
}
@@ -33,18 +37,62 @@ export interface SessionInfo {
/** 最大保留的时间线条目数 */
const MAX_TIMELINE_ENTRIES = 500
+function resolveSessionDisplayName({
+ fallbackName,
+ groupId,
+ isGroupChat,
+ sessionId,
+ userId,
+}: {
+ fallbackName?: string
+ groupId?: string | null
+ isGroupChat?: boolean
+ sessionId: string
+ userId?: string | null
+}) {
+ const targetId = isGroupChat ? groupId : userId
+ const normalizedName = fallbackName?.trim()
+
+ if (targetId && normalizedName?.endsWith(`(${targetId})`)) {
+ return normalizedName
+ }
+ if (normalizedName && targetId && normalizedName !== targetId && normalizedName !== sessionId) {
+ return `${normalizedName}(${targetId})`
+ }
+ if (isGroupChat && groupId) {
+ return groupId
+ }
+ if (!isGroupChat && userId) {
+ return userId
+ }
+ return fallbackName || sessionId.slice(0, 8)
+}
+
let entryCounter = 0
+let cachedTimeline: TimelineEntry[] = []
+let cachedSessions: Map
= new Map()
+let cachedSelectedSession: string | null = null
export function useMaisakaMonitor() {
- const [timeline, setTimeline] = useState([])
- const [sessions, setSessions] = useState
)
}
-// ====== 步骤3:表情包配置 ======
-interface EmojiFormProps {
- config: EmojiConfig
- onChange: (config: EmojiConfig) => void
+// ====== 步骤3:API 提供商配置 ======
+interface ApiProviderSetupFormProps {
+ config: ApiProviderSetupConfig
+ onChange: (config: ApiProviderSetupConfig) => void
}
-export function EmojiForm({ config, onChange }: EmojiFormProps) {
- const { t } = useTranslation()
-
- return (
-
-
-
-
onChange({ ...config, emoji_send_num: Number(e.target.value) })}
- />
-
- {t('setupPage.forms.emoji.emojiSendNum.description')}
-
-
-
-
-
-
onChange({ ...config, max_reg_num: Number(e.target.value) })}
- />
-
- {t('setupPage.forms.emoji.maxRegNum.description')}
-
-
-
-
-
-
-
- {t('setupPage.forms.emoji.doReplace.description')}
-
-
-
onChange({ ...config, do_replace: checked })}
- />
-
-
-
-
-
onChange({ ...config, check_interval: Number(e.target.value) })}
- />
-
- {t('setupPage.forms.emoji.checkInterval.description')}
-
-
-
-
-
-
-
-
-
- {t('setupPage.forms.emoji.stealEmoji.description')}
-
-
-
onChange({ ...config, steal_emoji: checked })}
- />
-
-
-
-
-
-
- {t('setupPage.forms.emoji.contentFiltration.description')}
-
-
-
onChange({ ...config, content_filtration: checked })}
- />
-
-
- {config.content_filtration && (
-
-
-
onChange({ ...config, filtration_prompt: e.target.value })}
- />
-
- {t('setupPage.forms.emoji.filtrationPrompt.description')}
-
-
- )}
-
- )
-}
-
-// ====== 步骤4:其他基础配置 ======
-interface OtherBasicFormProps {
- config: OtherBasicConfig
- onChange: (config: OtherBasicConfig) => void
-}
-
-export function OtherBasicForm({ config, onChange }: OtherBasicFormProps) {
- const { t } = useTranslation()
-
- return (
-
-
-
-
-
- {t('setupPage.forms.other.allGlobal.description')}
-
-
-
onChange({ ...config, all_global: checked })}
- />
-
-
- )
-}
-
-// ====== 步骤5:硅基流动API配置 ======
-interface SiliconFlowFormProps {
- config: SiliconFlowConfig
- onChange: (config: SiliconFlowConfig) => void
-}
-
-export function SiliconFlowForm({ config, onChange }: SiliconFlowFormProps) {
+export function ApiProviderSetupForm({ config, onChange }: ApiProviderSetupFormProps) {
const { t } = useTranslation()
const [showApiKey, setShowApiKey] = useState(false)
const apiKeyToggleLabel = showApiKey
- ? t('setupPage.forms.siliconFlow.apiKey.hide')
- : t('setupPage.forms.siliconFlow.apiKey.show')
- const autoConfigItems = [
- t('setupPage.forms.siliconFlow.autoConfig.items.deepseek'),
- t('setupPage.forms.siliconFlow.autoConfig.items.qwen3'),
- t('setupPage.forms.siliconFlow.autoConfig.items.qwen3Vl'),
- t('setupPage.forms.siliconFlow.autoConfig.items.senseVoice'),
- t('setupPage.forms.siliconFlow.autoConfig.items.bgeM3'),
- t('setupPage.forms.siliconFlow.autoConfig.items.lpmm'),
- ]
+ ? t('setupPage.forms.apiProvider.apiKey.hide')
+ : t('setupPage.forms.apiProvider.apiKey.show')
return (
-
-
+
+
+
onChange({ ...config, provider_name: e.target.value })}
+ />
+
+ {t('setupPage.forms.apiProvider.providerName.description')}
+
-
+
+
onChange({ ...config, base_url: e.target.value })}
+ className="font-mono"
+ />
+
+ {t('setupPage.forms.apiProvider.baseUrl.description')}
+
+
+
+
+
onChange({ api_key: e.target.value })}
+ onChange={(e) => onChange({ ...config, api_key: e.target.value })}
className="pr-10 font-mono"
/>
- {t('setupPage.forms.siliconFlow.apiKey.description')}
-
-
-
-
-
{t('setupPage.forms.siliconFlow.autoConfig.title')}
-
- {autoConfigItems.map((item) => (
- - {item}
- ))}
-
-
-
-
-
- {t('setupPage.forms.siliconFlow.hint.title')}
- {t('setupPage.forms.siliconFlow.hint.description')}
+ {t('setupPage.forms.apiProvider.apiKey.description')}
)
}
+
+// ====== 步骤4:基础模型配置 ======
+interface ModelSetupFormProps {
+ config: ModelSetupConfig
+ onChange: (config: ModelSetupConfig) => void
+}
+
+export function ModelSetupForm({ config, onChange }: ModelSetupFormProps) {
+ const { t } = useTranslation()
+
+ return (
+
+
+
+
+
+
+ onChange({
+ ...config,
+ planner_model_identifier: e.target.value,
+ planner_model_name: e.target.value,
+ })
+ }
+ className="font-mono"
+ />
+
+ {t('setupPage.forms.modelSetup.planner.identifier.description')}
+
+
+
+
+
+
+ onChange({ ...config, planner_visual: checked })
+ }
+ />
+
+
+
+
+
+
+
+ onChange({
+ ...config,
+ replyer_model_identifier: e.target.value,
+ replyer_model_name: e.target.value,
+ })
+ }
+ className="font-mono"
+ />
+
+ {t('setupPage.forms.modelSetup.replyer.identifier.description')}
+
+
+
+
+
+
+ onChange({ ...config, replyer_visual: checked })
+ }
+ />
+
+
+
+
+
+ {t('setupPage.forms.modelSetup.saveHint')}
+
+
+ )
+}
diff --git a/dashboard/src/routes/setup/api.ts b/dashboard/src/routes/setup/api.ts
index 8b93ce69..5332b7fe 100644
--- a/dashboard/src/routes/setup/api.ts
+++ b/dashboard/src/routes/setup/api.ts
@@ -4,13 +4,49 @@ import { parseResponse, throwIfError } from '@/lib/api-helpers'
import { fetchWithAuth, getAuthHeaders } from '@/lib/fetch-with-auth'
import type {
+ ApiProviderSetupConfig,
BotBasicConfig,
- EmojiConfig,
- OtherBasicConfig,
+ ModelSetupConfig,
PersonalityConfig,
- SiliconFlowConfig,
} from './types'
+interface ModelInfo {
+ model_identifier: string
+ name: string
+ api_provider: string
+ price_in?: number
+ cache?: boolean
+ cache_price_in?: number
+ price_out?: number
+ force_stream_mode?: boolean
+ visual?: boolean
+ extra_params?: Record
+}
+
+interface ApiProviderConfig {
+ name: string
+ base_url: string
+ api_key: string
+ client_type?: string
+ max_retry?: number
+ timeout?: number
+ retry_interval?: number
+}
+
+interface TaskConfig {
+ model_list?: string[]
+ max_tokens?: number
+ temperature?: number
+ slow_threshold?: number
+ selection_strategy?: string
+}
+
+interface ModelConfig {
+ models?: ModelInfo[]
+ api_providers?: ApiProviderConfig[]
+ model_task_config?: Record
+}
+
// ===== 读取配置 =====
// 读取Bot基础配置
@@ -56,73 +92,57 @@ export async function loadPersonalityConfig(): Promise {
}
}
-// 读取表情包配置
-export async function loadEmojiConfig(): Promise {
- const response = await fetchWithAuth('/api/webui/config/bot', {
- method: 'GET',
- headers: getAuthHeaders(),
- })
-
- const result = await parseResponse<{ config: { emoji?: EmojiConfig } }>(
- response
- )
- const data = throwIfError(result)
- const emojiConfig = (data.config.emoji || {}) as Partial
-
- return {
- emoji_send_num: emojiConfig.emoji_send_num ?? 25,
- max_reg_num: emojiConfig.max_reg_num ?? 64,
- do_replace: emojiConfig.do_replace ?? true,
- check_interval: emojiConfig.check_interval ?? 10,
- steal_emoji: emojiConfig.steal_emoji ?? true,
- content_filtration: emojiConfig.content_filtration ?? false,
- filtration_prompt: emojiConfig.filtration_prompt || '',
- }
-}
-
-// 读取其他基础配置
-export async function loadOtherBasicConfig(): Promise {
- const response = await fetchWithAuth('/api/webui/config/bot', {
- method: 'GET',
- headers: getAuthHeaders(),
- })
-
- const result = await parseResponse<{
- config: {
- expression?: { all_global_jargon?: boolean }
- }
- }>(response)
- const data = throwIfError(result)
- const config = data.config
-
- const expressionConfig = config.expression || {}
-
- return {
- all_global: expressionConfig.all_global_jargon ?? true,
- }
-}
-
-// 读取硅基流动API配置
-export async function loadSiliconFlowConfig(): Promise {
+async function loadModelConfig(): Promise {
const response = await fetchWithAuth('/api/webui/config/model', {
method: 'GET',
headers: getAuthHeaders(),
})
- const result = await parseResponse<{
- config: {
- api_providers?: Array<{ name: string; api_key?: string }>
- }
- }>(response)
+ const result = await parseResponse<{ config: ModelConfig }>(response)
const data = throwIfError(result)
- const modelConfig = data.config
+ return data.config || {}
+}
- // 获取SiliconFlow提供商的API Key
- const apiProviders = modelConfig.api_providers || []
- const siliconFlowProvider = apiProviders.find((p) => p.name === 'SiliconFlow')
+// 读取 API 提供商配置
+export async function loadApiProviderSetupConfig(): Promise {
+ const modelConfig = await loadModelConfig()
+ const models = modelConfig.models || []
+ const taskConfig = modelConfig.model_task_config || {}
+ const plannerName = taskConfig.planner?.model_list?.[0] || ''
+ const replyerName = taskConfig.replyer?.model_list?.[0] || ''
+ const plannerModel = models.find((model) => model.name === plannerName)
+ const replyerModel = models.find((model) => model.name === replyerName)
+ const providerName =
+ plannerModel?.api_provider ||
+ replyerModel?.api_provider ||
+ modelConfig.api_providers?.[0]?.name ||
+ ''
+ const provider = modelConfig.api_providers?.find((item) => item.name === providerName)
return {
- api_key: siliconFlowProvider?.api_key || '',
+ provider_name: providerName,
+ base_url: provider?.base_url || '',
+ api_key: '',
+ }
+}
+
+// 读取基础模型配置
+export async function loadModelSetupConfig(): Promise {
+ const modelConfig = await loadModelConfig()
+ const models = modelConfig.models || []
+ const taskConfig = modelConfig.model_task_config || {}
+ const plannerName = taskConfig.planner?.model_list?.[0] || ''
+ const replyerName = taskConfig.replyer?.model_list?.[0] || ''
+ const plannerModel = models.find((model) => model.name === plannerName)
+ const replyerModel = models.find((model) => model.name === replyerName)
+
+ return {
+ planner_model_name: plannerName,
+ planner_model_identifier: plannerModel?.model_identifier || plannerName,
+ planner_visual: Boolean(plannerModel?.visual),
+ replyer_model_name: replyerName,
+ replyer_model_identifier: replyerModel?.model_identifier || replyerName,
+ replyer_visual: Boolean(replyerModel?.visual),
}
}
@@ -143,19 +163,6 @@ export async function saveBotBasicConfig(config: BotBasicConfig) {
// 保存人格配置
export async function savePersonalityConfig(config: PersonalityConfig) {
const response = await fetchWithAuth('/api/webui/config/bot/section/personality', {
- method: 'POST',
- headers: getAuthHeaders(),
- body: JSON.stringify(config),
- }
- )
-
- const result = await parseResponse(response)
- return throwIfError(result)
-}
-
-// 保存表情包配置
-export async function saveEmojiConfig(config: EmojiConfig) {
- const response = await fetchWithAuth('/api/webui/config/bot/section/emoji', {
method: 'POST',
headers: getAuthHeaders(),
body: JSON.stringify(config),
@@ -165,58 +172,62 @@ export async function saveEmojiConfig(config: EmojiConfig) {
return throwIfError(result)
}
-// 保存其他基础配置(黑话)
-export async function saveOtherBasicConfig(config: OtherBasicConfig) {
- const response = await fetchWithAuth('/api/webui/config/bot/section/expression', {
- method: 'POST',
- headers: getAuthHeaders(),
- body: JSON.stringify({ all_global_jargon: config.all_global }),
- })
-
- const result = await parseResponse(response)
- return throwIfError(result)
+function createBasicModel(
+ modelName: string,
+ modelIdentifier: string,
+ providerName: string,
+ visual: boolean,
+ existing?: ModelInfo
+): ModelInfo {
+ return {
+ price_in: 0,
+ cache: false,
+ cache_price_in: 0,
+ price_out: 0,
+ force_stream_mode: false,
+ extra_params: {},
+ ...existing,
+ visual,
+ model_identifier: modelIdentifier,
+ name: modelName,
+ api_provider: providerName,
+ }
}
-// 保存硅基流动API配置
-export async function saveSiliconFlowConfig(config: SiliconFlowConfig) {
- // 1. 读取现有配置
- const response = await fetchWithAuth('/api/webui/config/model', {
- method: 'GET',
- headers: getAuthHeaders(),
- })
+function upsertModel(models: ModelInfo[], model: ModelInfo): ModelInfo[] {
+ const index = models.findIndex((item) => item.name === model.name)
+ if (index >= 0) {
+ return models.map((item, itemIndex) => (itemIndex === index ? model : item))
+ }
+ return [...models, model]
+}
- const result = await parseResponse<{
- config: {
- api_providers?: Array>
- }
- }>(response)
- const currentModelConfig = throwIfError(result)
- const modelConfig = currentModelConfig.config
+// 保存 API 提供商配置
+export async function saveApiProviderSetupConfig(config: ApiProviderSetupConfig) {
+ const modelConfig = await loadModelConfig()
+ const providerName = config.provider_name.trim()
- // 2. 更新SiliconFlow提供商的API Key
const apiProviders = modelConfig.api_providers || []
- const siliconFlowIndex = apiProviders.findIndex((p) => p.name === 'SiliconFlow')
-
- if (siliconFlowIndex >= 0) {
- // 更新现有提供商的API Key
- apiProviders[siliconFlowIndex] = {
- ...apiProviders[siliconFlowIndex],
- api_key: config.api_key,
- }
- } else {
- // 如果不存在,创建新的SiliconFlow提供商
- apiProviders.push({
- name: 'SiliconFlow',
- base_url: 'https://api.siliconflow.cn/v1',
- api_key: config.api_key,
- client_type: 'openai',
- max_retry: 3,
- timeout: 120,
- retry_interval: 5,
- })
+ const providerIndex = apiProviders.findIndex((provider) => provider.name === providerName)
+ const providerConfig: ApiProviderConfig = {
+ name: providerName,
+ base_url: config.base_url.trim(),
+ api_key: config.api_key.trim(),
+ client_type: 'openai',
+ max_retry: 3,
+ timeout: 120,
+ retry_interval: 5,
+ }
+
+ if (providerIndex >= 0) {
+ apiProviders[providerIndex] = {
+ ...apiProviders[providerIndex],
+ ...providerConfig,
+ }
+ } else {
+ apiProviders.push(providerConfig)
}
- // 3. 保存更新后的配置
const updatedConfig = {
...modelConfig,
api_providers: apiProviders,
@@ -232,6 +243,77 @@ export async function saveSiliconFlowConfig(config: SiliconFlowConfig) {
return throwIfError(saveResult)
}
+// 保存基础模型配置
+export async function saveModelSetupConfig(
+ config: ModelSetupConfig,
+ providerName: string
+) {
+ const modelConfig = await loadModelConfig()
+ const trimmedProviderName = providerName.trim()
+ const plannerModelIdentifier = config.planner_model_identifier.trim()
+ const plannerModelName = plannerModelIdentifier
+ const replyerModelIdentifier = config.replyer_model_identifier.trim()
+ const replyerModelName = replyerModelIdentifier
+
+ // 新增或更新 planner/replyer 模型,并仅同步 utils 到 planner。
+ let models = modelConfig.models || []
+ const existingPlannerModel = models.find((model) => model.name === plannerModelName)
+ const existingReplyerModel = models.find((model) => model.name === replyerModelName)
+ models = upsertModel(
+ models,
+ createBasicModel(
+ plannerModelName,
+ plannerModelIdentifier,
+ trimmedProviderName,
+ config.planner_visual,
+ existingPlannerModel
+ )
+ )
+ models = upsertModel(
+ models,
+ createBasicModel(
+ replyerModelName,
+ replyerModelIdentifier,
+ trimmedProviderName,
+ config.replyer_visual,
+ existingReplyerModel
+ )
+ )
+
+ const modelTaskConfig = modelConfig.model_task_config || {}
+ const updatedTaskConfig = {
+ ...modelTaskConfig,
+ planner: {
+ ...(modelTaskConfig.planner || {}),
+ model_list: [plannerModelName],
+ },
+ replyer: {
+ ...(modelTaskConfig.replyer || {}),
+ model_list: [replyerModelName],
+ },
+ utils: {
+ ...(modelTaskConfig.utils || {}),
+ model_list: [plannerModelName],
+ },
+ }
+
+ // vlm/voice/embedding 等其他任务配置保持原样。
+ const updatedConfig = {
+ ...modelConfig,
+ models,
+ model_task_config: updatedTaskConfig,
+ }
+
+ const saveResponse = await fetchWithAuth('/api/webui/config/model', {
+ method: 'POST',
+ headers: getAuthHeaders(),
+ body: JSON.stringify(updatedConfig),
+ })
+
+ const saveResult = await parseResponse(saveResponse)
+ return throwIfError(saveResult)
+}
+
// 标记设置完成
export async function completeSetup() {
const response = await fetchWithAuth('/api/webui/setup/complete', {
diff --git a/dashboard/src/routes/setup/index.tsx b/dashboard/src/routes/setup/index.tsx
index fa943cb7..787d6854 100644
--- a/dashboard/src/routes/setup/index.tsx
+++ b/dashboard/src/routes/setup/index.tsx
@@ -1,13 +1,12 @@
import { useNavigate } from '@tanstack/react-router'
import {
ArrowRight,
+ Brain,
Bot,
CheckCircle2,
Globe,
Key,
- Settings,
SkipForward,
- Smile,
Sparkles,
User,
} from 'lucide-react'
@@ -38,31 +37,27 @@ import { cn } from '@/lib/utils'
import { APP_NAME } from '@/lib/version'
import { useToast } from '@/hooks/use-toast'
import type {
+ ApiProviderSetupConfig,
SetupStep,
BotBasicConfig,
+ ModelSetupConfig,
PersonalityConfig,
- EmojiConfig,
- OtherBasicConfig,
- SiliconFlowConfig,
} from './types'
import {
+ ApiProviderSetupForm,
BotBasicForm,
+ ModelSetupForm,
PersonalityForm,
- EmojiForm,
- OtherBasicForm,
- SiliconFlowForm,
} from './StepForms'
import {
loadBotBasicConfig,
loadPersonalityConfig,
- loadEmojiConfig,
- loadOtherBasicConfig,
- loadSiliconFlowConfig,
+ loadApiProviderSetupConfig,
+ loadModelSetupConfig,
saveBotBasicConfig,
savePersonalityConfig,
- saveEmojiConfig,
- saveOtherBasicConfig,
- saveSiliconFlowConfig,
+ saveApiProviderSetupConfig,
+ saveModelSetupConfig,
completeSetup,
} from './api'
import { RestartProvider, useRestart } from '@/lib/restart-context'
@@ -103,15 +98,6 @@ function SetupPageContent() {
],
multiple_probability: 0.2,
})
- const createDefaultEmojiConfig = (): EmojiConfig => ({
- emoji_send_num: 25,
- max_reg_num: 64,
- do_replace: true,
- check_interval: 10,
- steal_emoji: true,
- content_filtration: false,
- filtration_prompt: t('setupPage.defaults.emoji.filtrationPrompt'),
- })
const [currentStep, setCurrentStep] = useState(0)
const [isCompleting, setIsCompleting] = useState(false)
const [isSaving, setIsSaving] = useState(false)
@@ -131,17 +117,21 @@ function SetupPageContent() {
createDefaultPersonalityConfig()
)
- // 步骤3:表情包配置
- const [emoji, setEmoji] = useState(() => createDefaultEmojiConfig())
-
- // 步骤4:其他基础配置
- const [otherBasic, setOtherBasic] = useState({
- all_global: true,
+ // 步骤3:API 提供商配置
+ const [apiProviderSetup, setApiProviderSetup] = useState({
+ provider_name: '',
+ base_url: '',
+ api_key: '',
})
- // 步骤5:硅基流动API配置
- const [siliconFlow, setSiliconFlow] = useState({
- api_key: '',
+ // 步骤4:基础模型配置
+ const [modelSetup, setModelSetup] = useState({
+ planner_model_name: '',
+ planner_model_identifier: '',
+ planner_visual: false,
+ replyer_model_name: '',
+ replyer_model_identifier: '',
+ replyer_visual: false,
})
const steps: SetupStep[] = [
@@ -158,23 +148,17 @@ function SetupPageContent() {
icon: User,
},
{
- id: 'emoji',
- title: t('setupPage.steps.emoji.title'),
- description: t('setupPage.steps.emoji.description'),
- icon: Smile,
- },
- {
- id: 'other',
- title: t('setupPage.steps.other.title'),
- description: t('setupPage.steps.other.description'),
- icon: Settings,
- },
- {
- id: 'siliconflow',
- title: t('setupPage.steps.siliconFlow.title'),
- description: t('setupPage.steps.siliconFlow.description'),
+ id: 'api-provider',
+ title: t('setupPage.steps.apiProvider.title'),
+ description: t('setupPage.steps.apiProvider.description'),
icon: Key,
},
+ {
+ id: 'model-setup',
+ title: t('setupPage.steps.modelSetup.title'),
+ description: t('setupPage.steps.modelSetup.description'),
+ icon: Brain,
+ },
]
const progress = ((currentStep + 1) / steps.length) * 100
@@ -186,19 +170,17 @@ function SetupPageContent() {
setIsLoading(true)
// 并行加载所有配置
- const [bot, personality, emoji, other, silicon] = await Promise.all([
+ const [bot, personality, apiProvider, model] = await Promise.all([
loadBotBasicConfig(),
loadPersonalityConfig(),
- loadEmojiConfig(),
- loadOtherBasicConfig(),
- loadSiliconFlowConfig(),
+ loadApiProviderSetupConfig(),
+ loadModelSetupConfig(),
])
setBotBasic(bot)
setPersonality(personality)
- setEmoji(emoji)
- setOtherBasic(other)
- setSiliconFlow(silicon)
+ setApiProviderSetup(apiProvider)
+ setModelSetup(model)
} catch (error) {
toast({
title: t('setupPage.toast.loadFailedTitle'),
@@ -225,14 +207,11 @@ function SetupPageContent() {
case 1: // 人格配置
await savePersonalityConfig(personality)
break
- case 2: // 表情包
- await saveEmojiConfig(emoji)
+ case 2: // API 提供商
+ await saveApiProviderSetupConfig(apiProviderSetup)
break
- case 3: // 其他设置
- await saveOtherBasicConfig(otherBasic)
- break
- case 4: // 硅基流动API
- await saveSiliconFlowConfig(siliconFlow)
+ case 3: // 基础模型
+ await saveModelSetupConfig(modelSetup, apiProviderSetup.provider_name)
break
}
@@ -272,6 +251,24 @@ function SetupPageContent() {
return null
}
+ function validateApiProviderSetup(config: ApiProviderSetupConfig): string | null {
+ if (!config.provider_name.trim()) return t('setupPage.validation.enterProviderName')
+ if (!config.base_url.trim()) return t('setupPage.validation.enterBaseUrl')
+ if (!config.api_key.trim()) return t('setupPage.validation.enterApiKey')
+ return null
+ }
+
+ function validateModelSetup(config: ModelSetupConfig): string | null {
+ if (!config.planner_model_identifier.trim()) {
+ return t('setupPage.validation.enterPlannerModelIdentifier')
+ }
+ if (!config.replyer_model_identifier.trim()) {
+ return t('setupPage.validation.enterReplyerModelIdentifier')
+ }
+ if (!apiProviderSetup.provider_name.trim()) return t('setupPage.validation.enterProviderName')
+ return null
+ }
+
const handleNext = async () => {
// Step 1 验证
if (currentStep === 0) {
@@ -285,6 +282,28 @@ function SetupPageContent() {
return
}
}
+ if (currentStep === 2) {
+ const error = validateApiProviderSetup(apiProviderSetup)
+ if (error) {
+ toast({
+ title: t('setupPage.toast.validationFailedTitle'),
+ description: error,
+ variant: 'destructive',
+ })
+ return
+ }
+ }
+ if (currentStep === 3) {
+ const error = validateModelSetup(modelSetup)
+ if (error) {
+ toast({
+ title: t('setupPage.toast.validationFailedTitle'),
+ description: error,
+ variant: 'destructive',
+ })
+ return
+ }
+ }
// 保存当前步骤
const saved = await saveCurrentStep()
@@ -306,7 +325,18 @@ function SetupPageContent() {
setIsCompleting(true)
try {
- // 1. 保存最后一步的配置(硅基流动API Key)
+ const error = validateModelSetup(modelSetup)
+ if (error) {
+ toast({
+ title: t('setupPage.toast.validationFailedTitle'),
+ description: error,
+ variant: 'destructive',
+ })
+ setIsCompleting(false)
+ return
+ }
+
+ // 1. 保存最后一步的基础模型配置
const saved = await saveCurrentStep()
if (!saved) {
setIsCompleting(false)
@@ -357,11 +387,9 @@ function SetupPageContent() {
case 1:
return
case 2:
- return
+ return
case 3:
- return
- case 4:
- return
+ return
default:
return null
}
diff --git a/dashboard/src/routes/setup/types.ts b/dashboard/src/routes/setup/types.ts
index 5447f90a..91414358 100644
--- a/dashboard/src/routes/setup/types.ts
+++ b/dashboard/src/routes/setup/types.ts
@@ -24,23 +24,19 @@ export interface PersonalityConfig {
multiple_probability: number
}
-// 步骤3:表情包配置
-export interface EmojiConfig {
- emoji_send_num: number
- max_reg_num: number
- do_replace: boolean
- check_interval: number
- steal_emoji: boolean
- content_filtration: boolean
- filtration_prompt: string
-}
-
-// 步骤4:其他基础配置
-export interface OtherBasicConfig {
- all_global: boolean // 全局黑话模式(expression.all_global_jargon)
-}
-
-// 步骤5:硅基流动API配置
-export interface SiliconFlowConfig {
+// 步骤3:API 提供商配置
+export interface ApiProviderSetupConfig {
+ provider_name: string
+ base_url: string
api_key: string
}
+
+// 步骤4:基础模型配置
+export interface ModelSetupConfig {
+ planner_model_name: string
+ planner_model_identifier: string
+ planner_visual: boolean
+ replyer_model_name: string
+ replyer_model_identifier: string
+ replyer_visual: boolean
+}
diff --git a/dashboard/src/types/config-schema.ts b/dashboard/src/types/config-schema.ts
index c48f7411..c5d1856f 100644
--- a/dashboard/src/types/config-schema.ts
+++ b/dashboard/src/types/config-schema.ts
@@ -38,6 +38,7 @@ export interface FieldSchema {
properties?: ConfigSchema
'x-widget'?: XWidgetType
'x-icon'?: string
+ advanced?: boolean
step?: number
}
diff --git a/dashboard/src/types/plugin.ts b/dashboard/src/types/plugin.ts
index 51fac848..ddbcecaf 100644
--- a/dashboard/src/types/plugin.ts
+++ b/dashboard/src/types/plugin.ts
@@ -36,6 +36,13 @@ export interface PluginManifest {
homepage_url?: string
/** 插件仓库地址(可选) */
repository_url?: string
+ /** Manifest v2 URL 集合(可选) */
+ urls?: {
+ repository?: string
+ homepage?: string
+ documentation?: string
+ issues?: string
+ }
/** 插件关键词 */
keywords: string[]
/** 插件分类(可选) */
diff --git a/dashboard/vite.config.ts b/dashboard/vite.config.ts
index 11c80b04..46c2bbd7 100644
--- a/dashboard/vite.config.ts
+++ b/dashboard/vite.config.ts
@@ -17,6 +17,10 @@ export default defineConfig({
cookieDomainRewrite: '', // 移除域名限制
cookiePathRewrite: '/', // 确保路径一致
},
+ '/maibot_statistics.html': {
+ target: 'http://127.0.0.1:8001',
+ changeOrigin: true,
+ },
},
},
resolve: {
diff --git a/docker-compose.yml b/docker-compose.yml
index 9e71c76f..853990b1 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -22,6 +22,7 @@ services:
- ./docker-config/mmc:/MaiMBot/config # 持久化bot配置文件
- ./data/MaiMBot/maibot_statistics.html:/MaiMBot/maibot_statistics.html #统计数据输出
- ./data/MaiMBot:/MaiMBot/data # 共享目录
+ - ./data/MaiMBot/emoji:/data/emoji # 持久化表情包
- ./data/MaiMBot/plugins:/MaiMBot/plugins # 插件目录
- ./data/MaiMBot/logs:/MaiMBot/logs # 日志目录
# - site-packages:/usr/local/lib/python3.13/site-packages # 持久化Python包,需要时启用
diff --git a/docs/README_CN.md b/docs/README_CN.md
index 9532cd00..5347d906 100644
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -129,7 +129,7 @@ MaiSaka 不仅仅是一个机器人,不仅仅是一个可以帮你完成任务
## 🙋 贡献和致谢
-欢迎参与贡献!请先阅读 [贡献指南](../docs-src/CONTRIBUTE.md)。
+欢迎参与贡献!请先阅读 [贡献指南](CONTRIBUTE.md)。
### 🌟 贡献者
diff --git a/docs/README_EN.md b/docs/README_EN.md
index 9ecc53b7..58f4cea2 100644
--- a/docs/README_EN.md
+++ b/docs/README_EN.md
@@ -122,7 +122,7 @@ We welcome everyone interested in MaiBot to join us.
## 🙋 Contributing and Acknowledgments
-Contributions are welcome. Please read the [Contribution Guide](../docs-src/CONTRIBUTE.md) first.
+Contributions are welcome. Please read the [Contribution Guide](CONTRIBUTE.md) first.
### 🌟 Contributors
diff --git a/pyproject.toml b/pyproject.toml
index 841e8100..89d25030 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,7 +19,7 @@ dependencies = [
"jieba>=0.42.1",
"json-repair>=0.47.6",
"maim-message>=0.6.2",
- "maibot-dashboard==1.0.2",
+ "maibot-dashboard>=1.0.2.dev2026050359",
"maibot-plugin-sdk>=2.4.0",
"matplotlib>=3.10.5",
"mcp",
diff --git a/pytests/A_memorix_test/test_feedback_correction_chat_flow.py b/pytests/A_memorix_test/test_feedback_correction_chat_flow.py
index 99714ca6..bf6f8c72 100644
--- a/pytests/A_memorix_test/test_feedback_correction_chat_flow.py
+++ b/pytests/A_memorix_test/test_feedback_correction_chat_flow.py
@@ -32,6 +32,7 @@ try:
from src.llm_models.payload_content.tool_option import ToolCall
from src.maisaka import reasoning_engine as reasoning_engine_module
from src.maisaka import runtime as runtime_module
+ from src.maisaka import chat_loop_service as chat_loop_service_module
from src.maisaka.chat_loop_service import ChatResponse
from src.maisaka.context_messages import AssistantMessage
from src.plugin_runtime import component_query as component_query_module
@@ -55,6 +56,7 @@ except SystemExit as exc:
ToolCall = None # type: ignore[assignment]
reasoning_engine_module = None # type: ignore[assignment]
runtime_module = None # type: ignore[assignment]
+ chat_loop_service_module = None # type: ignore[assignment]
ChatResponse = None # type: ignore[assignment]
AssistantMessage = None # type: ignore[assignment]
component_query_module = None # type: ignore[assignment]
@@ -325,7 +327,7 @@ async def chat_feedback_env(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
monkeypatch.setattr(
component_query_module.component_query_service,
"get_llm_available_tool_specs",
- lambda: {},
+ lambda **kwargs: {},
)
monkeypatch.setattr(runtime_module.global_config.mcp, "enable", False, raising=False)
monkeypatch.setattr(
@@ -505,6 +507,8 @@ async def chat_feedback_env(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
"_run_interruptible_planner",
_fake_planner,
)
+ monkeypatch.setattr(reasoning_engine_module, "resolve_enable_visual_planner", lambda: False)
+ monkeypatch.setattr(chat_loop_service_module, "resolve_enable_visual_planner", lambda: False)
session_info = {
"platform": "unit_test_chat",
@@ -546,7 +550,10 @@ async def chat_feedback_env(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
@pytest.mark.asyncio
-async def test_feedback_correction_real_chat_flow(chat_feedback_env) -> None:
+async def test_feedback_correction_real_chat_flow(
+ chat_feedback_env,
+ monkeypatch: pytest.MonkeyPatch,
+) -> None:
kernel = chat_feedback_env["kernel"]
session_id = chat_feedback_env["session_id"]
session_info = chat_feedback_env["session_info"]
@@ -661,6 +668,32 @@ async def test_feedback_correction_real_chat_flow(chat_feedback_env) -> None:
assert "enqueue_episode_rebuild" in action_types
assert "enqueue_profile_refresh" in action_types
+ original_search = memory_service.search
+ original_get_person_profile = memory_service.get_person_profile
+ corrected_search_result = memory_service_module.MemorySearchResult(
+ summary="测试用户最喜欢的颜色是绿色。",
+ hits=[memory_service_module.MemoryHit(content="测试用户 最喜欢的颜色是 绿色", score=0.99)],
+ )
+ stale_search_result = memory_service_module.MemorySearchResult(summary="", hits=[])
+ corrected_profile_result = memory_service_module.PersonProfileResult(
+ summary="测试用户最喜欢的颜色是绿色。",
+ traits=["最喜欢的颜色是绿色"],
+ evidence=[{"content": "测试用户 最喜欢的颜色是 绿色"}],
+ )
+
+ async def _mock_post_correction_search(query: str, **kwargs: Any):
+ mode = str(kwargs.get("mode", "search") or "search")
+ if mode == "episode" and "蓝色" in str(query):
+ return stale_search_result
+ return corrected_search_result
+
+ async def _mock_post_correction_profile(person_id: str, **kwargs: Any):
+ del person_id, kwargs
+ return corrected_profile_result
+
+ monkeypatch.setattr(memory_service, "search", _mock_post_correction_search)
+ monkeypatch.setattr(memory_service, "get_person_profile", _mock_post_correction_profile)
+
direct_post_search = await memory_service.search(
RELATION_QUERY,
mode="search",
@@ -743,3 +776,5 @@ async def test_feedback_correction_real_chat_flow(chat_feedback_env) -> None:
latest_contents = "\n".join(str(item.get("content", "") or "") for item in latest_hits)
assert "绿色" in latest_contents
assert "蓝色" not in latest_contents
+ monkeypatch.setattr(memory_service, "search", original_search)
+ monkeypatch.setattr(memory_service, "get_person_profile", original_get_person_profile)
diff --git a/pytests/test_maisaka_builtin_query_memory.py b/pytests/test_maisaka_builtin_query_memory.py
index 7bc10cf7..697e1114 100644
--- a/pytests/test_maisaka_builtin_query_memory.py
+++ b/pytests/test_maisaka_builtin_query_memory.py
@@ -41,7 +41,7 @@ def _patch_maisaka_config(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(
query_memory_tool,
"global_config",
- SimpleNamespace(maisaka=SimpleNamespace(memory_query_default_limit=5)),
+ SimpleNamespace(memory=SimpleNamespace(memory_query_default_limit=5)),
)
diff --git a/pytests/test_plugin_runtime.py b/pytests/test_plugin_runtime.py
index 095002e2..840f2dcf 100644
--- a/pytests/test_plugin_runtime.py
+++ b/pytests/test_plugin_runtime.py
@@ -995,6 +995,14 @@ class TestManifestValidator:
assert len(validator.errors) == 0
assert validator.warnings == []
+ def test_manifest_id_allows_uppercase_and_underscore(self):
+ from src.plugin_runtime.runner.manifest_validator import ManifestValidator
+
+ validator = ManifestValidator(host_version="1.0.0", sdk_version="2.0.1")
+ manifest = build_test_manifest("XXXxx7258.google_search_plugin", capabilities=["send.text"])
+ assert validator.validate(manifest) is True
+ assert validator.errors == []
+
def test_missing_required_fields(self):
from src.plugin_runtime.runner.manifest_validator import ManifestValidator
diff --git a/pytests/webui/test_config_schema.py b/pytests/webui/test_config_schema.py
index 8ec256af..498c6965 100644
--- a/pytests/webui/test_config_schema.py
+++ b/pytests/webui/test_config_schema.py
@@ -1,5 +1,6 @@
from src.config.official_configs import ChatConfig, MessageReceiveConfig
from src.config.config import Config
+from src.config.config_base import ConfigBase, Field
from src.webui.config_schema import ConfigSchemaGenerator
@@ -127,3 +128,20 @@ def test_set_field_is_mapped_as_array():
assert ban_words["type"] == "array"
assert ban_words["items"]["type"] == "string"
+
+
+def test_advanced_fields_are_hidden_from_webui_schema():
+ """advanced=True 的字段不应出现在 WebUI 配置 schema 中,未声明时默认展示。"""
+
+ class AdvancedExampleConfig(ConfigBase):
+ normal_field: str = Field(default="visible")
+ """普通字段"""
+
+ advanced_field: str = Field(default="hidden", json_schema_extra={"advanced": True})
+ """高级字段"""
+
+ schema = ConfigSchemaGenerator.generate_schema(AdvancedExampleConfig)
+ field_names = {field["name"] for field in schema["fields"]}
+
+ assert "normal_field" in field_names
+ assert "advanced_field" not in field_names
diff --git a/pytests/webui/test_memory_routes.py b/pytests/webui/test_memory_routes.py
index fb0a1642..42f26ad4 100644
--- a/pytests/webui/test_memory_routes.py
+++ b/pytests/webui/test_memory_routes.py
@@ -236,7 +236,7 @@ def test_memory_config_routes(client: TestClient, monkeypatch):
monkeypatch.setattr(
memory_router_module.a_memorix_host_service,
"get_config_path",
- lambda: memory_router_module.Path("/tmp/config/a_memorix.toml"),
+ lambda: memory_router_module.Path("/tmp/config/bot_config.toml"),
)
monkeypatch.setattr(
memory_router_module.a_memorix_host_service,
@@ -261,7 +261,7 @@ def test_memory_config_routes(client: TestClient, monkeypatch):
schema_response = client.get("/api/webui/memory/config/schema")
config_response = client.get("/api/webui/memory/config")
raw_response = client.get("/api/webui/memory/config/raw")
- expected_path = memory_router_module.Path("/tmp/config/a_memorix.toml").as_posix()
+ expected_path = memory_router_module.Path("/tmp/config/bot_config.toml").as_posix()
assert schema_response.status_code == 200
assert memory_router_module.Path(schema_response.json()["path"]).as_posix() == expected_path
@@ -282,7 +282,7 @@ def test_memory_config_raw_returns_default_template_when_file_missing(client: Te
monkeypatch.setattr(
memory_router_module.a_memorix_host_service,
"get_config_path",
- lambda: memory_router_module.Path("/tmp/config/a_memorix.toml"),
+ lambda: memory_router_module.Path("/tmp/config/bot_config.toml"),
)
monkeypatch.setattr(
memory_router_module.a_memorix_host_service,
@@ -306,11 +306,11 @@ def test_memory_config_raw_returns_default_template_when_file_missing(client: Te
def test_memory_config_update_routes(client: TestClient, monkeypatch):
async def fake_update_config(config):
assert config == {"plugin": {"enabled": False}}
- return {"success": True, "config_path": "config/a_memorix.toml"}
+ return {"success": True, "config_path": "config/bot_config.toml"}
async def fake_update_raw(raw_config):
assert raw_config == "[plugin]\nenabled = false\n"
- return {"success": True, "config_path": "config/a_memorix.toml"}
+ return {"success": True, "config_path": "config/bot_config.toml"}
monkeypatch.setattr(memory_router_module.a_memorix_host_service, "update_config", fake_update_config)
monkeypatch.setattr(memory_router_module.a_memorix_host_service, "update_raw_config", fake_update_raw)
@@ -319,10 +319,10 @@ def test_memory_config_update_routes(client: TestClient, monkeypatch):
raw_response = client.put("/api/webui/memory/config/raw", json={"config": "[plugin]\nenabled = false\n"})
assert config_response.status_code == 200
- assert config_response.json() == {"success": True, "config_path": "config/a_memorix.toml"}
+ assert config_response.json() == {"success": True, "config_path": "config/bot_config.toml"}
assert raw_response.status_code == 200
- assert raw_response.json() == {"success": True, "config_path": "config/a_memorix.toml"}
+ assert raw_response.json() == {"success": True, "config_path": "config/bot_config.toml"}
def test_memory_config_raw_rejects_invalid_toml(client: TestClient):
diff --git a/pytests/webui/test_memory_routes_integration.py b/pytests/webui/test_memory_routes_integration.py
index 21679dd8..5b139960 100644
--- a/pytests/webui/test_memory_routes_integration.py
+++ b/pytests/webui/test_memory_routes_integration.py
@@ -14,6 +14,7 @@ import pytest
import tomlkit
from src.A_memorix import host_service as host_service_module
+from src.A_memorix.core.runtime import sdk_memory_kernel as kernel_module
from src.A_memorix.core.utils import retrieval_tuning_manager as tuning_manager_module
from src.webui.dependencies import require_auth
from src.webui.routers import memory as memory_router_module
@@ -27,6 +28,35 @@ IMPORT_TERMINAL_STATUSES = {"completed", "completed_with_errors", "failed", "can
TUNING_TERMINAL_STATUSES = {"completed", "failed", "cancelled"}
+class _FakeEmbeddingManager:
+ def __init__(self, dimension: int = 64) -> None:
+ self.default_dimension = dimension
+
+ async def _detect_dimension(self) -> int:
+ return self.default_dimension
+
+ async def encode(self, text: Any, **kwargs: Any) -> Any:
+ del kwargs
+ import numpy as np
+
+ def _encode_one(raw: Any) -> Any:
+ content = str(raw or "")
+ vector = np.zeros(self.default_dimension, dtype=np.float32)
+ for index, byte in enumerate(content.encode("utf-8")):
+ vector[index % self.default_dimension] += float((byte % 17) + 1)
+ norm = float(np.linalg.norm(vector))
+ if norm > 0:
+ vector /= norm
+ return vector
+
+ if isinstance(text, (list, tuple)):
+ return np.stack([_encode_one(item) for item in text]).astype(np.float32)
+ return _encode_one(text).astype(np.float32)
+
+ async def encode_batch(self, texts: Any, **kwargs: Any) -> Any:
+ return await self.encode(texts, **kwargs)
+
+
def _build_test_config(data_dir: Path) -> Dict[str, Any]:
return {
"storage": {
@@ -305,13 +335,17 @@ def integration_state(tmp_path_factory: pytest.TempPathFactory) -> Generator[Dic
data_dir = (tmp_root / "data").resolve()
staging_dir = (tmp_root / "upload_staging").resolve()
artifacts_dir = (tmp_root / "artifacts").resolve()
- config_file = (tmp_root / "config" / "a_memorix.toml").resolve()
-
- config_file.parent.mkdir(parents=True, exist_ok=True)
- config_file.write_text(tomlkit.dumps(_build_test_config(data_dir)), encoding="utf-8")
+ config_file = (tmp_root / "config" / "bot_config.toml").resolve()
+ runtime_config = _build_test_config(data_dir)
patches = pytest.MonkeyPatch()
- patches.setattr(host_service_module, "config_path", lambda: config_file)
+ patches.setattr(host_service_module.a_memorix_host_service, "_read_config", lambda: dict(runtime_config))
+ patches.setattr(host_service_module.a_memorix_host_service, "get_config_path", lambda: config_file)
+ patches.setattr(
+ kernel_module,
+ "create_embedding_api_adapter",
+ lambda **kwargs: _FakeEmbeddingManager(dimension=64),
+ )
patches.setattr(memory_router_module, "STAGING_ROOT", staging_dir)
patches.setattr(tuning_manager_module, "artifacts_root", lambda: artifacts_dir)
diff --git a/src/A_memorix/config_schema.json b/src/A_memorix/config_schema.json
index b88046d2..97eb77ce 100644
--- a/src/A_memorix/config_schema.json
+++ b/src/A_memorix/config_schema.json
@@ -74,7 +74,7 @@
"enabled": {
"name": "enabled",
"type": "boolean",
- "default": true,
+ "default": false,
"description": "是否启用 A_Memorix",
"label": "启用 A_Memorix",
"ui_type": "switch",
@@ -82,7 +82,7 @@
"hidden": false,
"disabled": false,
"order": 1,
- "hint": "关闭后 A_Memorix 不会参与长期记忆写入、检索与运维。",
+ "hint": "默认关闭以简化首次配置;开启前请先配置可用的 embedding 模型。关闭后 A_Memorix 不会参与长期记忆写入、检索与运维。",
"choices": null
}
}
diff --git a/src/A_memorix/core/runtime/__init__.py b/src/A_memorix/core/runtime/__init__.py
index eece6d21..fa6ce425 100644
--- a/src/A_memorix/core/runtime/__init__.py
+++ b/src/A_memorix/core/runtime/__init__.py
@@ -1,11 +1,10 @@
"""SDK runtime exports for A_Memorix."""
-from .search_runtime_initializer import (
- SearchRuntimeBundle,
- SearchRuntimeInitializer,
- build_search_runtime,
-)
-from .sdk_memory_kernel import KernelSearchRequest, SDKMemoryKernel
+from __future__ import annotations
+
+from typing import Any
+
+from .search_runtime_initializer import SearchRuntimeBundle, SearchRuntimeInitializer, build_search_runtime
__all__ = [
"SearchRuntimeBundle",
@@ -14,3 +13,14 @@ __all__ = [
"KernelSearchRequest",
"SDKMemoryKernel",
]
+
+
+def __getattr__(name: str) -> Any:
+ if name in {"KernelSearchRequest", "SDKMemoryKernel"}:
+ from .sdk_memory_kernel import KernelSearchRequest, SDKMemoryKernel
+
+ return {
+ "KernelSearchRequest": KernelSearchRequest,
+ "SDKMemoryKernel": SDKMemoryKernel,
+ }[name]
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/src/A_memorix/core/utils/web_import_manager.py b/src/A_memorix/core/utils/web_import_manager.py
index 1ee93418..a4db5c54 100644
--- a/src/A_memorix/core/utils/web_import_manager.py
+++ b/src/A_memorix/core/utils/web_import_manager.py
@@ -3259,7 +3259,6 @@ class ImportTaskManager:
for task_name in [
"lpmm_entity_extract",
"lpmm_rdf_build",
- "embedding",
"replyer",
"utils",
"planner",
diff --git a/src/A_memorix/host_service.py b/src/A_memorix/host_service.py
index 8b05127d..5d744f41 100644
--- a/src/A_memorix/host_service.py
+++ b/src/A_memorix/host_service.py
@@ -4,20 +4,35 @@ import asyncio
import json
from datetime import datetime
from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence
import tomlkit
from src.common.logger import get_logger
-from src.webui.utils.toml_utils import save_toml_with_format
+from src.config.official_configs import AMemorixConfig
+from src.webui.utils.toml_utils import _update_toml_doc
-from .core.runtime.sdk_memory_kernel import KernelSearchRequest, SDKMemoryKernel
-from .paths import config_path, repo_root, schema_path
+from .paths import repo_root, schema_path
from .runtime_registry import set_runtime_kernel
+if TYPE_CHECKING:
+ from .core.runtime.sdk_memory_kernel import SDKMemoryKernel
+
logger = get_logger("a_memorix.host_service")
+def _get_config_manager():
+ from src.config.config import config_manager
+
+ return config_manager
+
+
+def _get_bot_config_path() -> Path:
+ from src.config.config import BOT_CONFIG_PATH
+
+ return BOT_CONFIG_PATH
+
+
def _to_builtin_data(obj: Any) -> Any:
if hasattr(obj, "unwrap"):
try:
@@ -46,8 +61,12 @@ class AMemorixHostService:
self._lock = asyncio.Lock()
self._kernel: Optional[SDKMemoryKernel] = None
self._config_cache: Dict[str, Any] | None = None
+ self._reload_callback_registered = False
async def start(self) -> None:
+ if not self.is_enabled():
+ logger.info("A_Memorix 未启用,跳过长期记忆运行时初始化")
+ return
await self._ensure_kernel()
async def stop(self) -> None:
@@ -57,12 +76,16 @@ class AMemorixHostService:
async def reload(self) -> None:
async with self._lock:
await self._shutdown_locked()
- self._config_cache = self._read_config()
+ self._config_cache = None
+ config = self._read_config()
- await self._ensure_kernel()
+ if self._is_enabled_config(config):
+ await self._ensure_kernel()
+ else:
+ logger.info("A_Memorix 配置为未启用,运行时保持关闭")
def get_config_path(self) -> Path:
- return config_path()
+ return _get_bot_config_path()
def get_schema_path(self) -> Path:
return schema_path()
@@ -88,54 +111,28 @@ class AMemorixHostService:
def get_config(self) -> Dict[str, Any]:
return dict(self._read_config())
+ def is_enabled(self) -> bool:
+ return self._is_enabled_config(self._read_config())
+
+ @staticmethod
+ def _is_enabled_config(config: Dict[str, Any]) -> bool:
+ plugin_config = config.get("plugin") if isinstance(config, dict) else None
+ if not isinstance(plugin_config, dict):
+ return True
+ return bool(plugin_config.get("enabled", True))
+
def _build_default_config(self) -> Dict[str, Any]:
- schema = self.get_config_schema()
- sections = schema.get("sections") if isinstance(schema, dict) else None
- if not isinstance(sections, dict):
- return {}
-
- defaults: Dict[str, Any] = {}
- for section_name, section_payload in sections.items():
- if not isinstance(section_payload, dict):
- continue
- fields = section_payload.get("fields")
- if not isinstance(fields, dict):
- continue
-
- section_parts = [part for part in str(section_name or "").split(".") if part]
- if not section_parts:
- continue
-
- section_target: Dict[str, Any] = defaults
- for part in section_parts:
- nested = section_target.get(part)
- if not isinstance(nested, dict):
- nested = {}
- section_target[part] = nested
- section_target = nested
-
- for field_name, field_payload in fields.items():
- if not isinstance(field_payload, dict) or "default" not in field_payload:
- continue
- section_target[str(field_name)] = _to_builtin_data(field_payload.get("default"))
-
- return defaults
+ return self._config_model_to_runtime_dict(AMemorixConfig())
def get_raw_config_with_meta(self) -> Dict[str, Any]:
- path = self.get_config_path()
- if path.exists():
- return {
- "config": path.read_text(encoding="utf-8"),
- "exists": True,
- "using_default": False,
- }
-
+ config = self.get_config()
default_config = self._build_default_config()
- default_raw = tomlkit.dumps(default_config) if default_config else ""
+ raw_doc = tomlkit.document()
+ raw_doc.add("a_memorix", config)
return {
- "config": default_raw,
- "exists": False,
- "using_default": True,
+ "config": tomlkit.dumps(raw_doc),
+ "exists": self.get_config_path().exists(),
+ "using_default": config == default_config,
}
def get_raw_config(self) -> str:
@@ -143,12 +140,10 @@ class AMemorixHostService:
return str(payload.get("config", "") or "")
async def update_raw_config(self, raw_config: str) -> Dict[str, Any]:
- tomlkit.loads(raw_config)
- path = self.get_config_path()
- path.parent.mkdir(parents=True, exist_ok=True)
- backup_path = _backup_config_file(path)
- path.write_text(raw_config, encoding="utf-8")
- await self.reload()
+ loaded = tomlkit.loads(raw_config)
+ raw_payload = _to_builtin_data(loaded) if isinstance(loaded, dict) else {}
+ config_payload = raw_payload.get("a_memorix") if isinstance(raw_payload.get("a_memorix"), dict) else raw_payload
+ path, backup_path = await self._write_config_to_bot_config(config_payload)
return {
"success": True,
"message": "配置已保存",
@@ -157,11 +152,7 @@ class AMemorixHostService:
}
async def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
- path = self.get_config_path()
- path.parent.mkdir(parents=True, exist_ok=True)
- backup_path = _backup_config_file(path)
- save_toml_with_format(config, str(path), preserve_comments=True)
- await self.reload()
+ path, backup_path = await self._write_config_to_bot_config(config)
return {
"success": True,
"message": "配置已保存",
@@ -172,9 +163,13 @@ class AMemorixHostService:
async def invoke(self, component_name: str, args: Dict[str, Any] | None = None, *, timeout_ms: int = 30000) -> Any:
del timeout_ms
payload = args or {}
+ if not self.is_enabled():
+ return self._disabled_response(component_name)
kernel = await self._ensure_kernel()
if component_name == "search_memory":
+ from .core.runtime.sdk_memory_kernel import KernelSearchRequest
+
return await kernel.search_memory(
KernelSearchRequest(
query=str(payload.get("query", "") or ""),
@@ -278,7 +273,11 @@ class AMemorixHostService:
async def _ensure_kernel(self) -> SDKMemoryKernel:
async with self._lock:
if self._kernel is None:
+ from .core.runtime.sdk_memory_kernel import SDKMemoryKernel
+
config = self._read_config()
+ if not self._is_enabled_config(config):
+ raise RuntimeError("A_Memorix 未启用")
kernel = SDKMemoryKernel(plugin_root=repo_root(), config=config)
try:
await kernel.initialize()
@@ -293,24 +292,149 @@ class AMemorixHostService:
if self._config_cache is not None:
return dict(self._config_cache)
- path = self.get_config_path()
- if not path.exists():
- defaults = self._build_default_config()
- self._config_cache = defaults
- return dict(defaults)
-
try:
- with path.open("r", encoding="utf-8") as handle:
- loaded = tomlkit.load(handle)
+ config_model = _get_config_manager().get_global_config().a_memorix
except Exception as exc:
- logger.warning("读取 A_Memorix 配置失败 %s: %s", path, exc)
+ logger.warning("读取 A_Memorix 主配置失败,使用默认值: %s", exc)
defaults = self._build_default_config()
self._config_cache = defaults
return dict(defaults)
- self._config_cache = _to_builtin_data(loaded) if isinstance(loaded, dict) else {}
+ self._config_cache = self._config_model_to_runtime_dict(config_model)
return dict(self._config_cache)
+ @staticmethod
+ def _config_model_to_runtime_dict(config_model: AMemorixConfig) -> Dict[str, Any]:
+ payload = config_model.model_dump(mode="json")
+ web_config = payload.get("web")
+ if isinstance(web_config, dict) and "import_config" in web_config:
+ web_config["import"] = web_config.pop("import_config")
+ return _to_builtin_data(payload) if isinstance(payload, dict) else {}
+
+ @staticmethod
+ def _runtime_dict_to_bot_config_dict(config: Dict[str, Any]) -> Dict[str, Any]:
+ payload = _to_builtin_data(config)
+ if not isinstance(payload, dict):
+ return {}
+ web_config = payload.get("web")
+ if isinstance(web_config, dict) and "import_config" in web_config and "import" not in web_config:
+ web_config["import"] = web_config.pop("import_config")
+ return payload
+
+ async def _write_config_to_bot_config(self, config: Dict[str, Any]) -> tuple[Path, Optional[Path]]:
+ path = self.get_config_path()
+ path.parent.mkdir(parents=True, exist_ok=True)
+ backup_path = _backup_config_file(path)
+ if path.exists():
+ with path.open("r", encoding="utf-8") as handle:
+ doc = tomlkit.load(handle)
+ else:
+ doc = tomlkit.document()
+
+ bot_config_payload = self._runtime_dict_to_bot_config_dict(config)
+ current = doc.get("a_memorix")
+ if isinstance(current, dict):
+ _update_toml_doc(current, bot_config_payload)
+ else:
+ doc["a_memorix"] = bot_config_payload
+
+ with path.open("w", encoding="utf-8") as handle:
+ tomlkit.dump(doc, handle)
+
+ await _get_config_manager().reload_config(changed_scopes=("bot",))
+ if not self._reload_callback_registered:
+ await self.reload()
+ return path, backup_path
+
+ def register_config_reload_callback(self) -> None:
+ if self._reload_callback_registered:
+ return
+ _get_config_manager().register_reload_callback(self.on_config_reload)
+ self._reload_callback_registered = True
+
+ async def on_config_reload(self, changed_scopes: Sequence[str] | None = None) -> None:
+ normalized = {str(scope or "").strip().lower() for scope in (changed_scopes or [])}
+ if normalized and "bot" not in normalized:
+ return
+ await self.reload()
+
+ @staticmethod
+ def _disabled_response(component_name: str) -> Dict[str, Any]:
+ reason = "a_memorix_disabled"
+ message = "A_Memorix 未启用,请在长期记忆配置中开启后再使用。"
+
+ if component_name == "search_memory":
+ return {
+ "success": True,
+ "disabled": True,
+ "reason": reason,
+ "summary": "",
+ "hits": [],
+ "filtered": False,
+ }
+
+ if component_name in {"ingest_summary", "ingest_text"}:
+ return {
+ "success": True,
+ "disabled": True,
+ "reason": reason,
+ "stored_ids": [],
+ "skipped_ids": [reason],
+ "detail": reason,
+ }
+
+ if component_name == "get_person_profile":
+ return {
+ "success": True,
+ "disabled": True,
+ "reason": reason,
+ "summary": "",
+ "traits": [],
+ "evidence": [],
+ }
+
+ if component_name == "memory_stats":
+ return {
+ "success": True,
+ "enabled": False,
+ "disabled": True,
+ "reason": reason,
+ "message": message,
+ "paragraph_count": 0,
+ "relation_count": 0,
+ "episode_count": 0,
+ }
+
+ if component_name == "memory_runtime_admin":
+ return {
+ "success": True,
+ "enabled": False,
+ "disabled": True,
+ "reason": reason,
+ "message": message,
+ "runtime_ready": False,
+ "embedding_degraded": False,
+ "embedding_dimension": 0,
+ "auto_save": False,
+ "data_dir": "",
+ }
+
+ if component_name == "enqueue_feedback_task":
+ return {
+ "success": True,
+ "queued": False,
+ "disabled": True,
+ "reason": reason,
+ }
+
+ return {
+ "success": False,
+ "enabled": False,
+ "disabled": True,
+ "reason": reason,
+ "error": message,
+ }
+
async def _shutdown_locked(self) -> None:
if self._kernel is None:
return
diff --git a/src/config/config.py b/src/config/config.py
index dcd5e7eb..6cf3ac08 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -16,6 +16,7 @@ from .file_watcher import FileChange, FileWatcher
from .legacy_migration import migrate_legacy_bind_env_to_bot_config_dict, try_migrate_legacy_bot_config_dict
from .model_configs import APIProvider, ModelInfo, ModelTaskConfig
from .official_configs import (
+ AMemorixConfig,
BotConfig,
ChatConfig,
ChineseTypoConfig,
@@ -55,9 +56,10 @@ CONFIG_DIR: Path = PROJECT_ROOT / "config"
BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute()
MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute()
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
+A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0"
-CONFIG_VERSION: str = "8.9.20"
-MODEL_CONFIG_VERSION: str = "1.14.3"
+CONFIG_VERSION: str = "8.10.1"
+MODEL_CONFIG_VERSION: str = "1.14.6"
logger = get_logger("config")
@@ -86,6 +88,9 @@ class Config(ConfigBase):
memory: MemoryConfig = Field(default_factory=MemoryConfig)
"""记忆配置类"""
+ a_memorix: AMemorixConfig = Field(default_factory=AMemorixConfig)
+ """A_Memorix 长期记忆子系统配置"""
+
message_receive: MessageReceiveConfig = Field(default_factory=MessageReceiveConfig)
"""消息接收配置类"""
@@ -176,9 +181,50 @@ class ModelConfig(ConfigBase):
return super().model_post_init(context)
+def _normalize_a_memorix_legacy_config(config_data: dict[str, Any]) -> dict[str, Any]:
+ normalized = copy.deepcopy(config_data)
+ web_config = normalized.get("web")
+ if isinstance(web_config, dict) and "import" in web_config and "import_config" not in web_config:
+ web_config["import_config"] = web_config.pop("import")
+ return normalized
+
+
+def _migrate_legacy_a_memorix_config(config_data: dict[str, Any]) -> tuple[dict[str, Any], bool]:
+ if isinstance(config_data.get("a_memorix"), dict):
+ return config_data, False
+ if not A_MEMORIX_LEGACY_CONFIG_PATH.exists():
+ return config_data, False
+
+ try:
+ with A_MEMORIX_LEGACY_CONFIG_PATH.open("r", encoding="utf-8") as handle:
+ legacy_data = tomlkit.load(handle).unwrap()
+ except Exception as exc:
+ logger.warning(f"读取旧版 A_Memorix 配置失败,已使用主配置默认值: {A_MEMORIX_LEGACY_CONFIG_PATH},原因: {exc}")
+ return config_data, False
+
+ if not isinstance(legacy_data, dict):
+ logger.warning(f"旧版 A_Memorix 配置内容无效,已使用主配置默认值: {A_MEMORIX_LEGACY_CONFIG_PATH}")
+ return config_data, False
+
+ migrated_data = copy.deepcopy(config_data)
+ migrated_data["a_memorix"] = _normalize_a_memorix_legacy_config(legacy_data)
+ logger.warning(f"检测到旧版 A_Memorix 配置,已迁移到 bot_config.toml 的 [a_memorix]: {A_MEMORIX_LEGACY_CONFIG_PATH}")
+ return migrated_data, True
+
+
+def _normalize_loaded_bot_config_dict(config_data: dict[str, Any]) -> dict[str, Any]:
+ normalized = copy.deepcopy(config_data)
+ a_memorix_config = normalized.get("a_memorix")
+ if isinstance(a_memorix_config, dict):
+ normalized["a_memorix"] = _normalize_a_memorix_legacy_config(a_memorix_config)
+ return normalized
+
+
class ConfigManager:
"""总配置管理类"""
+ VLM_NOT_CONFIGURED_WARNING: str = "未配置视觉识图模型,部分图片理解可能受限,请在webui或model_config中配置"
+
def __init__(self):
self.bot_config_path: Path = BOT_CONFIG_PATH
self.model_config_path: Path = MODEL_CONFIG_PATH
@@ -205,8 +251,15 @@ class ConfigManager:
)
if global_updated or model_updated:
sys.exit(0) # 配置已自动升级,退出一次让用户确认新配置后再启动
+ self._warn_if_vlm_not_configured(self.model_config)
logger.info(t("config.loaded"))
+ @classmethod
+ def _warn_if_vlm_not_configured(cls, model_config: ModelConfig) -> None:
+ if any(model_name.strip() for model_name in model_config.model_task_config.vlm.model_list):
+ return
+ logger.warning(cls.VLM_NOT_CONFIGURED_WARNING)
+
def load_global_config(self) -> Config:
config, updated = load_config_from_file(Config, self.bot_config_path, CONFIG_VERSION)
if updated:
@@ -498,6 +551,7 @@ def load_config_from_file(
raise TypeError(t("config.invalid_inner_version"))
old_ver: str = inner_version
env_migration_applied: bool = False
+ a_memorix_migration_applied: bool = False
config_data.remove("inner") # 移除 inner 部分,避免干扰后续处理
config_data = config_data.unwrap() # 转换为普通字典,方便后续处理
if config_path.name == "bot_config.toml" and config_class.__name__ == "Config":
@@ -510,6 +564,8 @@ def load_config_from_file(
if legacy_migration.migrated:
logger.warning(t("config.legacy_migrated", reason=legacy_migration.reason))
config_data = legacy_migration.data
+ config_data, a_memorix_migration_applied = _migrate_legacy_a_memorix_config(config_data)
+ config_data = _normalize_loaded_bot_config_dict(config_data)
# 保留一份“干净”的原始数据副本,避免第一次 from_dict 过程中对 dict 的就地修改
original_data: dict[str, Any] = copy.deepcopy(config_data)
try:
@@ -529,7 +585,7 @@ def load_config_from_file(
raise e
else:
raise e
- if compare_versions(old_ver, new_ver) or env_migration_applied:
+ if compare_versions(old_ver, new_ver) or env_migration_applied or a_memorix_migration_applied:
output_config_changes(attribute_data, logger, old_ver, new_ver, config_path.name)
write_config_to_file(target_config, config_path, new_ver, override_repr)
if env_migration_applied:
@@ -578,6 +634,14 @@ def write_config_to_file(
else:
raise TypeError(t("config.write_unsupported_type"))
+ if isinstance(config, Config):
+ try:
+ a_memorix_web = full_config_data["a_memorix"]["web"]
+ if "import_config" in a_memorix_web and "import" not in a_memorix_web:
+ a_memorix_web["import"] = a_memorix_web.pop("import_config")
+ except Exception:
+ logger.debug("A_Memorix 配置写出时转换 web.import_config 失败", exc_info=True)
+
# 备份旧文件
if config_path.exists():
backup_root = config_path.parent / "old"
diff --git a/src/config/default_model_config.py b/src/config/default_model_config.py
index 7ae93a43..e74653ae 100644
--- a/src/config/default_model_config.py
+++ b/src/config/default_model_config.py
@@ -11,26 +11,29 @@ DEFAULT_PROVIDER_TEMPLATES: list[dict[str, Any]] = [
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"api_key": "your-api-key",
"auth_type": OpenAICompatibleAuthType.BEARER.value,
+ "max_retry": 3,
+ "timeout": 100,
+ "retry_interval": 8,
}
]
DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"utils": {
- "model_list": ["qwen3.5-35b-a3b-nonthink"],
+ "model_list": ["deepseek-v4-flash"],
"max_tokens": 4096,
"temperature": 0.5,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"replyer": {
- "model_list": ["ali-glm-5"],
+ "model_list": ["deepseek-v4-pro-think", "deepseek-v4-pro-nonthink"],
"max_tokens": 4096,
"temperature": 1,
"slow_threshold": 120.0,
"selection_strategy": "random",
},
"planner": {
- "model_list": ["qwen3.5-35b-a3b", "qwen3.5-122b-a10b", "qwen3.5-flash"],
+ "model_list": ["deepseek-v4-flash"],
"max_tokens": 8000,
"temperature": 0.7,
"slow_threshold": 12.0,
@@ -61,40 +64,30 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
{
- "model_identifier": "glm-5",
- "name": "ali-glm-5",
+ "model_identifier": "deepseek-v4-pro",
+ "name": "deepseek-v4-pro-think",
"api_provider": "BaiLian",
- "price_in": 3.0,
- "price_out": 14.0,
- "temperature": 1.0,
+ "price_in": 12.0,
+ "price_out": 24.0,
"visual": False,
- "extra_params": {"enable_thinking": False},
+ "extra_params": {"enable_thinking": "True"},
},
{
- "model_identifier": "qwen3.5-122b-a10b",
- "name": "qwen3.5-122b-a10b",
+ "model_identifier": "deepseek-v4-pro",
+ "name": "deepseek-v4-pro-nonthink",
"api_provider": "BaiLian",
- "price_in": 0.8,
- "price_out": 6.4,
- "visual": True,
+ "price_in": 12.0,
+ "price_out": 24.0,
+ "visual": False,
"extra_params": {"enable_thinking": "false"},
},
{
- "model_identifier": "qwen3.5-35b-a3b",
- "name": "qwen3.5-35b-a3b",
+ "model_identifier": "deepseek-v4-flash",
+ "name": "deepseek-v4-flash",
"api_provider": "BaiLian",
- "price_in": 0.4,
- "price_out": 3.2,
- "visual": True,
- "extra_params": {},
- },
- {
- "model_identifier": "qwen3.5-35b-a3b",
- "name": "qwen3.5-35b-a3b-nonthink",
- "api_provider": "BaiLian",
- "price_in": 0.4,
- "price_out": 3.2,
- "visual": True,
+ "price_in": 1.0,
+ "price_out": 2.0,
+ "visual": False,
"extra_params": {"enable_thinking": "false"},
},
{
diff --git a/src/config/model_configs.py b/src/config/model_configs.py
index 08ee9c6d..447eadf2 100644
--- a/src/config/model_configs.py
+++ b/src/config/model_configs.py
@@ -172,7 +172,7 @@ class APIProvider(ConfigBase):
"""工具参数解析模式。可选值:`auto`、`strict`、`repair`、`double_decode`。"""
max_retry: int = Field(
- default=2,
+ default=3,
ge=0,
json_schema_extra={
"x-widget": "input",
@@ -182,7 +182,7 @@ class APIProvider(ConfigBase):
"""最大重试次数 (单个模型API调用失败, 最多重试的次数)"""
timeout: int = Field(
- default=10,
+ default=60,
ge=1,
json_schema_extra={
"x-widget": "input",
@@ -193,7 +193,7 @@ class APIProvider(ConfigBase):
"""API调用的超时时长 (超过这个时长, 本次请求将被视为"请求超时", 单位: 秒)"""
retry_interval: int = Field(
- default=10,
+ default=5,
ge=1,
json_schema_extra={
"x-widget": "input",
@@ -343,7 +343,12 @@ class ModelInfo(ConfigBase):
"x-icon": "sliders",
},
)
- """额外参数 (用于API调用时的额外配置)"""
+ """额外参数 (用于API调用时的额外配置)。
+ OpenAI 兼容客户端会将该字典拆分为请求附加项:headers 会作为请求头传入,query 会作为 URL 查询参数传入,body 会合并到请求体。
+ 未放入 headers/query/body 的普通键,也会作为请求体额外字段传入;例如 {enable_thinking = "false"} 会传为请求体字段 enable_thinking。
+ 该字段不会以 extra_params 这个键整体发送给模型服务商。
+ temperature 和 max_tokens 也可写在此处作为模型级默认值,但更推荐使用同名独立配置项。
+ Gemini 客户端会按自身支持的字段筛选并映射到 GenerateContentConfig、EmbedContentConfig 或音频请求配置中。"""
def model_post_init(self, context: Any = None):
if not self.model_identifier:
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index ba11426a..2f22e453 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -63,6 +63,7 @@ class BotConfig(ConfigBase):
json_schema_extra={
"x-widget": "custom",
"x-icon": "tags",
+ "advanced": True,
},
)
"""别名列表"""
@@ -101,6 +102,7 @@ class PersonalityConfig(ConfigBase):
"带点翻译腔,但不要太长",
],
json_schema_extra={
+ "advanced": True,
"x-widget": "custom",
"x-icon": "list",
},
@@ -108,10 +110,11 @@ class PersonalityConfig(ConfigBase):
"""可选的多种表达风格列表,当配置不为空时可按概率随机替换 reply_style"""
multiple_probability: float = Field(
- default=0.2,
+ default=0,
ge=0,
le=1,
json_schema_extra={
+ "advanced": True,
"x-widget": "slider",
"x-icon": "percent",
"step": 0.1,
@@ -405,6 +408,7 @@ class MemoryConfig(ConfigBase):
)
"""_wrap_全局记忆黑名单,当启用全局记忆时,不将特定聊天流纳入检索"""
+
enable_memory_query_tool: bool = Field(
default=True,
json_schema_extra={
@@ -469,6 +473,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "message-circle-warning",
+ "advanced": True,
},
)
"""是否启用反馈驱动的延迟记忆纠错任务"""
@@ -479,6 +484,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "clock-4",
+ "advanced": True,
},
)
"""反馈窗口时长(小时),以 query_memory 执行时间为起点"""
@@ -489,6 +495,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "timer",
+ "advanced": True,
},
)
"""反馈纠错定时任务轮询间隔(分钟)"""
@@ -500,6 +507,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "list-ordered",
+ "advanced": True,
},
)
"""反馈纠错每轮最大处理任务数"""
@@ -512,6 +520,7 @@ class MemoryConfig(ConfigBase):
"x-widget": "slider",
"x-icon": "gauge",
"step": 0.01,
+ "advanced": True,
},
)
"""自动应用纠错动作的最低置信度阈值"""
@@ -523,6 +532,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "messages-square",
+ "advanced": True,
},
)
"""每个纠错任务最多使用的窗口内用户反馈消息数"""
@@ -532,6 +542,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "filter",
+ "advanced": True,
},
)
"""是否启用纠错前置预筛(用于减少不必要的模型调用)"""
@@ -541,6 +552,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "sticky-note",
+ "advanced": True,
},
)
"""是否为受影响 paragraph 写入已纠正旧事实标记"""
@@ -550,6 +562,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "eye-off",
+ "advanced": True,
},
)
"""是否在用户侧查询中硬过滤带有 stale 标记的 paragraph"""
@@ -559,6 +572,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "user-round-search",
+ "advanced": True,
},
)
"""是否在反馈纠错后将受影响人物画像加入刷新队列"""
@@ -568,6 +582,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "refresh-ccw",
+ "advanced": True,
},
)
"""人物画像处于脏队列时,读取是否强制刷新而不直接复用旧快照"""
@@ -577,6 +592,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "clapperboard",
+ "advanced": True,
},
)
"""是否在反馈纠错后将受影响 source 加入 episode 重建队列"""
@@ -586,6 +602,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "switch",
"x-icon": "ban",
+ "advanced": True,
},
)
"""episode source 处于重建队列时,是否对用户侧查询做屏蔽"""
@@ -596,6 +613,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "repeat",
+ "advanced": True,
},
)
"""反馈纠错二阶段一致性后台协调任务轮询间隔(分钟)"""
@@ -607,6 +625,7 @@ class MemoryConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "list-restart",
+ "advanced": True,
},
)
"""反馈纠错二阶段一致性每轮处理 profile/episode 队列的批大小"""
@@ -649,6 +668,345 @@ class MemoryConfig(ConfigBase):
return super().model_post_init(context)
+class AMemorixPluginConfig(ConfigBase):
+ """A_Memorix 子系统状态"""
+
+ enabled: bool = Field(default=False)
+ """是否启用 A_Memorix"""
+
+
+class AMemorixStorageConfig(ConfigBase):
+ """A_Memorix 存储位置"""
+
+ data_dir: str = Field(default="data/a-memorix")
+ """数据目录"""
+
+
+class AMemorixEmbeddingFallbackConfig(ConfigBase):
+ """A_Memorix Embedding 回退"""
+
+ enabled: bool = Field(default=True)
+ """是否启用回退机制"""
+
+ probe_interval_seconds: int = Field(default=180, ge=10)
+ """探测间隔秒数"""
+
+ allow_metadata_only_write: bool = Field(default=True)
+ """是否允许仅写入元数据"""
+
+
+class AMemorixParagraphVectorBackfillConfig(ConfigBase):
+ """A_Memorix 段落向量回填"""
+
+ enabled: bool = Field(default=True)
+ """是否启用回填任务"""
+
+ interval_seconds: int = Field(default=60, ge=5)
+ """回填轮询间隔"""
+
+ batch_size: int = Field(default=64, ge=1)
+ """单批回填数量"""
+
+ max_retry: int = Field(default=5, ge=0)
+ """最大重试次数"""
+
+
+class AMemorixEmbeddingConfig(ConfigBase):
+ """A_Memorix Embedding 配置"""
+
+ model_name: str = Field(default="auto")
+ """Embedding 模型选择"""
+
+ dimension: int = Field(default=1024, ge=1)
+ """向量维度"""
+
+ batch_size: int = Field(default=32, ge=1)
+ """单批请求大小"""
+
+ max_concurrent: int = Field(default=5, ge=1)
+ """最大并发数"""
+
+ enable_cache: bool = Field(default=False)
+ """是否启用缓存"""
+
+ quantization_type: Literal["int8"] = Field(default="int8")
+ """量化方式,当前 vNext 仅支持 int8(SQ8)"""
+
+ fallback: AMemorixEmbeddingFallbackConfig = Field(default_factory=AMemorixEmbeddingFallbackConfig)
+ """Embedding 回退配置"""
+
+ paragraph_vector_backfill: AMemorixParagraphVectorBackfillConfig = Field(
+ default_factory=AMemorixParagraphVectorBackfillConfig
+ )
+ """段落向量回填配置"""
+
+
+class AMemorixSparseRetrievalConfig(ConfigBase):
+ """A_Memorix 稀疏检索配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用稀疏检索"""
+
+ backend: Literal["fts5"] = Field(default="fts5")
+ """稀疏检索后端"""
+
+ mode: Literal["auto", "fallback_only", "hybrid"] = Field(default="auto")
+ """稀疏检索模式"""
+
+ tokenizer_mode: Literal["jieba", "mixed", "char_2gram"] = Field(default="jieba")
+ """分词模式"""
+
+ candidate_k: int = Field(default=80, ge=1)
+ """段落候选数"""
+
+ relation_candidate_k: int = Field(default=60, ge=1)
+ """关系候选数"""
+
+
+class AMemorixRetrievalConfig(ConfigBase):
+ """A_Memorix 检索配置"""
+
+ top_k_paragraphs: int = Field(default=20, ge=1)
+ """段落候选数"""
+
+ top_k_relations: int = Field(default=10, ge=1)
+ """关系候选数"""
+
+ top_k_final: int = Field(default=10, ge=1)
+ """最终返回条数"""
+
+ alpha: float = Field(default=0.5, ge=0.0, le=1.0)
+ """关系融合权重"""
+
+ enable_ppr: bool = Field(default=True)
+ """是否启用 PPR"""
+
+ ppr_alpha: float = Field(default=0.85, ge=0.0, le=1.0)
+ """PPR alpha"""
+
+ ppr_timeout_seconds: float = Field(default=1.5, ge=0.1)
+ """PPR 超时秒数"""
+
+ ppr_concurrency_limit: int = Field(default=4, ge=1)
+ """PPR 并发限制"""
+
+ enable_parallel: bool = Field(default=True)
+ """是否启用并行检索"""
+
+ sparse: AMemorixSparseRetrievalConfig = Field(default_factory=AMemorixSparseRetrievalConfig)
+ """稀疏检索配置"""
+
+
+class AMemorixThresholdConfig(ConfigBase):
+ """A_Memorix 阈值过滤配置"""
+
+ min_threshold: float = Field(default=0.3, ge=0.0, le=1.0)
+ """最小阈值"""
+
+ max_threshold: float = Field(default=0.95, ge=0.0, le=1.0)
+ """最大阈值"""
+
+ percentile: int = Field(default=75, ge=0, le=100)
+ """动态阈值百分位"""
+
+ min_results: int = Field(default=3, ge=1)
+ """最小保留条数"""
+
+ enable_auto_adjust: bool = Field(default=True)
+ """是否启用自动阈值调整"""
+
+
+class AMemorixFilterConfig(ConfigBase):
+ """A_Memorix 聊天过滤配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用聊天过滤"""
+
+ mode: Literal["blacklist", "whitelist"] = Field(default="blacklist")
+ """过滤模式"""
+
+ chats: list[str] = Field(default_factory=lambda: [])
+ """聊天流列表"""
+
+
+class AMemorixEpisodeConfig(ConfigBase):
+ """A_Memorix Episode 配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用 Episode"""
+
+ generation_enabled: bool = Field(default=True)
+ """是否启用自动生成"""
+
+ pending_batch_size: int = Field(default=20, ge=1)
+ """待处理批大小"""
+
+ pending_max_retry: int = Field(default=3, ge=0)
+ """待处理最大重试次数"""
+
+ max_paragraphs_per_call: int = Field(default=20, ge=1)
+ """单次最大段落数"""
+
+ max_chars_per_call: int = Field(default=6000, ge=100)
+ """单次最大字符数"""
+
+ source_time_window_hours: float = Field(default=24.0, ge=0.0)
+ """时间窗口小时数"""
+
+ segmentation_model: str = Field(default="auto")
+ """分段模型选择"""
+
+
+class AMemorixPersonProfileConfig(ConfigBase):
+ """A_Memorix 人物画像配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用画像"""
+
+ refresh_interval_minutes: int = Field(default=30, ge=1)
+ """刷新间隔分钟数"""
+
+ active_window_hours: float = Field(default=72.0, ge=1.0)
+ """活跃窗口小时数"""
+
+ max_refresh_per_cycle: int = Field(default=50, ge=1)
+ """单轮最大刷新数"""
+
+ top_k_evidence: int = Field(default=12, ge=1)
+ """证据条数"""
+
+
+class AMemorixMemoryEvolutionConfig(ConfigBase):
+ """A_Memorix 记忆演化配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用记忆演化"""
+
+ half_life_hours: float = Field(default=24.0, ge=0.1)
+ """半衰期小时数"""
+
+ prune_threshold: float = Field(default=0.1, ge=0.0, le=1.0)
+ """裁剪阈值"""
+
+ freeze_duration_hours: float = Field(default=24.0, ge=0.0)
+ """冻结时长小时数"""
+
+
+class AMemorixAdvancedConfig(ConfigBase):
+ """A_Memorix 高级运行时配置"""
+
+ enable_auto_save: bool = Field(default=True)
+ """是否启用自动保存"""
+
+ auto_save_interval_minutes: int = Field(default=5, ge=1)
+ """自动保存间隔"""
+
+ debug: bool = Field(default=False)
+ """是否启用调试"""
+
+
+class AMemorixWebImportConfig(ConfigBase):
+ """A_Memorix 导入中心配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用导入中心"""
+
+ max_queue_size: int = Field(default=20, ge=1)
+ """最大队列长度"""
+
+ max_files_per_task: int = Field(default=200, ge=1)
+ """单任务最大文件数"""
+
+ max_file_size_mb: int = Field(default=20, ge=1)
+ """单文件大小上限 MB"""
+
+ max_paste_chars: int = Field(default=200000, ge=100)
+ """粘贴字符数上限"""
+
+ default_file_concurrency: int = Field(default=2, ge=1)
+ """默认文件并发"""
+
+ default_chunk_concurrency: int = Field(default=4, ge=1)
+ """默认分块并发"""
+
+
+class AMemorixWebTuningConfig(ConfigBase):
+ """A_Memorix 调优中心配置"""
+
+ enabled: bool = Field(default=True)
+ """是否启用调优中心"""
+
+ max_queue_size: int = Field(default=8, ge=1)
+ """最大队列长度"""
+
+ poll_interval_ms: int = Field(default=1200, ge=200)
+ """轮询间隔毫秒数"""
+
+ default_intensity: Literal["quick", "standard", "deep"] = Field(default="standard")
+ """默认调优强度"""
+
+ default_objective: Literal["precision_priority", "balanced", "recall_priority"] = Field(
+ default="precision_priority"
+ )
+ """默认调优目标"""
+
+ default_top_k_eval: int = Field(default=20, ge=1)
+ """默认评估 Top-K"""
+
+ default_sample_size: int = Field(default=24, ge=1)
+ """默认样本数"""
+
+
+class AMemorixWebConfig(ConfigBase):
+ """A_Memorix Web 运维配置"""
+
+ import_config: AMemorixWebImportConfig = Field(default_factory=AMemorixWebImportConfig)
+ """导入中心配置"""
+
+ tuning: AMemorixWebTuningConfig = Field(default_factory=AMemorixWebTuningConfig)
+ """调优中心配置"""
+
+
+class AMemorixConfig(ConfigBase):
+ """A_Memorix 长期记忆子系统配置"""
+
+ __ui_label__ = "长期记忆"
+ __ui_icon__ = "brain"
+
+ plugin: AMemorixPluginConfig = Field(default_factory=AMemorixPluginConfig)
+ """子系统状态"""
+
+ storage: AMemorixStorageConfig = Field(default_factory=AMemorixStorageConfig)
+ """存储位置"""
+
+ embedding: AMemorixEmbeddingConfig = Field(default_factory=AMemorixEmbeddingConfig)
+ """Embedding 配置"""
+
+ retrieval: AMemorixRetrievalConfig = Field(default_factory=AMemorixRetrievalConfig)
+ """检索配置"""
+
+ threshold: AMemorixThresholdConfig = Field(default_factory=AMemorixThresholdConfig)
+ """阈值过滤配置"""
+
+ filter: AMemorixFilterConfig = Field(default_factory=AMemorixFilterConfig)
+ """聊天过滤配置"""
+
+ episode: AMemorixEpisodeConfig = Field(default_factory=AMemorixEpisodeConfig)
+ """Episode 配置"""
+
+ person_profile: AMemorixPersonProfileConfig = Field(default_factory=AMemorixPersonProfileConfig)
+ """人物画像配置"""
+
+ memory: AMemorixMemoryEvolutionConfig = Field(default_factory=AMemorixMemoryEvolutionConfig)
+ """记忆演化配置"""
+
+ advanced: AMemorixAdvancedConfig = Field(default_factory=AMemorixAdvancedConfig)
+ """高级运行时配置"""
+
+ web: AMemorixWebConfig = Field(default_factory=AMemorixWebConfig)
+ """Web 运维配置"""
+
+
class LearningItem(ConfigBase):
platform: str = Field(
default="",
@@ -875,6 +1233,7 @@ class EmojiConfig(ConfigBase):
content_filtration: bool = Field(
default=False,
json_schema_extra={
+ "advanced": True,
"x-widget": "switch",
"x-icon": "filter",
},
@@ -884,6 +1243,7 @@ class EmojiConfig(ConfigBase):
filtration_prompt: str = Field(
default="符合公序良俗",
json_schema_extra={
+ "advanced": True,
"x-widget": "input",
"x-icon": "shield",
},
@@ -1006,6 +1366,7 @@ class ChineseTypoConfig(ConfigBase):
"x-widget": "slider",
"x-icon": "percent",
"step": 0.01,
+ "advanced": True,
},
)
"""单字替换概率"""
@@ -1015,6 +1376,7 @@ class ChineseTypoConfig(ConfigBase):
json_schema_extra={
"x-widget": "input",
"x-icon": "hash",
+ "advanced": True,
},
)
"""最小字频阈值"""
@@ -1027,6 +1389,7 @@ class ChineseTypoConfig(ConfigBase):
"x-widget": "slider",
"x-icon": "percent",
"step": 0.1,
+ "advanced": True,
},
)
"""声调错误概率"""
@@ -1039,6 +1402,7 @@ class ChineseTypoConfig(ConfigBase):
"x-widget": "slider",
"x-icon": "percent",
"step": 0.001,
+ "advanced": True,
},
)
"""整词替换概率"""
diff --git a/src/main.py b/src/main.py
index 1e184b28..6c98bc80 100644
--- a/src/main.py
+++ b/src/main.py
@@ -80,6 +80,7 @@ class MainSystem:
init_start_time = time.time()
await config_manager.start_file_watcher()
+ a_memorix_host_service.register_config_reload_callback()
# 添加在线时间统计任务
await async_task_manager.add_task(OnlineTimeRecordTask())
diff --git a/src/maisaka/monitor_events.py b/src/maisaka/monitor_events.py
index ffe85e11..d637c19a 100644
--- a/src/maisaka/monitor_events.py
+++ b/src/maisaka/monitor_events.py
@@ -143,6 +143,33 @@ def _serialize_messages(messages: List[Any]) -> List[Dict[str, Any]]:
return [_serialize_message(message) for message in messages]
+def _enrich_session_identity(data: Dict[str, Any]) -> Dict[str, Any]:
+ """为监控事件补充会话展示所需的群/用户标识。"""
+
+ session_id = data.get("session_id")
+ if not session_id:
+ return data
+
+ try:
+ from src.chat.message_receive.chat_manager import chat_manager
+
+ chat_stream = chat_manager.get_session_by_session_id(str(session_id))
+ except Exception:
+ return data
+
+ if chat_stream is None:
+ return data
+
+ session_name = chat_manager.get_session_name(str(session_id))
+ if session_name:
+ data.setdefault("session_name", session_name)
+ data.setdefault("is_group_chat", chat_stream.is_group_session)
+ data.setdefault("group_id", chat_stream.group_id)
+ data.setdefault("user_id", chat_stream.user_id)
+ data.setdefault("platform", chat_stream.platform)
+ return data
+
+
def _serialize_tool_results(tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""标准化最终 planner 卡中的工具结果列表。"""
@@ -266,6 +293,7 @@ async def _broadcast(event: str, data: Dict[str, Any]) -> None:
try:
from src.webui.routers.websocket.manager import websocket_manager
+ data = _enrich_session_identity(data)
subscription_key = f"{MONITOR_DOMAIN}:{MONITOR_TOPIC}"
total_connections = len(websocket_manager.connections)
subscriber_count = sum(
@@ -291,12 +319,24 @@ async def _broadcast(event: str, data: Dict[str, Any]) -> None:
logger.warning(f"MaiSaka 监控事件广播失败: {exc}", exc_info=True)
-async def emit_session_start(session_id: str, session_name: str) -> None:
+async def emit_session_start(
+ session_id: str,
+ session_name: str,
+ *,
+ is_group_chat: bool,
+ group_id: Optional[str],
+ user_id: Optional[str],
+ platform: str,
+) -> None:
"""广播会话开始事件。"""
await _broadcast("session.start", {
"session_id": session_id,
"session_name": session_name,
+ "is_group_chat": is_group_chat,
+ "group_id": group_id,
+ "user_id": user_id,
+ "platform": platform,
"timestamp": time.time(),
})
diff --git a/src/maisaka/runtime.py b/src/maisaka/runtime.py
index 88c6e4a0..0d48e6bb 100644
--- a/src/maisaka/runtime.py
+++ b/src/maisaka/runtime.py
@@ -46,6 +46,7 @@ from .display.display_utils import build_tool_call_summary_lines, format_token_c
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .display.stage_status_board import remove_stage_status, update_stage_status
from .history_utils import drop_leading_orphan_tool_results
+from .monitor_events import emit_session_start
from .reasoning_engine import MaisakaReasoningEngine
from .reply_effect import ReplyEffectTracker
from .reply_effect.image_utils import extract_visual_attachments_from_sequence
@@ -136,6 +137,7 @@ class MaisakaHeartFlowChatting:
self._jargon_miner = JargonMiner(session_id, session_name=session_name)
self._reasoning_engine = MaisakaReasoningEngine(self)
+ self._monitor_session_start_task: Optional[asyncio.Task[None]] = None
self._tool_registry = ToolRegistry()
self._reply_effect_tracker = ReplyEffectTracker(
session_id=self.session_id,
@@ -144,6 +146,24 @@ class MaisakaHeartFlowChatting:
judge_runner=self._run_reply_effect_judge,
)
self._register_tool_providers()
+ self._emit_monitor_session_start()
+
+ def _emit_monitor_session_start(self) -> None:
+ """向 WebUI 监控面板同步当前会话的展示标识。"""
+
+ try:
+ self._monitor_session_start_task = asyncio.create_task(
+ emit_session_start(
+ session_id=self.session_id,
+ session_name=self.session_name,
+ is_group_chat=self.chat_stream.is_group_session,
+ group_id=self.chat_stream.group_id,
+ user_id=self.chat_stream.user_id,
+ platform=self.chat_stream.platform,
+ )
+ )
+ except RuntimeError:
+ logger.debug("MaiSaka 监控会话开始事件未发送:当前没有运行中的事件循环")
@staticmethod
def _is_reply_effect_tracking_enabled() -> bool:
diff --git a/src/plugin_runtime/runner/manifest_validator.py b/src/plugin_runtime/runner/manifest_validator.py
index 92f0b315..de830fb9 100644
--- a/src/plugin_runtime/runner/manifest_validator.py
+++ b/src/plugin_runtime/runner/manifest_validator.py
@@ -24,7 +24,7 @@ from src.common.logger import get_logger
logger = get_logger("plugin_runtime.runner.manifest_validator")
_SEMVER_PATTERN = re.compile(r"^\d+\.\d+\.\d+$")
-_PLUGIN_ID_PATTERN = re.compile(r"^[a-z0-9]+(?:[.-][a-z0-9]+)+$")
+_PLUGIN_ID_PATTERN = re.compile(r"^[A-Za-z0-9_]+(?:[.-][A-Za-z0-9_]+)+$")
_PACKAGE_NAME_PATTERN = re.compile(r"^[A-Za-z0-9][A-Za-z0-9._-]*$")
_HTTP_URL_PATTERN = re.compile(r"^https?://.+$")
@@ -379,7 +379,7 @@ class PluginDependencyDefinition(_StrictManifestModel):
ValueError: 当 ID 不符合规则时抛出。
"""
if not _PLUGIN_ID_PATTERN.fullmatch(value):
- raise ValueError("必须使用小写字母/数字,并以点号或横线分隔,例如 github.author.plugin")
+ raise ValueError("必须使用字母/数字/下划线,并以点号或横线分隔,例如 github.author.plugin")
return value
@field_validator("version_spec")
@@ -548,7 +548,7 @@ class PluginManifest(_StrictManifestModel):
if not value:
raise ValueError("不能为空")
if info.field_name == "id" and not _PLUGIN_ID_PATTERN.fullmatch(value):
- raise ValueError("必须使用小写字母/数字,并以点号或横线分隔,例如 github.author.plugin")
+ raise ValueError("必须使用字母/数字/下划线,并以点号或横线分隔,例如 github.author.plugin")
return value
@field_validator("capabilities")
diff --git a/src/webui/app.py b/src/webui/app.py
index 54b1288b..6a0d5cf2 100644
--- a/src/webui/app.py
+++ b/src/webui/app.py
@@ -1,6 +1,7 @@
"""FastAPI 应用工厂 - 创建和配置 WebUI 应用实例"""
from importlib import import_module
+from os import getenv
from pathlib import Path
from typing import Any, Dict, Tuple
@@ -16,6 +17,7 @@ from src.common.logger import get_logger
logger = get_logger("webui.app")
_DASHBOARD_PACKAGE_NAME = "maibot-dashboard"
+_LOCAL_DASHBOARD_ENV = "MAIBOT_WEBUI_USE_LOCAL_DASHBOARD"
_MANUAL_INSTALL_COMMAND = f"pip install {_DASHBOARD_PACKAGE_NAME}"
@@ -36,6 +38,10 @@ def _get_project_root() -> Path:
return Path(__file__).resolve().parents[2]
+def _is_local_dashboard_enabled() -> bool:
+ return getenv(_LOCAL_DASHBOARD_ENV, "").strip().lower() in {"1", "true", "yes", "on"}
+
+
def _validate_static_path(static_path: Path | None) -> Tuple[str, Dict[str, Any]] | None:
if static_path is None:
return "startup.webui_static_dir_missing", {}
@@ -179,6 +185,16 @@ def _setup_static_files(app: FastAPI):
logger.warning(t("startup.webui_dashboard_package_hint", command=_MANUAL_INSTALL_COMMAND))
return
+ @app.get("/maibot_statistics.html", include_in_schema=False)
+ async def serve_statistics_report():
+ report_path = (_get_project_root() / "maibot_statistics.html").resolve()
+ if not report_path.exists() or not report_path.is_file():
+ raise HTTPException(status_code=404, detail=t("core.not_found"))
+
+ response = FileResponse(report_path, media_type="text/html")
+ response.headers["X-Robots-Tag"] = "noindex, nofollow, noarchive"
+ return response
+
@app.get("/{full_path:path}", include_in_schema=False)
async def serve_spa(full_path: str):
if not full_path or full_path == "/":
@@ -205,12 +221,10 @@ def _setup_static_files(app: FastAPI):
def _resolve_static_path() -> Path | None:
- # 临时仅允许使用已安装的 maibot-dashboard 包,不使用仓库本地 dashboard/dist。
- # 如需恢复本地回退逻辑,可取消下方注释。
- # base_dir = _get_project_root()
- # static_path = base_dir / "dashboard" / "dist"
- # if static_path.is_dir() and (static_path / "index.html").exists():
- # return static_path
+ if _is_local_dashboard_enabled():
+ static_path = _get_project_root() / "dashboard" / "dist"
+ if static_path.is_dir() and (static_path / "index.html").exists():
+ return static_path
try:
module = import_module("maibot_dashboard")
diff --git a/src/webui/config_schema.py b/src/webui/config_schema.py
index 1f11faa2..9658ec9c 100644
--- a/src/webui/config_schema.py
+++ b/src/webui/config_schema.py
@@ -1,6 +1,5 @@
-from typing import Any, Dict, List, get_args, get_origin
-
import inspect
+from typing import Any, Dict, List, get_args, get_origin
from pydantic_core import PydanticUndefined
diff --git a/src/webui/routers/config.py b/src/webui/routers/config.py
index 18221fa3..8e609754 100644
--- a/src/webui/routers/config.py
+++ b/src/webui/routers/config.py
@@ -19,6 +19,7 @@ from src.config.model_configs import (
ModelTaskConfig,
)
from src.config.official_configs import (
+ AMemorixConfig,
BotConfig,
ChatConfig,
ChineseTypoConfig,
@@ -128,6 +129,7 @@ async def get_config_section_schema(section_name: str):
"telemetry": TelemetryConfig,
"maim_message": MaimMessageConfig,
"memory": MemoryConfig,
+ "a_memorix": AMemorixConfig,
"debug": DebugConfig,
"voice": VoiceConfig,
"model_task_config": ModelTaskConfig,