From 5846f6e0c41a54c3f73455dbeec34792843472a4 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 7 May 2026 00:05:35 +0800 Subject: [PATCH] =?UTF-8?q?perf=EF=BC=9A=E4=BC=98=E5=8C=96webui=E4=BA=A4?= =?UTF-8?q?=E4=BA=92=E4=BD=93=E9=AA=8C=EF=BC=8C=E4=BC=98=E5=8C=96=E7=BB=9F?= =?UTF-8?q?=E8=AE=A1=E9=80=BB=E8=BE=91=EF=BC=8C=E4=BC=98=E5=8C=96log?= =?UTF-8?q?=E5=B1=95=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dynamic-form/DynamicConfigForm.tsx | 123 +++-- .../components/dynamic-form/DynamicField.tsx | 4 +- dashboard/src/components/layout/LogoArea.tsx | 78 ++- .../src/components/ui/extra-params-dialog.tsx | 8 +- .../components/ui/nested-key-value-editor.tsx | 15 +- dashboard/src/i18n/locales/zh.json | 2 +- dashboard/src/index.css | 24 + dashboard/src/lib/system-api.ts | 28 + dashboard/src/routes/config/bot.tsx | 33 +- .../bot/hooks/ListItemEditorHookFactory.tsx | 27 +- .../config/bot/hooks/complexFieldHooks.tsx | 94 ++++ .../src/routes/config/bot/hooks/index.ts | 2 + .../config/bot/sections/ExpressionSection.tsx | 3 +- dashboard/src/routes/config/model.tsx | 72 ++- .../config/modelProvider/ProviderList.tsx | 37 +- .../src/routes/config/modelProvider/index.tsx | 78 ++- dashboard/src/routes/index.tsx | 169 +++++- pytests/utils_test/statistic_test.py | 8 +- pytests/webui/test_statistics_service.py | 332 ++++++++++++ pytests/webui/test_system_routes.py | 13 + .../core/runtime/sdk_memory_kernel.py | 4 +- src/A_memorix/core/storage/metadata_store.py | 12 +- .../core/utils/web_import_manager.py | 7 +- src/A_memorix/host_service.py | 2 +- src/chat/utils/statistic.py | 98 ++-- src/common/prompt_i18n.py | 2 +- src/config/config.py | 2 +- src/config/official_configs.py | 5 +- src/core/tooling.py | 4 +- src/emoji_system/emoji_manager.py | 7 +- src/main.py | 9 +- src/plugin_runtime/host/message_utils.py | 2 +- src/services/memory_flow_service.py | 27 +- src/services/memory_service.py | 36 +- 
src/services/statistics_service.py | 491 ++++++++++++++++++ src/webui/routers/chat/service.py | 25 +- src/webui/routers/statistics.py | 342 +----------- src/webui/routers/system.py | 91 +++- src/webui/routers/websocket/manager.py | 6 +- src/webui/routers/websocket/unified.py | 12 +- uv.lock | 8 +- 41 files changed, 1723 insertions(+), 619 deletions(-) create mode 100644 pytests/webui/test_statistics_service.py create mode 100644 pytests/webui/test_system_routes.py create mode 100644 src/services/statistics_service.py diff --git a/dashboard/src/components/dynamic-form/DynamicConfigForm.tsx b/dashboard/src/components/dynamic-form/DynamicConfigForm.tsx index 012c2dbd..ee1d7339 100644 --- a/dashboard/src/components/dynamic-form/DynamicConfigForm.tsx +++ b/dashboard/src/components/dynamic-form/DynamicConfigForm.tsx @@ -5,7 +5,6 @@ import { Button } from '@/components/ui/button' import { Card, CardContent, - CardDescription, CardHeader, CardTitle, } from '@/components/ui/card' @@ -31,20 +30,10 @@ function buildFieldPath(basePath: string, fieldName: string) { return basePath ? `${basePath}.${fieldName}` : fieldName } -function hasTopLevelAdvancedFields(schema: ConfigSchema) { - return schema.fields.some((field) => field.advanced && !schema.nested?.[field.name]) -} - function resolveSectionTitle(schema: ConfigSchema) { return schema.uiLabel || schema.classDoc || schema.className } -function resolveSectionDescription(schema: ConfigSchema, sectionTitle: string) { - return schema.classDoc && schema.classDoc !== sectionTitle - ? 
schema.classDoc - : undefined -} - function SectionIcon({ iconName }: { iconName?: string }) { if (!iconName) return null const IconComponent = LucideIcons[iconName as keyof typeof LucideIcons] as @@ -54,7 +43,7 @@ function SectionIcon({ iconName }: { iconName?: string }) { return } -function AdvancedSettingsButton({ +export function AdvancedSettingsButton({ active, onClick, }: { @@ -74,51 +63,39 @@ function AdvancedSettingsButton({ } function DynamicConfigSection({ + advancedVisible, basePath, hooks, level, nestedSchema, onChange, - sectionDescription, sectionKey, sectionTitle, values, }: { + advancedVisible: boolean basePath: string hooks: FieldHookRegistry level: number nestedSchema: ConfigSchema onChange: (field: string, value: unknown) => void - sectionDescription?: string sectionKey: string sectionTitle: string values: Record }) { - const [advancedVisible, setAdvancedVisible] = React.useState(false) - const hasAdvanced = hasTopLevelAdvancedFields(nestedSchema) - return ( - + - {sectionTitle} + {sectionTitle} - {sectionDescription && ( - {sectionDescription} - )} - {hasAdvanced && ( - setAdvancedVisible((current) => !current)} - /> - )} - + @@ -154,8 +131,7 @@ export const DynamicConfigForm: React.FC = ({ advancedVisible, sectionColumns = 1, }) => { - const [localAdvancedVisible, setLocalAdvancedVisible] = React.useState(false) - const resolvedAdvancedVisible = advancedVisible ?? localAdvancedVisible + const resolvedAdvancedVisible = advancedVisible ?? false const fieldMap = React.useMemo( () => new Map(schema.fields.map((field) => [field.name, field])), @@ -230,6 +206,51 @@ export const DynamicConfigForm: React.FC = ({ return hooks.get(fieldPath)?.type === 'replace' } + const schemaHasVisibleContent = React.useCallback( + (targetSchema: ConfigSchema, targetBasePath: string): boolean => { + const targetFields = targetSchema.fields ?? 
[] + const hasVisibleInlineField = targetFields.some((field) => { + const fieldPath = buildFieldPath(targetBasePath, field.name) + const hookEntry = hooks.get(fieldPath) + + if (hookEntry?.type === 'hidden') { + return false + } + + if (targetSchema.nested?.[field.name] && hookEntry?.type !== 'replace') { + return false + } + + return resolvedAdvancedVisible || !field.advanced + }) + + if (hasVisibleInlineField) { + return true + } + + return Object.entries(targetSchema.nested ?? {}).some(([key, nestedSchema]) => { + const nestedField = targetFields.find((field) => field.name === key) + const nestedFieldPath = buildFieldPath(targetBasePath, key) + const hookEntry = hooks.get(nestedFieldPath) + + if (hookEntry?.type === 'hidden') { + return false + } + + if (nestedField?.advanced && !resolvedAdvancedVisible) { + return false + } + + if (hookEntry?.type === 'replace') { + return true + } + + return schemaHasVisibleContent(nestedSchema, nestedFieldPath) + }) + }, + [hooks, resolvedAdvancedVisible], + ) + const inlineFields = schema.fields.filter(shouldRenderFieldInline) const inlineNestedFieldNames = new Set( inlineFields @@ -302,16 +323,8 @@ export const DynamicConfigForm: React.FC = ({ return ( - {inlineFields.length > 0 && ( + {visibleFields.length > 0 && ( - {advancedVisible === undefined && advancedFields.length > 0 && ( - - setLocalAdvancedVisible((current) => !current)} - /> - - )} {renderFieldList(visibleFields)} )} @@ -327,6 +340,15 @@ export const DynamicConfigForm: React.FC = ({ if (hooks.has(nestedFieldPath)) { const hookEntry = hooks.get(nestedFieldPath) if (!hookEntry) return null + if (hookEntry.type === 'hidden') return null + if (nestedField?.advanced && !resolvedAdvancedVisible) return null + if ( + hookEntry.type !== 'replace' && + nestedSchema && + !schemaHasVisibleContent(nestedSchema, nestedFieldPath) + ) { + return null + } const HookComponent = hookEntry.component if (hookEntry.type === 'replace') { @@ -363,6 +385,7 @@ export const 
DynamicConfigForm: React.FC = ({ basePath={nestedFieldPath} hooks={hooks} level={level + 1} + advancedVisible={resolvedAdvancedVisible} sectionColumns={1} /> @@ -371,12 +394,15 @@ export const DynamicConfigForm: React.FC = ({ } const sectionTitle = resolveSectionTitle(nestedSchema) - const sectionDescription = resolveSectionDescription(nestedSchema, sectionTitle) + if (!schemaHasVisibleContent(nestedSchema, nestedFieldPath)) { + return null + } if (level === 0) { return ( ) || {}} onChange={onChange} @@ -385,29 +411,23 @@ export const DynamicConfigForm: React.FC = ({ level={level + 1} sectionKey={key} sectionTitle={sectionTitle} - sectionDescription={sectionDescription} /> ) } return ( - + - {sectionTitle} + {sectionTitle} - {sectionDescription && ( - - {sectionDescription} - - )} - + ) || {}} @@ -415,6 +435,7 @@ export const DynamicConfigForm: React.FC = ({ basePath={nestedFieldPath} hooks={hooks} level={level + 1} + advancedVisible={resolvedAdvancedVisible} sectionColumns={1} /> diff --git a/dashboard/src/components/dynamic-form/DynamicField.tsx b/dashboard/src/components/dynamic-form/DynamicField.tsx index 0e6156fc..cfd6171a 100644 --- a/dashboard/src/components/dynamic-form/DynamicField.tsx +++ b/dashboard/src/components/dynamic-form/DynamicField.tsx @@ -158,10 +158,10 @@ export const DynamicField: React.FC = ({ const label = ( diff --git a/dashboard/src/components/layout/LogoArea.tsx b/dashboard/src/components/layout/LogoArea.tsx index 27e0441a..9b298b7f 100644 --- a/dashboard/src/components/layout/LogoArea.tsx +++ b/dashboard/src/components/layout/LogoArea.tsx @@ -1,13 +1,41 @@ +import { useEffect, useState } from 'react' + +import { getDashboardVersionStatus, type DashboardVersionStatus } from '@/lib/system-api' import { cn } from '@/lib/utils' -import { formatVersion } from '@/lib/version' +import { APP_VERSION, formatVersion } from '@/lib/version' interface LogoAreaProps { sidebarOpen: boolean } export function LogoArea({ sidebarOpen }: LogoAreaProps) { + 
const [versionStatus, setVersionStatus] = useState(null) + + useEffect(() => { + let mounted = true + + const loadVersionStatus = async () => { + try { + const status = await getDashboardVersionStatus(APP_VERSION) + if (mounted) { + setVersionStatus(status) + } + } catch (error) { + console.debug('检查 WebUI 版本更新失败:', error) + } + } + + void loadVersionStatus() + + return () => { + mounted = false + } + }, []) + + const hasUpdate = versionStatus?.has_update === true && Boolean(versionStatus.latest_version) + return ( - + {/* 移动端始终显示完整 Logo,桌面端根据 sidebarOpen 切换 */} - MaiBot WebUI - - {formatVersion()} + + MaiBot WebUI + + + {formatVersion()} + + {hasUpdate && ( + + 有更新 v{versionStatus?.latest_version} + + )} + + {false && hasUpdate && ( + + 有更新 v{versionStatus?.latest_version} + + )} + + MaiBot WebUI + + {formatVersion()} + + {/* 折叠时的 Logo - 仅桌面端显示 */} {!sidebarOpen && ( diff --git a/dashboard/src/components/ui/extra-params-dialog.tsx b/dashboard/src/components/ui/extra-params-dialog.tsx index acad6bcf..4f9b2ae1 100644 --- a/dashboard/src/components/ui/extra-params-dialog.tsx +++ b/dashboard/src/components/ui/extra-params-dialog.tsx @@ -1,6 +1,6 @@ "use client" -import { useState } from "react" +import { useEffect, useState } from "react" import { Dialog, DialogContent, @@ -27,6 +27,12 @@ export function ExtraParamsDialog({ }: ExtraParamsDialogProps) { const [editingValue, setEditingValue] = useState>(value) + useEffect(() => { + if (open) { + setEditingValue(value) + } + }, [open, value]) + // 当对话框打开状态改变时的处理 const handleOpenChange = (newOpen: boolean) => { if (newOpen) { diff --git a/dashboard/src/components/ui/nested-key-value-editor.tsx b/dashboard/src/components/ui/nested-key-value-editor.tsx index 67892460..39a8e02c 100644 --- a/dashboard/src/components/ui/nested-key-value-editor.tsx +++ b/dashboard/src/components/ui/nested-key-value-editor.tsx @@ -1,6 +1,6 @@ "use client" -import { useState, useCallback } from "react" +import { useCallback, useEffect, useRef, 
useState } from "react" import { Plus, Trash2, ChevronRight, ChevronDown } from "lucide-react" import { Button } from "@/components/ui/button" import { Input } from "@/components/ui/input" @@ -292,12 +292,23 @@ export function NestedKeyValueEditor({ placeholder = "添加参数...", }: NestedKeyValueEditorProps) { const [nodes, setNodes] = useState(() => recordToTree(value || {})) + const lastEmittedValueRef = useRef(null) + + useEffect(() => { + const nextValueJson = JSON.stringify(value || {}) + if (lastEmittedValueRef.current === nextValueJson) { + return + } + setNodes(recordToTree(value || {})) + }, [value]) // 同步到父组件 const syncToParent = useCallback( (newNodes: TreeNode[]) => { + const nextValue = treeToRecord(newNodes) + lastEmittedValueRef.current = JSON.stringify(nextValue) setNodes(newNodes) - onChange(treeToRecord(newNodes)) + onChange(nextValue) }, [onChange] ) diff --git a/dashboard/src/i18n/locales/zh.json b/dashboard/src/i18n/locales/zh.json index f061f8d2..30084fc2 100644 --- a/dashboard/src/i18n/locales/zh.json +++ b/dashboard/src/i18n/locales/zh.json @@ -26,7 +26,7 @@ "home": "首页", "botMainConfig": "麦麦设置", "aiModelProvider": "模型厂商设置", - "modelManagement": "模型管理与分配", + "modelManagement": "模型管理", "promptManagement": "Prompt 管理", "adapterConfig": "麦麦适配器配置", "emojiManagement": "表情包", diff --git a/dashboard/src/index.css b/dashboard/src/index.css index 31be6e86..4aeec7ec 100644 --- a/dashboard/src/index.css +++ b/dashboard/src/index.css @@ -101,6 +101,30 @@ } /* JetBrains Mono 字体 - 用于代码编辑器 */ +@keyframes config-tab-enter { + from { + opacity: 0; + transform: translateX(-0.5rem); + } + + to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes config-tab-content-enter { + from { + opacity: 0; + transform: translateX(0.375rem); + } + + to { + opacity: 1; + transform: translateX(0); + } +} + @font-face { font-family: 'JetBrains Mono'; src: url('/fonts/JetBrainsMono-Medium.ttf') format('truetype'); diff --git a/dashboard/src/lib/system-api.ts 
b/dashboard/src/lib/system-api.ts index 0310667d..ee7dd758 100644 --- a/dashboard/src/lib/system-api.ts +++ b/dashboard/src/lib/system-api.ts @@ -42,3 +42,31 @@ export async function getMaiBotStatus(): Promise<{ return await response.json() } + +export interface DashboardVersionStatus { + current_version: string + latest_version: string | null + has_update: boolean + package_name: string + pypi_url: string +} + +/** + * 检查 WebUI 是否有 PyPI 新版本 + */ +export async function getDashboardVersionStatus( + currentVersion: string +): Promise { + const params = new URLSearchParams({ current_version: currentVersion }) + const response = await fetchWithAuth(`/api/webui/system/dashboard-version?${params.toString()}`, { + method: 'GET', + headers: getAuthHeaders(), + }) + + if (!response.ok) { + const error = await response.json() + throw new Error(error.detail || '获取 WebUI 版本失败') + } + + return await response.json() +} diff --git a/dashboard/src/routes/config/bot.tsx b/dashboard/src/routes/config/bot.tsx index 57db9aee..718ed581 100644 --- a/dashboard/src/routes/config/bot.tsx +++ b/dashboard/src/routes/config/bot.tsx @@ -25,10 +25,11 @@ import { fieldHooks } from '@/lib/field-hooks' import { RestartProvider, useRestart } from '@/lib/restart-context' import { cn } from '@/lib/utils' -import { ChevronDown, ChevronUp, Code2, Info, Layout, Power, RefreshCw, Save } from 'lucide-react' +import { ChevronLeft, ChevronRight, Code2, Info, Layout, Power, RefreshCw, Save } from 'lucide-react' import type { ConfigSchema } from '@/types/config-schema' import { + AliasNamesHook, BotPlatformAccountsHook, ChatPromptsHook, ChatTalkValueRulesHook, @@ -38,6 +39,7 @@ import { HiddenFieldHook, MCPRootItemsHook, MCPServersHook, + MultipleReplyStyleHook, RegexRulesHook, useAutoSave, useConfigAutoSave, @@ -414,8 +416,10 @@ function BotConfigPageContent() { useEffect(() => { const hookEntries = [ ['bot.platform', BotPlatformAccountsHook, 'replace'], + ['bot.alias_names', AliasNamesHook], 
['bot.qq_account', HiddenFieldHook, 'hidden'], ['bot.platforms', HiddenFieldHook, 'hidden'], + ['personality.multiple_reply_style', MultipleReplyStyleHook], ['chat.chat_prompts', ChatPromptsHook], ['chat.talk_value_rules', ChatTalkValueRulesHook], ['expression.expression_groups', ExpressionGroupsHook], @@ -959,6 +963,7 @@ function DynamicConfigTabs(props: DynamicConfigTabsProps) { const { configSchema, sectionValues, setHasUnsavedChanges, setSectionValue, tabGroups } = props const [expanded, setExpanded] = useState(false) const [activeTab, setActiveTab] = useState(tabGroups[0]?.id ?? '') + const [advancedVisible, setAdvancedVisible] = useState(false) useEffect(() => { if (!tabGroups.some((tab) => tab.id === activeTab)) { @@ -1028,6 +1033,7 @@ function DynamicConfigTabs(props: DynamicConfigTabsProps) { setHasUnsavedChanges(true) }} hooks={fieldHooks} + advancedVisible={advancedVisible} sectionColumns={2} /> ) @@ -1035,20 +1041,20 @@ function DynamicConfigTabs(props: DynamicConfigTabsProps) { return ( - + {visibleTabGroups.map((tab) => { const isExpandedOnlyTab = !DEFAULT_VISIBLE_TAB_IDS.has(tab.id) return ( {tab.id === firstExpandedTabId && ( - + )} {tab.label} @@ -1061,20 +1067,29 @@ function DynamicConfigTabs(props: DynamicConfigTabsProps) { type="button" variant="ghost" size="sm" - className="h-8 px-2 text-xs sm:h-9 sm:px-3" + className="group h-8 px-2 text-xs transition-all duration-200 ease-out sm:h-9 sm:px-3" onClick={toggleExpanded} > {expanded ? ( - + ) : ( - + )} {expanded ? 
'收起' : '更多'} )} + setAdvancedVisible((current) => !current)} > 高级设置 {tabGroups.map((tab) => ( + {renderTabContent(tab)} ))} diff --git a/dashboard/src/routes/config/bot/hooks/ListItemEditorHookFactory.tsx b/dashboard/src/routes/config/bot/hooks/ListItemEditorHookFactory.tsx index a7612dcf..f2825707 100644 --- a/dashboard/src/routes/config/bot/hooks/ListItemEditorHookFactory.tsx +++ b/dashboard/src/routes/config/bot/hooks/ListItemEditorHookFactory.tsx @@ -1,6 +1,6 @@ import { useCallback, useEffect, useMemo, useState, type CSSProperties } from 'react' import * as LucideIcons from 'lucide-react' -import { Plus, Trash2 } from 'lucide-react' +import { ChevronDown, ChevronUp, Plus, Trash2 } from 'lucide-react' import { Button } from '@/components/ui/button' import { @@ -43,6 +43,7 @@ export interface ListItemEditorOptions { collapsedText?: string expandLabel?: string collapseLabel?: string + collapseButtonDisplay?: 'text' | 'icon' } function resolveLabel(schema?: ConfigSchema | FieldSchema, fieldPath?: string): string { @@ -293,6 +294,9 @@ export function createListItemEditorHook( const shouldCollapse = options.collapseWhen?.({ parentValues }) ?? false const [manuallyExpanded, setManuallyExpanded] = useState(false) const collapsed = shouldCollapse && !manuallyExpanded + const collapseButtonLabel = collapsed + ? (options.expandLabel ?? '展开') + : (options.collapseLabel ?? '折叠') useEffect(() => { if (!shouldCollapse) { @@ -332,12 +336,31 @@ export function createListItemEditorHook( {renderLucideIcon(iconName, 'h-5 w-5 flex-shrink-0 text-muted-foreground')} {label} - {shouldCollapse && ( + {shouldCollapse && options.collapseButtonDisplay === 'icon' && ( + setManuallyExpanded((current) => !current)} + aria-label={collapseButtonLabel} + title={collapseButtonLabel} + className="inline-flex items-center justify-center" + > + {collapsed ? 
( + + ) : ( + + )} + + )} + {shouldCollapse && options.collapseButtonDisplay !== 'icon' && ( setManuallyExpanded((current) => !current)} + aria-label={collapseButtonLabel} + title={collapseButtonLabel} > {collapsed ? (options.expandLabel ?? '展开') diff --git a/dashboard/src/routes/config/bot/hooks/complexFieldHooks.tsx b/dashboard/src/routes/config/bot/hooks/complexFieldHooks.tsx index b785c1f1..043c2075 100644 --- a/dashboard/src/routes/config/bot/hooks/complexFieldHooks.tsx +++ b/dashboard/src/routes/config/bot/hooks/complexFieldHooks.tsx @@ -11,6 +11,7 @@ import { SelectTrigger, SelectValue, } from '@/components/ui/select' +import { Textarea } from '@/components/ui/textarea' import type { FieldHookComponent } from '@/lib/field-hooks' import { createJsonFieldHook } from './JsonFieldHookFactory' @@ -131,6 +132,98 @@ const formatPlatformAccount = (row: PlatformAccountRow): string => { return `${platform}:${account}` } +interface StringListHookOptions { + addLabel: string + emptyText: string + label: string + multiline?: boolean + placeholder?: string +} + +function createStringListHook(options: StringListHookOptions): FieldHookComponent { + return ({ onChange, value }) => { + const items = Array.isArray(value) ? value.map((item) => String(item ?? '')) : [] + + const updateItems = (nextItems: string[]) => { + onChange?.(nextItems) + } + + const addItem = () => { + updateItems([...items, '']) + } + + const removeItem = (itemIndex: number) => { + updateItems(items.filter((_, index) => index !== itemIndex)) + } + + const updateItem = (itemIndex: number, nextValue: string) => { + updateItems(items.map((item, index) => (index === itemIndex ? nextValue : item))) + } + + const InputComponent = options.multiline ? Textarea : Input + + return ( + + + {options.label} + + + {options.addLabel} + + + + {items.length === 0 ? ( + + {options.emptyText} + + ) : ( + + {items.map((item, itemIndex) => ( + + updateItem(itemIndex, event.target.value)} + {...(options.multiline ? 
{ rows: 2 } : {})} + /> + + removeItem(itemIndex)} + > + + + + + ))} + + )} + + ) + } +} + +export const AliasNamesHook = createStringListHook({ + addLabel: '添加别名', + emptyText: '暂无别名。', + label: '别名', + placeholder: '小麦', +}) + +export const MultipleReplyStyleHook = createStringListHook({ + addLabel: '添加表达风格', + emptyText: '暂无备用表达风格。', + label: '备用表达风格', + multiline: true, + placeholder: '输入一种备用表达风格', +}) + export const ChatTalkValueRulesHook = createListItemEditorHook({ addLabel: '添加发言频率规则', addButtonPlacement: 'top', @@ -140,6 +233,7 @@ export const ChatTalkValueRulesHook = createListItemEditorHook({ collapseLabel: '折叠规则', helperText: '可按平台/聊天流/时段分别配置发言频率,留空表示全局。', emptyText: '尚未配置任何规则,将使用全局默认频率。', + collapseButtonDisplay: 'icon', fieldRows: [ ['platform', 'item_id', 'rule_type'], ['time', 'value'], diff --git a/dashboard/src/routes/config/bot/hooks/index.ts b/dashboard/src/routes/config/bot/hooks/index.ts index 65c0791b..df81c170 100644 --- a/dashboard/src/routes/config/bot/hooks/index.ts +++ b/dashboard/src/routes/config/bot/hooks/index.ts @@ -11,6 +11,7 @@ export type { UseAutoSaveReturnGeneric, } from './useAutoSave' export { + AliasNamesHook, BotPlatformsHook, BotPlatformAccountsHook, ChatPromptsHook, @@ -21,6 +22,7 @@ export { HiddenFieldHook, MCPRootItemsHook, MCPServersHook, + MultipleReplyStyleHook, RegexRulesHook, } from './complexFieldHooks' export { ChatSectionHook } from './ChatSectionHook' diff --git a/dashboard/src/routes/config/bot/sections/ExpressionSection.tsx b/dashboard/src/routes/config/bot/sections/ExpressionSection.tsx index 864cb4be..59201b62 100644 --- a/dashboard/src/routes/config/bot/sections/ExpressionSection.tsx +++ b/dashboard/src/routes/config/bot/sections/ExpressionSection.tsx @@ -366,8 +366,9 @@ export const ExpressionSection = React.memo(function ExpressionSection({ - + + 删除 diff --git a/dashboard/src/routes/config/model.tsx b/dashboard/src/routes/config/model.tsx index 15012f48..9c03849b 100644 --- 
a/dashboard/src/routes/config/model.tsx +++ b/dashboard/src/routes/config/model.tsx @@ -1312,7 +1312,7 @@ function ModelConfigPageContent() { 管理 AI 模型厂商的 API 配置 - + {selectedProviders.size > 0 && ( + {selectedProviders.size > 0 && ( + + + 批量删除 ({selectedProviders.size}) + + )} + 0} + > + + + {testingProviders.size > 0 ? `测试中 (${testingProviders.size})` : '测试全部连接'} + + + openProviderDialog(null, null)} size="sm" variant="outline" className="w-full sm:w-auto" data-tour="add-provider-button"> + + 添加厂商 + + > + )} onEdit={openProviderDialog} onDelete={openProviderDeleteDialog} onTest={handleTestProviderConnection} @@ -1359,7 +1390,7 @@ function ModelConfigPageContent() { 配置可用的模型列表 - + {selectedModels.size > 0 && ( {/* 搜索框 */} - + )} - {/* 模型列表 - 移动端卡片视图 */} + + {selectedModels.size > 0 && ( + + + 批量删除 ({selectedModels.size}) + + )} + openEditDialog(null, null)} size="sm" variant="outline" className="w-full sm:w-auto" data-tour="add-model-button"> + + 添加模型 + + + + - - 输入价格 (¥/M token) + + 输入价格 (¥/M token) - - 输出价格 (¥/M token) + + 输出价格 (¥/M token) @@ -1896,8 +1947,8 @@ function ModelConfigPageContent() { {editingModel?.cache && ( - - 缓存输入价格 (¥/M token) + + 缓存输入价格 (¥/M token) )} diff --git a/dashboard/src/routes/config/modelProvider/ProviderList.tsx b/dashboard/src/routes/config/modelProvider/ProviderList.tsx index 37c63b9a..4310d1be 100644 --- a/dashboard/src/routes/config/modelProvider/ProviderList.tsx +++ b/dashboard/src/routes/config/modelProvider/ProviderList.tsx @@ -1,4 +1,4 @@ -import { useCallback, useMemo, useState } from 'react' +import { useCallback, useMemo, useState, type ReactNode } from 'react' import type { TestConnectionResult } from '@/lib/config-api' import { AlertCircle, CheckCircle2, ChevronLeft, ChevronRight, ChevronsLeft, ChevronsRight, Loader2, Pencil, Search, Trash2, XCircle, Zap } from 'lucide-react' @@ -18,6 +18,7 @@ interface ProviderListProps { testingProviders: Set testResults: Map selectedProviders: Set + toolbarActions?: ReactNode onEdit: 
(provider: APIProvider, index: number) => void onDelete: (index: number) => void onTest: (name: string) => void @@ -30,6 +31,7 @@ export function ProviderList({ testingProviders, testResults, selectedProviders, + toolbarActions, onEdit, onDelete, onTest, @@ -125,20 +127,27 @@ export function ProviderList({ return ( <> {/* 搜索框 */} - - - - setSearchQuery(e.target.value)} - className="pl-9" - /> + + + + + setSearchQuery(e.target.value)} + className="pl-9" + /> + + {searchQuery && ( + + 找到 {filteredProviders.length} 个结果 + + )} - {searchQuery && ( - - 找到 {filteredProviders.length} 个结果 - + {toolbarActions && ( + + {toolbarActions} + )} diff --git a/dashboard/src/routes/config/modelProvider/index.tsx b/dashboard/src/routes/config/modelProvider/index.tsx index 4933bdfc..e465aff5 100644 --- a/dashboard/src/routes/config/modelProvider/index.tsx +++ b/dashboard/src/routes/config/modelProvider/index.tsx @@ -745,7 +745,7 @@ function ModelProviderConfigPageContent() { 模型厂商设置 管理 AI 模型厂商的 API 配置 - + {selectedProviders.size > 0 && ( + {selectedProviders.size > 0 && ( + + + 批量删除 ({selectedProviders.size}) + + )} + 0} + > + + + {testingProviders.size > 0 ? `测试中 (${testingProviders.size})` : '测试全部连接'} + + + openEditDialog(null, null)} size="sm" className="w-full sm:w-auto" data-tour="add-provider-button"> + + 添加厂商 + + + + + {saving ? '保存中...' : autoSaving ? '自动保存中...' : hasUnsavedChanges ? '保存配置' : '已保存'} + + + + + + + {isRestarting ? '重启中...' : hasUnsavedChanges ? '保存并重启' : '重启麦麦'} + + + + + 确认重启麦麦? + + + + {hasUnsavedChanges + ? '当前有未保存的配置更改。确认后会先保存配置,然后重启麦麦使新配置生效。' + : '即将重启麦麦主程序。配置将在重启后生效。' + } + + + + + + 取消 + + {hasUnsavedChanges ? 
'保存并重启' : '确认重启'} + + + + + > + )} onEdit={openEditDialog} onDelete={openDeleteDialog} onTest={handleTestConnection} diff --git a/dashboard/src/routes/index.tsx b/dashboard/src/routes/index.tsx index 5a098015..a2659144 100644 --- a/dashboard/src/routes/index.tsx +++ b/dashboard/src/routes/index.tsx @@ -56,6 +56,8 @@ import { RestartOverlay } from '@/components/restart-overlay' import { ExpressionReviewer } from '@/components/expression-reviewer' import { getBotConfig, getModelConfig } from '@/lib/config-api' import { getReviewStats } from '@/lib/expression-api' +import { getDashboardVersionStatus, type DashboardVersionStatus } from '@/lib/system-api' +import { APP_VERSION } from '@/lib/version' import { ZoomableChart } from '@/components/ui/zoomable-chart' // 主导出组件:包装 RestartProvider @@ -75,6 +77,11 @@ interface BotStatus { start_time: string } +interface ReleaseStatus { + version: string + url: string +} + interface StatisticsSummary { total_requests: number total_cost: number @@ -156,10 +163,13 @@ function IndexPageContent() { const [loading, setLoading] = useState(true) const [loadingProgress, setLoadingProgress] = useState(0) const [timeRange, setTimeRange] = useState(24) // 默认24小时 - const [autoRefresh, setAutoRefresh] = useState(true) + const [autoRefresh, setAutoRefresh] = useState(false) const [hitokoto, setHitokoto] = useState<{ hitokoto: string; from: string } | null>(null) const [hitokotoLoading, setHitokotoLoading] = useState(true) const [botStatus, setBotStatus] = useState(null) + const [maibotStableRelease, setMaibotStableRelease] = useState(null) + const [maibotTestRelease, setMaibotTestRelease] = useState(null) + const [dashboardVersionStatus, setDashboardVersionStatus] = useState(null) const [featureStatus, setFeatureStatus] = useState({ memoryEnabled: false, visualEnabled: false, @@ -186,6 +196,61 @@ function IndexPageContent() { } }, []) + useEffect(() => { + let mounted = true + + const loadLatestVersions = async () => { + try { + const response 
= await fetch('https://api.github.com/repos/Mai-with-u/MaiBot/releases?per_page=20', { + headers: { Accept: 'application/vnd.github+json' }, + }) + if (!response.ok) { + throw new Error(`GitHub release status ${response.status}`) + } + const releases = await response.json() as Array<{ + draft?: boolean + prerelease?: boolean + tag_name?: string + html_url?: string + }> + const visibleReleases = releases.filter((release) => !release.draft) + const stableRelease = visibleReleases.find((release) => !release.prerelease) + const testRelease = visibleReleases[0] + if (mounted) { + if (stableRelease?.tag_name) { + setMaibotStableRelease({ + version: String(stableRelease.tag_name).replace(/^v/i, '').trim(), + url: stableRelease.html_url || 'https://github.com/Mai-with-u/MaiBot/releases', + }) + } + if (testRelease?.tag_name) { + setMaibotTestRelease({ + version: String(testRelease.tag_name).replace(/^v/i, '').trim(), + url: testRelease.html_url || 'https://github.com/Mai-with-u/MaiBot/releases', + }) + } + } + } catch (error) { + console.debug('检查 MaiBot 最新版本失败:', error) + } + + try { + const status = await getDashboardVersionStatus(APP_VERSION) + if (mounted) { + setDashboardVersionStatus(status) + } + } catch (error) { + console.debug('检查 WebUI 版本更新失败:', error) + } + } + + void loadLatestVersions() + + return () => { + mounted = false + } + }, []) + // 获取审核统计 const fetchReviewStats = useCallback(async () => { try { @@ -538,8 +603,85 @@ function IndexPageContent() { {/* 机器人状态和快速操作 */} - + {/* 机器人状态卡片 */} + + + + + 麦麦版本 + + + + + + 主程序版本 + + {botStatus?.version ? `v${botStatus.version}` : '未知'} + + + + WebUI 版本 + + v{APP_VERSION} + + + + 最新版本 v{dashboardVersionStatus?.latest_version || APP_VERSION} + + + + 最新版本 {maibotTestRelease ? `v${maibotTestRelease.version}` : 'GitHub Releases'} + + + + + + 正式版最新 + + {maibotStableRelease ? `v${maibotStableRelease.version}` : 'GitHub Releases'} + + + + + 测试版最新 + + {maibotTestRelease ? 
`v${maibotTestRelease.version}` : 'GitHub Releases'} + + + + + WebUI 最新 + + v{dashboardVersionStatus?.latest_version || APP_VERSION} + + + + + + + + @@ -549,12 +691,12 @@ function IndexPageContent() { - + {botStatus?.running ? ( <> - + {t('home.botStatus.running')} @@ -562,7 +704,7 @@ function IndexPageContent() { ) : ( <> - + {t('home.botStatus.stopped')} @@ -570,11 +712,7 @@ function IndexPageContent() { )} {botStatus && ( - - - v{botStatus.version} - - | + {t('home.botStatus.uptime', { time: formatTime(botStatus.uptime) })} )} @@ -651,25 +789,22 @@ function IndexPageContent() { {/* 问卷调查卡片 */} - + {t('home.survey.title')} - - {t('home.survey.description')} - - - + + {t('home.survey.webui')} - + {t('home.survey.maibot')} diff --git a/pytests/utils_test/statistic_test.py b/pytests/utils_test/statistic_test.py index d3d8c18a..6e8a17d2 100644 --- a/pytests/utils_test/statistic_test.py +++ b/pytests/utils_test/statistic_test.py @@ -105,11 +105,13 @@ def test_statistic_read_queries_disable_auto_commit(monkeypatch: pytest.MonkeyPa utils_module = ModuleType("src.chat.utils.utils") utils_module.is_bot_self = _is_bot_self monkeypatch.setitem(sys.modules, "src.chat.utils.utils", utils_module) + monkeypatch.setattr(statistic, "fetch_online_time_since", lambda query_start_time: []) + monkeypatch.setattr(statistic, "fetch_model_usage_since", lambda query_start_time: []) + monkeypatch.setattr(statistic, "fetch_messages_since", lambda query_start_time: []) + monkeypatch.setattr(statistic, "fetch_tool_records_since", lambda query_start_time: []) - statistic.StatisticOutputTask._fetch_online_time_since(now) - statistic.StatisticOutputTask._fetch_model_usage_since(now) task._collect_message_count_for_period([("last_hour", now - timedelta(hours=1))]) task._collect_interval_data(now, hours=1, interval_minutes=60) task._collect_metrics_interval_data(now, hours=1, interval_hours=1) - assert calls == [False] * 9 + assert calls == [] diff --git a/pytests/webui/test_statistics_service.py 
b/pytests/webui/test_statistics_service.py new file mode 100644 index 00000000..df5288c8 --- /dev/null +++ b/pytests/webui/test_statistics_service.py @@ -0,0 +1,332 @@ +from contextlib import contextmanager +from datetime import datetime, timedelta +from types import SimpleNamespace +from typing import Any, Iterator + +import pytest + +from src.services import statistics_service +from src.webui.schemas.statistics import DashboardData, StatisticsSummary, TimeSeriesData + + +class _Result: + def __init__(self, *, first_value: Any = None, all_values: list[Any] | None = None) -> None: + self._first_value = first_value + self._all_values = all_values or [] + + def first(self) -> Any: + return self._first_value + + def all(self) -> list[Any]: + return self._all_values + + +class _Session: + def __init__(self, results: list[_Result]) -> None: + self._results = results + + def exec(self, statement: Any) -> _Result: + del statement + return self._results.pop(0) + + +class _MemoryStore: + def __init__(self) -> None: + self.store: dict[str, Any] = {} + + def __getitem__(self, item: str) -> Any: + return self.store.get(item) + + def __setitem__(self, key: str, value: Any) -> None: + self.store[key] = value + + +def _patch_session_results(monkeypatch: pytest.MonkeyPatch, results: list[_Result]) -> list[bool]: + auto_commit_calls: list[bool] = [] + + @contextmanager + def _fake_get_db_session(auto_commit: bool = True) -> Iterator[_Session]: + auto_commit_calls.append(auto_commit) + yield _Session([results.pop(0)]) + + monkeypatch.setattr(statistics_service, "get_db_session", _fake_get_db_session) + return auto_commit_calls + + +def _patch_session_result_group(monkeypatch: pytest.MonkeyPatch, results: list[_Result]) -> list[bool]: + auto_commit_calls: list[bool] = [] + + @contextmanager + def _fake_get_db_session(auto_commit: bool = True) -> Iterator[_Session]: + auto_commit_calls.append(auto_commit) + yield _Session(results) + + monkeypatch.setattr(statistics_service, 
"get_db_session", _fake_get_db_session) + return auto_commit_calls + + +def _build_dashboard_data(total_requests: int = 1) -> DashboardData: + return DashboardData( + summary=StatisticsSummary(total_requests=total_requests), + model_stats=[], + hourly_data=[], + daily_data=[], + recent_activity=[], + ) + + +def _build_dashboard_data_with_time_series() -> DashboardData: + return DashboardData( + summary=StatisticsSummary(total_requests=1), + model_stats=[], + hourly_data=[ + TimeSeriesData(timestamp="2026-05-06T10:00:00", requests=0, cost=0.0, tokens=0), + TimeSeriesData(timestamp="2026-05-06T11:00:00", requests=2, cost=0.5, tokens=50), + TimeSeriesData(timestamp="2026-05-06T12:00:00", requests=0, cost=0.0, tokens=0), + ], + daily_data=[ + TimeSeriesData(timestamp="2026-05-05T00:00:00", requests=0, cost=0.0, tokens=0), + TimeSeriesData(timestamp="2026-05-06T00:00:00", requests=3, cost=0.7, tokens=70), + ], + recent_activity=[], + ) + + +def test_shared_fetch_queries_disable_auto_commit(monkeypatch: pytest.MonkeyPatch) -> None: + now = datetime(2026, 5, 6, 12, 0, 0) + online_record = SimpleNamespace(start_timestamp=now - timedelta(minutes=5), end_timestamp=now) + usage_record = SimpleNamespace( + timestamp=now, + request_type="chat.reply", + model_api_provider_name="provider", + model_assign_name="chat-main", + model_name="gpt-a", + prompt_tokens=10, + completion_tokens=5, + cost=0.01, + time_cost=1.2, + ) + message_record = SimpleNamespace(timestamp=now, message_id="msg-1") + tool_record = SimpleNamespace(timestamp=now, tool_name="reply") + auto_commit_calls = _patch_session_results( + monkeypatch, + [ + _Result(all_values=[online_record]), + _Result(all_values=[usage_record]), + _Result(all_values=[message_record]), + _Result(all_values=[tool_record]), + ], + ) + + online_ranges = statistics_service.fetch_online_time_since(now - timedelta(hours=1)) + usage_records = statistics_service.fetch_model_usage_since(now - timedelta(hours=1)) + messages = 
statistics_service.fetch_messages_since(now - timedelta(hours=1)) + tool_records = statistics_service.fetch_tool_records_since(now - timedelta(hours=1)) + + assert online_ranges == [(online_record.start_timestamp, online_record.end_timestamp)] + assert usage_records == [ + { + "timestamp": now, + "request_type": "chat.reply", + "model_api_provider_name": "provider", + "model_assign_name": "chat-main", + "model_name": "gpt-a", + "prompt_tokens": 10, + "completion_tokens": 5, + "cost": 0.01, + "time_cost": 1.2, + } + ] + assert messages == [message_record] + assert tool_records == [tool_record] + assert auto_commit_calls == [False, False, False, False] + + +def test_get_earliest_statistics_time_uses_min_valid_timestamp(monkeypatch: pytest.MonkeyPatch) -> None: + fallback_time = datetime(2026, 5, 6, 12, 0, 0) + earliest_time = datetime(2026, 5, 1, 8, 30, 0) + auto_commit_calls = _patch_session_result_group( + monkeypatch, + [ + _Result(first_value=datetime(2026, 5, 3, 9, 0, 0)), + _Result(first_value=earliest_time), + _Result(first_value=None), + _Result(first_value=datetime(2026, 5, 2, 9, 0, 0)), + ], + ) + + result = statistics_service.get_earliest_statistics_time(fallback_time) + + assert result == earliest_time + assert auto_commit_calls == [False] + + +def test_get_earliest_statistics_time_falls_back_when_query_fails(monkeypatch: pytest.MonkeyPatch) -> None: + fallback_time = datetime(2026, 5, 6, 12, 0, 0) + + @contextmanager + def _fake_get_db_session(auto_commit: bool = True) -> Iterator[_Session]: + del auto_commit + raise RuntimeError("database unavailable") + yield _Session([]) + + monkeypatch.setattr(statistics_service, "get_db_session", _fake_get_db_session) + + assert statistics_service.get_earliest_statistics_time(fallback_time) == fallback_time + + +def test_dashboard_statistics_cache_roundtrip(monkeypatch: pytest.MonkeyPatch) -> None: + memory_store = _MemoryStore() + now = datetime.now() + dashboard_data = _build_dashboard_data(total_requests=7) + 
monkeypatch.setattr(statistics_service, "local_storage", memory_store) + + statistics_service.store_dashboard_statistics_cache({24: dashboard_data}, generated_at=now) + cached_data = statistics_service.get_cached_dashboard_statistics(24) + + assert cached_data is not None + assert cached_data.summary.total_requests == 7 + + +def test_dashboard_statistics_cache_stores_sparse_time_series(monkeypatch: pytest.MonkeyPatch) -> None: + memory_store = _MemoryStore() + generated_at = datetime(2026, 5, 6, 12, 0, 0) + dashboard_data = _build_dashboard_data_with_time_series() + monkeypatch.setattr(statistics_service, "local_storage", memory_store) + + statistics_service.store_dashboard_statistics_cache({2: dashboard_data}, generated_at=generated_at) + + raw_cache = memory_store[statistics_service.DASHBOARD_STATISTICS_CACHE_KEY] + raw_entry = raw_cache["entries"]["2"] + assert raw_entry["sparse"] is True + assert raw_entry["hourly_data"] == [ + {"timestamp": "2026-05-06T11:00:00", "requests": 2, "cost": 0.5, "tokens": 50} + ] + assert raw_entry["daily_data"] == [ + {"timestamp": "2026-05-06T00:00:00", "requests": 3, "cost": 0.7, "tokens": 70} + ] + + cached_data = statistics_service.get_cached_dashboard_statistics(2, max_age_seconds=10**9) + assert cached_data is not None + assert [item.timestamp for item in cached_data.hourly_data] == [ + "2026-05-06T10:00:00", + "2026-05-06T11:00:00", + "2026-05-06T12:00:00", + ] + assert cached_data.hourly_data[0].requests == 0 + assert cached_data.hourly_data[1].requests == 2 + assert cached_data.hourly_data[2].requests == 0 + + +@pytest.mark.asyncio +async def test_get_dashboard_statistics_prefers_cache(monkeypatch: pytest.MonkeyPatch) -> None: + memory_store = _MemoryStore() + dashboard_data = _build_dashboard_data(total_requests=9) + monkeypatch.setattr(statistics_service, "local_storage", memory_store) + statistics_service.store_dashboard_statistics_cache({24: dashboard_data}, generated_at=datetime.now()) + + async def 
_fail_compute_dashboard_statistics(hours: int = 24) -> DashboardData: + del hours + raise AssertionError("cache should be used") + + monkeypatch.setattr(statistics_service, "compute_dashboard_statistics", _fail_compute_dashboard_statistics) + + result = await statistics_service.get_dashboard_statistics(24) + + assert result.summary.total_requests == 9 + + +@pytest.mark.asyncio +async def test_get_dashboard_statistics_returns_empty_when_cache_missing(monkeypatch: pytest.MonkeyPatch) -> None: + memory_store = _MemoryStore() + monkeypatch.setattr(statistics_service, "local_storage", memory_store) + + async def _fail_compute_dashboard_statistics(hours: int = 24) -> DashboardData: + del hours + raise AssertionError("dashboard API should not compute fallback data") + + monkeypatch.setattr(statistics_service, "compute_dashboard_statistics", _fail_compute_dashboard_statistics) + + result = await statistics_service.get_dashboard_statistics(24) + + assert result.summary.total_requests == 0 + assert result.model_stats == [] + + +@pytest.mark.asyncio +async def test_get_summary_statistics_aggregates_database_and_message_counts(monkeypatch: pytest.MonkeyPatch) -> None: + start_time = datetime(2026, 5, 6, 10, 0, 0) + end_time = datetime(2026, 5, 6, 12, 0, 0) + online_records = [ + SimpleNamespace( + start_timestamp=start_time - timedelta(minutes=30), + end_timestamp=start_time + timedelta(minutes=30), + ), + SimpleNamespace( + start_timestamp=start_time + timedelta(hours=1), + end_timestamp=end_time + timedelta(minutes=30), + ), + ] + auto_commit_calls = _patch_session_results( + monkeypatch, + [ + _Result(first_value=(3, 1.5, 900, 2.5)), + _Result(all_values=online_records), + ], + ) + + def _fake_count_messages(**kwargs: Any) -> int: + return 5 if kwargs.get("has_reply_to") is None else 2 + + monkeypatch.setattr(statistics_service, "count_messages", _fake_count_messages) + + summary = await statistics_service.get_summary_statistics(start_time, end_time) + + assert 
summary.total_requests == 3 + assert summary.total_cost == 1.5 + assert summary.total_tokens == 900 + assert summary.avg_response_time == 2.5 + assert summary.online_time == 5400 + assert summary.total_messages == 5 + assert summary.total_replies == 2 + assert summary.cost_per_hour == pytest.approx(1.0) + assert summary.tokens_per_hour == pytest.approx(600.0) + assert auto_commit_calls == [False, False] + + +@pytest.mark.asyncio +async def test_get_model_statistics_groups_by_display_model_name(monkeypatch: pytest.MonkeyPatch) -> None: + now = datetime(2026, 5, 6, 12, 0, 0) + records = [ + SimpleNamespace( + model_assign_name="chat-main", + model_name="gpt-a", + cost=0.4, + total_tokens=100, + time_cost=2.0, + ), + SimpleNamespace( + model_assign_name="chat-main", + model_name="gpt-a", + cost=0.6, + total_tokens=200, + time_cost=4.0, + ), + SimpleNamespace( + model_assign_name=None, + model_name="gpt-b", + cost=0.2, + total_tokens=50, + time_cost=0.0, + ), + ] + _patch_session_results(monkeypatch, [_Result(all_values=records)]) + + stats = await statistics_service.get_model_statistics(now - timedelta(hours=24)) + + assert [item.model_name for item in stats] == ["chat-main", "gpt-b"] + assert stats[0].request_count == 2 + assert stats[0].total_cost == pytest.approx(1.0) + assert stats[0].total_tokens == 300 + assert stats[0].avg_response_time == pytest.approx(3.0) + assert stats[1].avg_response_time == 0.0 diff --git a/pytests/webui/test_system_routes.py b/pytests/webui/test_system_routes.py new file mode 100644 index 00000000..a812aca4 --- /dev/null +++ b/pytests/webui/test_system_routes.py @@ -0,0 +1,13 @@ +from src.webui.routers import system + + +def test_is_newer_version_detects_patch_update() -> None: + assert system._is_newer_version("1.0.7", "1.0.6") is True + + +def test_is_newer_version_ignores_same_version_with_shorter_parts() -> None: + assert system._is_newer_version("1.0.0", "1.0") is False + + +def 
test_is_newer_version_handles_unknown_current_version() -> None: + assert system._is_newer_version("1.0.7", "unknown") is False diff --git a/src/A_memorix/core/runtime/sdk_memory_kernel.py b/src/A_memorix/core/runtime/sdk_memory_kernel.py index 4340aac0..dfbbbd77 100644 --- a/src/A_memorix/core/runtime/sdk_memory_kernel.py +++ b/src/A_memorix/core/runtime/sdk_memory_kernel.py @@ -3139,9 +3139,7 @@ class SDKMemoryKernel: return {"success": False, "queued": False, "reason": "db_save_failed"} logger.debug( - "反馈纠错任务入队: query_tool_id=%s due_at=%s", - clean_tool_id, - due_at.isoformat(), + f"反馈纠错任务入队: query_tool_id={clean_tool_id} due_at={due_at.isoformat()}", ) return { "success": True, diff --git a/src/A_memorix/core/storage/metadata_store.py b/src/A_memorix/core/storage/metadata_store.py index 2667e683..70de6ef4 100644 --- a/src/A_memorix/core/storage/metadata_store.py +++ b/src/A_memorix/core/storage/metadata_store.py @@ -162,20 +162,16 @@ class MetadataStore: def _run_runtime_auto_migration(self, *, current_version: int) -> None: """对 1.0 之后的已版本化库执行轻量自动迁移。""" logger.info( - "检测到 metadata schema 需要运行时自动迁移: current=%s, target=%s", - current_version, - SCHEMA_VERSION, + f"检测到 metadata schema 需要运行时自动迁移: current={current_version}, target={SCHEMA_VERSION}", ) self._migrate_schema() alias_result = self.rebuild_relation_hash_aliases() knowledge_type_result = self.normalize_paragraph_knowledge_types() self.set_schema_version(SCHEMA_VERSION) logger.info( - "metadata schema 运行时自动迁移完成: %s -> %s, alias_inserted=%s, knowledge_normalized=%s", - current_version, - SCHEMA_VERSION, - int(alias_result.get("inserted", 0) or 0), - int(knowledge_type_result.get("normalized", 0) or 0), + f"metadata schema 运行时自动迁移完成: {current_version} -> {SCHEMA_VERSION}, " + f"alias_inserted={int(alias_result.get('inserted', 0) or 0)}, " + f"knowledge_normalized={int(knowledge_type_result.get('normalized', 0) or 0)}", ) def _ensure_memory_feedback_task_columns(self, cursor: sqlite3.Cursor) -> None: diff 
--git a/src/A_memorix/core/utils/web_import_manager.py b/src/A_memorix/core/utils/web_import_manager.py index a4db5c54..9d9c3ba6 100644 --- a/src/A_memorix/core/utils/web_import_manager.py +++ b/src/A_memorix/core/utils/web_import_manager.py @@ -3126,7 +3126,7 @@ class ImportTaskManager: ) -> None: content = str(processed.chunk.text or "") if is_probable_hash_token(content): - logger.warning("跳过疑似哈希段落写入: source=%s preview=%s", self._source_label(file_record), content[:32]) + logger.warning(f"跳过疑似哈希段落写入: source={self._source_label(file_record)} preview={content[:32]}") return para_hash = self.plugin.metadata_store.add_paragraph( content=content, @@ -3208,10 +3208,7 @@ class ImportTaskManager: return "" if any(is_probable_hash_token(token) for token in (subject_token, predicate_token, object_token)): logger.warning( - "跳过疑似哈希关系写入: %s | %s | %s", - subject_token[:24], - predicate_token[:24], - object_token[:24], + f"跳过疑似哈希关系写入: {subject_token[:24]} | {predicate_token[:24]} | {object_token[:24]}", ) return "" diff --git a/src/A_memorix/host_service.py b/src/A_memorix/host_service.py index 5e915423..d05f109f 100644 --- a/src/A_memorix/host_service.py +++ b/src/A_memorix/host_service.py @@ -309,7 +309,7 @@ class AMemorixHostService: try: config_model = _get_config_manager().get_global_config().a_memorix except Exception as exc: - logger.warning("读取 A_Memorix 主配置失败,使用默认值: %s", exc) + logger.warning(f"读取 A_Memorix 主配置失败,使用默认值: {exc}") defaults = self._build_default_config() self._config_cache = defaults return dict(defaults) diff --git a/src/chat/utils/statistic.py b/src/chat/utils/statistic.py index a6e4e996..da70db7b 100644 --- a/src/chat/utils/statistic.py +++ b/src/chat/utils/statistic.py @@ -8,13 +8,21 @@ from typing import cast from typing_extensions import TypedDict -from sqlmodel import col, func, select +from sqlmodel import col, select from src.common.logger import get_logger from src.common.database.database import get_db_session -from 
src.common.database.database_model import Messages, ModelUsage, OnlineTime, ToolRecord +from src.common.database.database_model import OnlineTime from src.manager.async_task_manager import AsyncTask from src.manager.local_store_manager import local_storage +from src.services.statistics_service import ( + fetch_messages_since, + fetch_model_usage_since, + fetch_online_time_since, + fetch_tool_records_since, + get_earliest_statistics_time, + refresh_dashboard_statistics_cache, +) logger = get_logger("maibot_statistic") @@ -249,7 +257,7 @@ class StatisticOutputTask(AsyncTask): deploy_time = datetime(2000, 1, 1) local_storage["deploy_time"] = now.timestamp() - self.all_time_start_time = self._get_all_time_start_time(deploy_time) + self.all_time_start_time = get_earliest_statistics_time(deploy_time) self.stat_period: list[tuple[str, timedelta, str]] = [ ("all_time", now - self.all_time_start_time, "自部署以来"), # 必须保留"all_time" @@ -265,23 +273,6 @@ class StatisticOutputTask(AsyncTask): 统计时间段 [(统计名称, 统计时间段, 统计描述), ...] 
""" - @staticmethod - def _get_all_time_start_time(fallback_time: datetime) -> datetime: - """获取统计数据的最早时间,避免全量统计展示窗口漏掉历史数据。""" - try: - with get_db_session(auto_commit=False) as session: - start_times = [ - session.exec(select(func.min(ModelUsage.timestamp))).first(), - session.exec(select(func.min(Messages.timestamp))).first(), - session.exec(select(func.min(OnlineTime.start_timestamp))).first(), - session.exec(select(func.min(ToolRecord.timestamp))).first(), - ] - valid_start_times = [item for item in start_times if isinstance(item, datetime)] - if valid_start_times: - return min(valid_start_times) - except Exception as e: - logger.warning(f"获取全量统计起始时间失败,将使用部署时间:{e}") - return fallback_time def _statistic_console_output(self, stats: StatPeriodMapping, now: datetime) -> None: """ @@ -324,6 +315,10 @@ class StatisticOutputTask(AsyncTask): # 等待数据收集完成 stats = await collect_task + try: + await refresh_dashboard_statistics_cache() + except Exception as e: + logger.warning(f"刷新 WebUI 统计缓存失败,将继续生成 HTML 报告: {e}") logger.info("统计数据收集完成") # 并行执行控制台输出和HTML报告生成 @@ -354,6 +349,10 @@ class StatisticOutputTask(AsyncTask): logger.info("正在后台收集统计数据...") stats = await loop.run_in_executor(executor, self._collect_all_statistics, now) + try: + await refresh_dashboard_statistics_cache() + except Exception as e: + logger.warning(f"刷新 WebUI 统计缓存失败,将继续生成 HTML 报告: {e}") logger.info("统计数据收集完成") # 创建并发的输出任务 @@ -453,33 +452,6 @@ class StatisticOutputTask(AsyncTask): counter = cast(defaultdict[str, list[float]], stats_period[key]) counter[subkey].append(value) - @staticmethod - def _fetch_online_time_since(query_start_time: datetime) -> list[tuple[datetime, datetime]]: - with get_db_session(auto_commit=False) as session: - statement = select(OnlineTime).where(col(OnlineTime.end_timestamp) >= query_start_time) - records = session.exec(statement).all() - return [(record.start_timestamp, record.end_timestamp) for record in records] - - @staticmethod - def 
_fetch_model_usage_since(query_start_time: datetime) -> list[dict[str, object]]: - with get_db_session(auto_commit=False) as session: - statement = select(ModelUsage).where(col(ModelUsage.timestamp) >= query_start_time) - records = session.exec(statement).all() - return [ - { - "timestamp": record.timestamp, - "request_type": record.request_type, - "model_api_provider_name": record.model_api_provider_name, - "model_assign_name": record.model_assign_name, - "model_name": record.model_name, - "prompt_tokens": record.prompt_tokens, - "completion_tokens": record.completion_tokens, - "cost": record.cost, - "time_cost": record.time_cost, - } - for record in records - ] - @staticmethod def _collect_model_request_for_period(collect_period: list[tuple[str, datetime]]) -> StatPeriodMapping: """ @@ -500,7 +472,7 @@ class StatisticOutputTask(AsyncTask): # 以最早的时间戳为起始时间获取记录 # Assuming LLMUsage.timestamp is a DateTimeField query_start_time = collect_period[-1][1] - records = StatisticOutputTask._fetch_model_usage_since(query_start_time) + records = fetch_model_usage_since(query_start_time) for record in records: record_timestamp = cast(datetime, record["timestamp"]) for idx, (_, period_start) in enumerate(collect_period): @@ -647,7 +619,7 @@ class StatisticOutputTask(AsyncTask): query_start_time = collect_period[-1][1] # Assuming OnlineTime.end_timestamp is a DateTimeField - records = StatisticOutputTask._fetch_online_time_since(query_start_time) + records = fetch_online_time_since(query_start_time) for record_start_timestamp, record_end_timestamp in records: for idx, (_, period_boundary_start) in enumerate(collect_period): if record_end_timestamp >= period_boundary_start: @@ -688,9 +660,7 @@ class StatisticOutputTask(AsyncTask): } query_start_timestamp = collect_period[-1][1] - with get_db_session(auto_commit=False) as session: - statement = select(Messages).where(col(Messages.timestamp) >= query_start_timestamp) - messages = session.exec(statement).all() + messages = 
fetch_messages_since(query_start_timestamp) for message in messages: message_time_ts = message.timestamp.timestamp() @@ -737,9 +707,7 @@ class StatisticOutputTask(AsyncTask): # 使用 ToolRecord 中的 reply 工具次数作为回复数基准 try: tool_query_start_timestamp = collect_period[-1][1] - with get_db_session(auto_commit=False) as session: - statement = select(ToolRecord).where(col(ToolRecord.timestamp) >= tool_query_start_timestamp) - tool_records = session.exec(statement).all() + tool_records = fetch_tool_records_since(tool_query_start_timestamp) for tool_record in tool_records: if tool_record.tool_name != "reply": continue @@ -865,7 +833,7 @@ class StatisticOutputTask(AsyncTask): "module": defaultdict(lambda: {"count": 0.0, "sum": 0.0, "sum_sq": 0.0}), } - records = self._fetch_model_usage_since(self.all_time_start_time) + records = fetch_model_usage_since(self.all_time_start_time) for record in records: time_cost = cast(float | None, record["time_cost"]) or 0.0 if time_cost <= 0: @@ -1806,7 +1774,7 @@ class StatisticOutputTask(AsyncTask): # 查询LLM使用记录 query_start_time = start_time - records = StatisticOutputTask._fetch_model_usage_since(query_start_time) + records = fetch_model_usage_since(query_start_time) for record in records: record_time = cast(datetime, record["timestamp"]) @@ -1835,9 +1803,7 @@ class StatisticOutputTask(AsyncTask): # 查询消息记录 query_start_timestamp = start_time.timestamp() - with get_db_session(auto_commit=False) as session: - statement = select(Messages).where(col(Messages.timestamp) >= start_time) - messages = session.exec(statement).all() + messages = fetch_messages_since(start_time) for message in messages: message_time_ts = message.timestamp.timestamp() @@ -2197,7 +2163,7 @@ class StatisticOutputTask(AsyncTask): # 查询LLM使用记录 query_start_time = start_time - records = StatisticOutputTask._fetch_model_usage_since(query_start_time) + records = fetch_model_usage_since(query_start_time) for record in records: record_time = cast(datetime, record["timestamp"]) @@ 
-2216,9 +2182,7 @@ class StatisticOutputTask(AsyncTask): # 查询消息记录 query_start_timestamp = start_time.timestamp() - with get_db_session(auto_commit=False) as session: - statement = select(Messages).where(col(Messages.timestamp) >= start_time) - messages = session.exec(statement).all() + messages = fetch_messages_since(start_time) for message in messages: message_time_ts = message.timestamp.timestamp() @@ -2232,7 +2196,7 @@ class StatisticOutputTask(AsyncTask): total_replies[interval_index] += 1 # 查询在线时间记录 - records = StatisticOutputTask._fetch_online_time_since(start_time) + records = fetch_online_time_since(start_time) for record_start, record_end in records: # 找到记录覆盖的所有时间间隔 for idx, time_point in enumerate(time_points): @@ -2491,6 +2455,10 @@ class AsyncStatisticOutputTask(AsyncTask): # 数据收集任务 stats = await loop.run_in_executor(executor, self._statistic_task._collect_all_statistics, now) + try: + await refresh_dashboard_statistics_cache() + except Exception as e: + logger.warning(f"刷新 WebUI 统计缓存失败,将继续生成 HTML 报告: {e}") logger.info("统计数据收集完成") # 创建并发的输出任务 diff --git a/src/common/prompt_i18n.py b/src/common/prompt_i18n.py index 8075d913..5b487ded 100644 --- a/src/common/prompt_i18n.py +++ b/src/common/prompt_i18n.py @@ -134,7 +134,7 @@ def _read_metadata_file(metadata_path: Path) -> dict[str, Any]: else: metadata = parse_toml(metadata_path.read_text(encoding="utf-8")) except Exception as exc: - logger.warning("读取 Prompt 元信息文件 %s 失败:%s", metadata_path, exc) + logger.warning(f"读取 Prompt 元信息文件 {metadata_path} 失败:{exc}") return {} return dict(metadata) if isinstance(metadata, dict) else {} diff --git a/src/config/config.py b/src/config/config.py index 50e40cff..932fa9d3 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -55,7 +55,7 @@ BOT_CONFIG_PATH: Path = (CONFIG_DIR / "bot_config.toml").resolve().absolute() MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute() LEGACY_ENV_PATH: Path = (PROJECT_ROOT / 
".env").resolve().absolute() A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute() -MMC_VERSION: str = "1.0.0-pre.11" +MMC_VERSION: str = "1.0.0-pre.13" CONFIG_VERSION: str = "8.10.9" MODEL_CONFIG_VERSION: str = "1.15.3" diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 5a1a4651..ee6cf87c 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -93,7 +93,7 @@ class BotConfig(ConfigBase): "x-icon": "user-circle", }, ) - """机器人昵称""" + """""" alias_names: list[str] = Field( default_factory=lambda: [], @@ -130,6 +130,7 @@ class PersonalityConfig(ConfigBase): "x-icon": "user-circle", "x-textarea-min-height": 40, "x-textarea-rows": 1, + "x-description-display": "icon", }, ) """人格,建议200字以内,描述人格特质和身份特征;可以写完整设定。要求第二人称""" @@ -146,6 +147,7 @@ class PersonalityConfig(ConfigBase): "x-icon": "message-square", "x-textarea-min-height": 40, "x-textarea-rows": 1, + "x-description-display": "icon", }, ) """默认表达风格,描述麦麦说话的表达风格,表达习惯,如要修改,可以酌情新增内容,建议1-2行""" @@ -514,6 +516,7 @@ class MessageReceiveConfig(ConfigBase): json_schema_extra={ "x-widget": "input", "x-icon": "image", + "advanced": True, }, ) """ diff --git a/src/core/tooling.py b/src/core/tooling.py index 3cf438d1..31281cc0 100644 --- a/src/core/tooling.py +++ b/src/core/tooling.py @@ -419,9 +419,7 @@ class ToolRegistry: return await provider.invoke(invocation, context) except Exception as exc: logger.exception( - "工具调用异常: tool=%s provider=%s", - invocation.tool_name, - getattr(provider, "provider_name", ""), + f"工具调用异常: tool={invocation.tool_name} provider={getattr(provider, 'provider_name', '')}", ) error_message = str(exc).strip() if error_message: diff --git a/src/emoji_system/emoji_manager.py b/src/emoji_system/emoji_manager.py index 272f2670..cd6f0e9f 100644 --- a/src/emoji_system/emoji_manager.py +++ b/src/emoji_system/emoji_manager.py @@ -768,11 +768,8 @@ class EmojiManager: selected_emoji, similarity = 
random.choice(top_emojis) self.update_emoji_usage(selected_emoji) logger.info( - "[获取表情包] 为[%s]选中表情包: %s(%s),相似度: %.4f", - emotion_label, - selected_emoji.file_name, - ",".join(_get_emoji_emotions(selected_emoji)), - similarity, + f"[获取表情包] 为[{emotion_label}]选中表情包: " + f"{selected_emoji.file_name}({','.join(_get_emoji_emotions(selected_emoji))}),相似度: {similarity:.4f}", ) return selected_emoji diff --git a/src/main.py b/src/main.py index 1c519453..2294c308 100644 --- a/src/main.py +++ b/src/main.py @@ -81,6 +81,7 @@ class MainSystem: await config_manager.start_file_watcher() a_memorix_host_service.register_config_reload_callback() + prompt_manager.load_prompts() # 添加在线时间统计任务 await async_task_manager.add_task(OnlineTimeRecordTask()) @@ -121,8 +122,6 @@ class MainSystem: self.app.register_message_handler(chat_bot.message_process) self.app.register_custom_message_handler("message_id_echo", chat_bot.echo_message_process) - prompt_manager.load_prompts() - # 触发 ON_START 事件 from src.core.event_bus import event_bus from src.core.types import EventType @@ -161,10 +160,8 @@ async def main() -> None: """主函数""" system = MainSystem() try: - await asyncio.gather( - system.initialize(), - system.schedule_tasks(), - ) + await system.initialize() + await system.schedule_tasks() finally: disable_stage_status_board() emoji_manager.shutdown() diff --git a/src/plugin_runtime/host/message_utils.py b/src/plugin_runtime/host/message_utils.py index 7f72fff3..a68af32d 100644 --- a/src/plugin_runtime/host/message_utils.py +++ b/src/plugin_runtime/host/message_utils.py @@ -191,7 +191,7 @@ class PluginMessageUtils: return "" return base64.b64encode(image_path.read_bytes()).decode("utf-8") except Exception as exc: - logger.debug("通过 hash 加载历史媒体失败: type=%s hash=%s error=%s", image_type, binary_hash, exc) + logger.debug(f"通过 hash 加载历史媒体失败: type={image_type} hash={binary_hash} error={exc}") return "" @staticmethod diff --git a/src/services/memory_flow_service.py 
b/src/services/memory_flow_service.py index 5ef7feb2..24098de4 100644 --- a/src/services/memory_flow_service.py +++ b/src/services/memory_flow_service.py @@ -48,7 +48,7 @@ class PersonFactWritebackService: except asyncio.CancelledError: pass except Exception as exc: - logger.warning("关闭人物事实写回 worker 失败: %s", exc) + logger.warning(f"关闭人物事实写回 worker 失败: {exc}") async def enqueue(self, message: Any) -> None: if not bool(global_config.a_memorix.integration.person_fact_writeback_enabled): @@ -67,7 +67,7 @@ class PersonFactWritebackService: try: await self._handle_message(message) except Exception as exc: - logger.warning("人物事实写回处理失败: %s", exc, exc_info=True) + logger.warning(f"人物事实写回处理失败: {exc}", exc_info=True) finally: self._queue.task_done() except asyncio.CancelledError: @@ -126,7 +126,7 @@ class PersonFactWritebackService: try: replies = find_messages(message_id=reply_to, limit=1) except Exception as exc: - logger.debug("查询 reply_to 目标失败: %s", exc) + logger.debug(f"查询 reply_to 目标失败: {exc}") return None if not replies: return None @@ -166,7 +166,7 @@ class PersonFactWritebackService: try: response_result = await self._extractor.generate_response(prompt) except Exception as exc: - logger.debug("人物事实提取模型调用失败: %s", exc) + logger.debug(f"人物事实提取模型调用失败: {exc}") return [] return self._parse_fact_list(response_result.response) @@ -248,7 +248,7 @@ class ChatSummaryWritebackService: except asyncio.CancelledError: pass except Exception as exc: - logger.warning("关闭聊天摘要写回 worker 失败: %s", exc) + logger.warning(f"关闭聊天摘要写回 worker 失败: {exc}") async def enqueue(self, message: Any) -> None: if not bool(global_config.a_memorix.integration.chat_summary_writeback_enabled): @@ -267,7 +267,7 @@ class ChatSummaryWritebackService: try: await self._handle_message(message) except Exception as exc: - logger.warning("聊天摘要写回处理失败: %s", exc, exc_info=True) + logger.warning(f"聊天摘要写回处理失败: {exc}", exc_info=True) finally: self._queue.task_done() except asyncio.CancelledError: @@ -319,21 +319,16 @@ class 
ChatSummaryWritebackService: ) if not getattr(result, "success", False): logger.warning( - "聊天摘要自动写回失败: session_id=%s detail=%s", - session_id, - getattr(result, "detail", ""), + f"聊天摘要自动写回失败: session_id={session_id} detail={getattr(result, 'detail', '')}", ) return state.last_trigger_message_count = total_message_count state.last_trigger_time = time.time() logger.info( - "聊天摘要自动写回成功: session_id=%s trigger=%s total_messages=%s context_length=%s detail=%s", - session_id, - "message_threshold", - total_message_count, - context_length, - getattr(result, "detail", ""), + f"聊天摘要自动写回成功: session_id={session_id} trigger=message_threshold " + f"total_messages={total_message_count} context_length={context_length} " + f"detail={getattr(result, 'detail', '')}", ) async def _load_last_trigger_message_count(self, *, session_id: str, total_message_count: int) -> int: @@ -363,7 +358,7 @@ class ChatSummaryWritebackService: # 至少避免重启后立刻重复写入一条相近摘要。 return total_message_count except Exception as exc: - logger.debug("恢复聊天摘要写回游标失败: session_id=%s error=%s", session_id, exc) + logger.debug(f"恢复聊天摘要写回游标失败: session_id={session_id} error={exc}") return 0 @staticmethod diff --git a/src/services/memory_service.py b/src/services/memory_service.py index e4d0e216..d7ec6cd4 100644 --- a/src/services/memory_service.py +++ b/src/services/memory_service.py @@ -230,7 +230,7 @@ class MemoryService: ) return self._coerce_search_result(payload) except Exception as exc: - logger.warning("长期记忆搜索失败: %s", exc) + logger.warning(f"长期记忆搜索失败: {exc}") return MemorySearchResult(success=False, error=str(exc)) async def enqueue_feedback_task( @@ -253,7 +253,7 @@ class MemoryService: timeout_ms=10000, ) except Exception as exc: - logger.warning("反馈纠错任务入队失败: %s", exc) + logger.warning(f"反馈纠错任务入队失败: {exc}") return {"success": False, "queued": False, "reason": str(exc)} return payload if isinstance(payload, dict) else {"success": False, "queued": False, "reason": "invalid_payload"} @@ -291,7 +291,7 @@ class 
MemoryService: ) return self._coerce_write_result(payload) except Exception as exc: - logger.warning("长期记忆写入摘要失败: %s", exc) + logger.warning(f"长期记忆写入摘要失败: {exc}") return MemoryWriteResult(success=False, detail=str(exc)) async def ingest_text( @@ -338,7 +338,7 @@ class MemoryService: ) return self._coerce_write_result(payload) except Exception as exc: - logger.warning("长期记忆写入文本失败: %s", exc) + logger.warning(f"长期记忆写入文本失败: {exc}") return MemoryWriteResult(success=False, detail=str(exc)) async def get_person_profile(self, person_id: str, *, chat_id: str = "", limit: int = 10) -> PersonProfileResult: @@ -352,7 +352,7 @@ class MemoryService: ) return self._coerce_profile_result(payload) except Exception as exc: - logger.warning("获取人物画像失败: %s", exc) + logger.warning(f"获取人物画像失败: {exc}") return PersonProfileResult() async def maintain_memory( @@ -373,7 +373,7 @@ class MemoryService: return MemoryWriteResult(success=False, detail="invalid_payload") return MemoryWriteResult(success=bool(payload.get("success")), detail=str(payload.get("detail", "") or "")) except Exception as exc: - logger.warning("记忆维护失败: %s", exc) + logger.warning(f"记忆维护失败: {exc}") return MemoryWriteResult(success=False, detail=str(exc)) async def memory_stats(self) -> Dict[str, Any]: @@ -381,77 +381,77 @@ class MemoryService: payload = await self._invoke("memory_stats", {}) return payload if isinstance(payload, dict) else {} except Exception as exc: - logger.warning("获取记忆统计失败: %s", exc) + logger.warning(f"获取记忆统计失败: {exc}") return {} async def graph_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_graph_admin", action=action, **kwargs) except Exception as exc: - logger.warning("图谱管理调用失败: %s", exc) + logger.warning(f"图谱管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def source_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_source_admin", action=action, **kwargs) except Exception as 
exc: - logger.warning("来源管理调用失败: %s", exc) + logger.warning(f"来源管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def episode_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_episode_admin", action=action, **kwargs) except Exception as exc: - logger.warning("Episode 管理调用失败: %s", exc) + logger.warning(f"Episode 管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def profile_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_profile_admin", action=action, **kwargs) except Exception as exc: - logger.warning("画像管理调用失败: %s", exc) + logger.warning(f"画像管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def feedback_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_feedback_admin", action=action, **kwargs) except Exception as exc: - logger.warning("反馈纠错管理调用失败: %s", exc) + logger.warning(f"反馈纠错管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def runtime_admin(self, *, action: str, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_runtime_admin", action=action, **kwargs) except Exception as exc: - logger.warning("运行时管理调用失败: %s", exc) + logger.warning(f"运行时管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def import_admin(self, *, action: str, timeout_ms: int = 120000, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_import_admin", action=action, timeout_ms=timeout_ms, **kwargs) except Exception as exc: - logger.warning("导入管理调用失败: %s", exc) + logger.warning(f"导入管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def tuning_admin(self, *, action: str, timeout_ms: int = 120000, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_tuning_admin", action=action, timeout_ms=timeout_ms, **kwargs) except Exception as exc: - logger.warning("调优管理调用失败: %s", exc) + 
logger.warning(f"调优管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def v5_admin(self, *, action: str, timeout_ms: int = 30000, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_v5_admin", action=action, timeout_ms=timeout_ms, **kwargs) except Exception as exc: - logger.warning("V5 记忆管理调用失败: %s", exc) + logger.warning(f"V5 记忆管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def delete_admin(self, *, action: str, timeout_ms: int = 120000, **kwargs) -> Dict[str, Any]: try: return await self._invoke_admin("memory_delete_admin", action=action, timeout_ms=timeout_ms, **kwargs) except Exception as exc: - logger.warning("删除管理调用失败: %s", exc) + logger.warning(f"删除管理调用失败: {exc}") return {"success": False, "error": str(exc)} async def get_recycle_bin(self, *, limit: int = 50) -> Dict[str, Any]: @@ -459,7 +459,7 @@ class MemoryService: payload = await self._invoke("maintain_memory", {"action": "recycle_bin", "limit": max(1, int(limit or 50))}) return payload if isinstance(payload, dict) else {"success": False, "error": "invalid_payload"} except Exception as exc: - logger.warning("获取回收站失败: %s", exc) + logger.warning(f"获取回收站失败: {exc}") return {"success": False, "error": str(exc)} async def restore_memory(self, *, target: str) -> MemoryWriteResult: diff --git a/src/services/statistics_service.py b/src/services/statistics_service.py new file mode 100644 index 00000000..889746fb --- /dev/null +++ b/src/services/statistics_service.py @@ -0,0 +1,491 @@ +from datetime import datetime, timedelta +from typing import Any, Dict, List + +from sqlalchemy import desc, func, or_ +from sqlmodel import col, select + +from src.common.database.database import get_db_session +from src.common.database.database_model import Messages, ModelUsage, OnlineTime, ToolRecord +from src.common.logger import get_logger +from src.common.message_repository import count_messages +from src.manager.local_store_manager import local_storage +from 
src.webui.schemas.statistics import DashboardData, ModelStatistics, StatisticsSummary, TimeSeriesData + +logger = get_logger("statistics_service") + +DASHBOARD_STATISTICS_CACHE_KEY = "webui_dashboard_statistics_cache" +DASHBOARD_STATISTICS_CACHE_VERSION = 1 +DEFAULT_DASHBOARD_CACHE_MAX_AGE_SECONDS = 600 +DEFAULT_DASHBOARD_CACHE_HOURS = (24, 168, 720) +_SPARSE_TIME_SERIES_FIELDS = ("hourly_data", "daily_data") + + +async def get_dashboard_statistics(hours: int = 24, *, use_cache: bool = True) -> DashboardData: + """获取 WebUI 仪表盘统计数据。""" + if use_cache: + cached_data = get_cached_dashboard_statistics(hours) + if cached_data is not None: + return cached_data + + return build_empty_dashboard_statistics() + + +def build_empty_dashboard_statistics() -> DashboardData: + """构造空的 WebUI 仪表盘统计数据。""" + return DashboardData( + summary=StatisticsSummary(), + model_stats=[], + hourly_data=[], + daily_data=[], + recent_activity=[], + ) + + +async def compute_dashboard_statistics(hours: int = 24) -> DashboardData: + """获取 WebUI 仪表盘统计数据。""" + now = datetime.now() + start_time = now - timedelta(hours=hours) + + summary = await get_summary_statistics(start_time, now) + model_stats = await get_model_statistics(start_time) + hourly_data = await get_hourly_statistics(start_time, now) + daily_data = await get_daily_statistics(now - timedelta(days=7), now) + recent_activity = await get_recent_activity(limit=10) + + return DashboardData( + summary=summary, + model_stats=model_stats, + hourly_data=hourly_data, + daily_data=daily_data, + recent_activity=recent_activity, + ) + + +def get_cached_dashboard_statistics( + hours: int = 24, + *, + max_age_seconds: int = DEFAULT_DASHBOARD_CACHE_MAX_AGE_SECONDS, +) -> DashboardData | None: + """从本地快照读取 WebUI 仪表盘统计数据。""" + raw_cache = local_storage[DASHBOARD_STATISTICS_CACHE_KEY] + if not isinstance(raw_cache, dict): + return None + if raw_cache.get("version") != DASHBOARD_STATISTICS_CACHE_VERSION: + return None + + generated_at = 
raw_cache.get("generated_at") + if not isinstance(generated_at, (int, float)): + return None + if datetime.now().timestamp() - float(generated_at) > max_age_seconds: + return None + + entries = raw_cache.get("entries") + if not isinstance(entries, dict): + return None + + entry = entries.get(str(hours)) + if not isinstance(entry, dict): + return None + + try: + expanded_entry = _expand_dashboard_cache_entry(entry, hours=hours, generated_at=float(generated_at)) + return DashboardData.model_validate(expanded_entry) + except Exception as e: + logger.warning(f"读取 WebUI 统计缓存失败,将实时计算: {e}") + return None + + +def store_dashboard_statistics_cache(entries: dict[int, DashboardData], *, generated_at: datetime | None = None) -> None: + """保存 WebUI 仪表盘统计数据快照。""" + snapshot_time = generated_at or datetime.now() + local_storage[DASHBOARD_STATISTICS_CACHE_KEY] = { + "version": DASHBOARD_STATISTICS_CACHE_VERSION, + "generated_at": snapshot_time.timestamp(), + "entries": {str(hours): _compact_dashboard_cache_entry(data) for hours, data in entries.items()}, + } + + +def update_dashboard_statistics_cache_entry( + hours: int, + data: DashboardData, + *, + generated_at: datetime | None = None, +) -> None: + """更新单个 WebUI 仪表盘统计缓存条目。""" + raw_cache = local_storage[DASHBOARD_STATISTICS_CACHE_KEY] + entries: dict[str, Any] = {} + if isinstance(raw_cache, dict) and isinstance(raw_cache.get("entries"), dict): + entries.update(raw_cache["entries"]) + + snapshot_time = generated_at or datetime.now() + entries[str(hours)] = _compact_dashboard_cache_entry(data) + local_storage[DASHBOARD_STATISTICS_CACHE_KEY] = { + "version": DASHBOARD_STATISTICS_CACHE_VERSION, + "generated_at": snapshot_time.timestamp(), + "entries": entries, + } + + +async def refresh_dashboard_statistics_cache(hours_values: tuple[int, ...] 
= DEFAULT_DASHBOARD_CACHE_HOURS) -> None: + """刷新 WebUI 仪表盘统计数据快照。""" + cache_entries: dict[int, DashboardData] = {} + for hours in hours_values: + cache_entries[hours] = await compute_dashboard_statistics(hours=hours) + store_dashboard_statistics_cache(cache_entries) + + +def _compact_dashboard_cache_entry(data: DashboardData) -> dict[str, Any]: + """压缩 WebUI 仪表盘缓存条目,去掉全 0 时间桶。""" + entry = data.model_dump(mode="json") + for field_name in _SPARSE_TIME_SERIES_FIELDS: + series = entry.get(field_name) + if isinstance(series, list): + entry[field_name] = [item for item in series if not _is_empty_time_series_item(item)] + entry["sparse"] = True + return entry + + +def _expand_dashboard_cache_entry(entry: dict[str, Any], *, hours: int, generated_at: float) -> dict[str, Any]: + """将稀疏缓存条目展开为前端需要的完整时间序列。""" + if entry.get("sparse") is not True: + return entry + + expanded = dict(entry) + generated_datetime = datetime.fromtimestamp(generated_at) + expanded["hourly_data"] = _expand_time_series( + sparse_series=entry.get("hourly_data"), + start_time=generated_datetime - timedelta(hours=hours), + end_time=generated_datetime, + step=timedelta(hours=1), + timestamp_format="%Y-%m-%dT%H:00:00", + ) + expanded["daily_data"] = _expand_time_series( + sparse_series=entry.get("daily_data"), + start_time=generated_datetime - timedelta(days=7), + end_time=generated_datetime, + step=timedelta(days=1), + timestamp_format="%Y-%m-%dT00:00:00", + ) + expanded.pop("sparse", None) + return expanded + + +def _expand_time_series( + *, + sparse_series: Any, + start_time: datetime, + end_time: datetime, + step: timedelta, + timestamp_format: str, +) -> list[dict[str, Any]]: + sparse_items = sparse_series if isinstance(sparse_series, list) else [] + sparse_by_timestamp = { + item.get("timestamp"): item + for item in sparse_items + if isinstance(item, dict) and isinstance(item.get("timestamp"), str) + } + + result: list[dict[str, Any]] = [] + current = _floor_time_for_format(start_time, 
timestamp_format) + while current <= end_time: + timestamp = current.strftime(timestamp_format) + item = sparse_by_timestamp.get(timestamp) + if isinstance(item, dict): + result.append(item) + else: + result.append({"timestamp": timestamp, "requests": 0, "cost": 0.0, "tokens": 0}) + current += step + return result + + +def _floor_time_for_format(value: datetime, timestamp_format: str) -> datetime: + if "%H" in timestamp_format: + return value.replace(minute=0, second=0, microsecond=0) + return value.replace(hour=0, minute=0, second=0, microsecond=0) + + +def _is_empty_time_series_item(item: Any) -> bool: + if not isinstance(item, dict): + return False + return ( + int(item.get("requests") or 0) == 0 + and float(item.get("cost") or 0.0) == 0.0 + and int(item.get("tokens") or 0) == 0 + ) + + +async def get_summary_statistics(start_time: datetime, end_time: datetime) -> StatisticsSummary: + """获取指定时间范围内的摘要统计数据。""" + summary = StatisticsSummary( + total_requests=0, + total_cost=0.0, + total_tokens=0, + online_time=0.0, + total_messages=0, + total_replies=0, + avg_response_time=0.0, + cost_per_hour=0.0, + tokens_per_hour=0.0, + ) + + with get_db_session(auto_commit=False) as session: + statement = select( + func.count().label("total_requests"), + func.sum(col(ModelUsage.cost)).label("total_cost"), + func.sum(col(ModelUsage.total_tokens)).label("total_tokens"), + func.avg(col(ModelUsage.time_cost)).label("avg_response_time"), + ).where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) + result = session.exec(statement).first() + + if result: + total_requests, total_cost, total_tokens, avg_response_time = result + summary.total_requests = total_requests or 0 + summary.total_cost = float(total_cost or 0.0) + summary.total_tokens = total_tokens or 0 + summary.avg_response_time = float(avg_response_time or 0.0) + + with get_db_session(auto_commit=False) as session: + statement = select(OnlineTime).where( + or_( + col(OnlineTime.start_timestamp) 
>= start_time, + col(OnlineTime.end_timestamp) >= start_time, + ) + ) + online_records = session.exec(statement).all() + + for record in online_records: + start = max(record.start_timestamp, start_time) + end = min(record.end_timestamp, end_time) + if end > start: + summary.online_time += (end - start).total_seconds() + + summary.total_messages = count_messages(start_time=start_time.timestamp(), end_time=end_time.timestamp()) + summary.total_replies = count_messages( + start_time=start_time.timestamp(), + end_time=end_time.timestamp(), + has_reply_to=True, + ) + + if summary.online_time > 0: + online_hours = summary.online_time / 3600.0 + summary.cost_per_hour = summary.total_cost / online_hours + summary.tokens_per_hour = summary.total_tokens / online_hours + + return summary + + +async def get_model_statistics(start_time: datetime) -> List[ModelStatistics]: + """获取指定时间之后的模型统计数据。""" + statement = ( + select(ModelUsage) + .where(col(ModelUsage.timestamp) >= start_time) + .order_by(desc(col(ModelUsage.timestamp))) + .limit(200) + ) + + with get_db_session(auto_commit=False) as session: + records = session.exec(statement).all() + + aggregates: Dict[str, Dict[str, float | int]] = {} + for record in records: + model_name = record.model_assign_name or record.model_name or "unknown" + if model_name not in aggregates: + aggregates[model_name] = { + "request_count": 0, + "total_cost": 0.0, + "total_tokens": 0, + "total_time_cost": 0.0, + "time_cost_count": 0, + } + + bucket = aggregates[model_name] + bucket["request_count"] = int(bucket["request_count"]) + 1 + bucket["total_cost"] = float(bucket["total_cost"]) + float(record.cost or 0.0) + bucket["total_tokens"] = int(bucket["total_tokens"]) + int(record.total_tokens or 0) + if record.time_cost: + bucket["total_time_cost"] = float(bucket["total_time_cost"]) + float(record.time_cost) + bucket["time_cost_count"] = int(bucket["time_cost_count"]) + 1 + + result: List[ModelStatistics] = [] + for model_name, bucket in sorted( + 
aggregates.items(), + key=lambda item: float(item[1]["request_count"]), + reverse=True, + )[:10]: + time_cost_count = int(bucket["time_cost_count"]) + avg_time_cost = float(bucket["total_time_cost"]) / time_cost_count if time_cost_count > 0 else 0.0 + result.append( + ModelStatistics( + model_name=model_name, + request_count=int(bucket["request_count"]), + total_cost=float(bucket["total_cost"]), + total_tokens=int(bucket["total_tokens"]), + avg_response_time=avg_time_cost, + ) + ) + + return result + + +async def get_hourly_statistics(start_time: datetime, end_time: datetime) -> List[TimeSeriesData]: + """按小时聚合 LLM 请求、费用和 token。""" + hour_expr = func.strftime("%Y-%m-%dT%H:00:00", col(ModelUsage.timestamp)) + statement = ( + select( + hour_expr.label("hour"), + func.count().label("requests"), + func.sum(col(ModelUsage.cost)).label("cost"), + func.sum(col(ModelUsage.total_tokens)).label("tokens"), + ) + .where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) + .group_by(hour_expr) + ) + + with get_db_session(auto_commit=False) as session: + rows = session.exec(statement).all() + + data_dict = {row[0]: row for row in rows} + result = [] + current = start_time.replace(minute=0, second=0, microsecond=0) + while current <= end_time: + hour_str = current.strftime("%Y-%m-%dT%H:00:00") + if hour_str in data_dict: + row = data_dict[hour_str] + result.append( + TimeSeriesData( + timestamp=hour_str, + requests=row[1] or 0, + cost=float(row[2] or 0.0), + tokens=row[3] or 0, + ) + ) + else: + result.append(TimeSeriesData(timestamp=hour_str, requests=0, cost=0.0, tokens=0)) + current += timedelta(hours=1) + + return result + + +async def get_daily_statistics(start_time: datetime, end_time: datetime) -> List[TimeSeriesData]: + """按天聚合 LLM 请求、费用和 token。""" + day_expr = func.strftime("%Y-%m-%dT00:00:00", col(ModelUsage.timestamp)) + statement = ( + select( + day_expr.label("day"), + func.count().label("requests"), + 
func.sum(col(ModelUsage.cost)).label("cost"), + func.sum(col(ModelUsage.total_tokens)).label("tokens"), + ) + .where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) + .group_by(day_expr) + ) + + with get_db_session(auto_commit=False) as session: + rows = session.exec(statement).all() + + data_dict = {row[0]: row for row in rows} + result = [] + current = start_time.replace(hour=0, minute=0, second=0, microsecond=0) + while current <= end_time: + day_str = current.strftime("%Y-%m-%dT00:00:00") + if day_str in data_dict: + row = data_dict[day_str] + result.append( + TimeSeriesData( + timestamp=day_str, + requests=row[1] or 0, + cost=float(row[2] or 0.0), + tokens=row[3] or 0, + ) + ) + else: + result.append(TimeSeriesData(timestamp=day_str, requests=0, cost=0.0, tokens=0)) + current += timedelta(days=1) + + return result + + +async def get_recent_activity(limit: int = 10) -> List[Dict[str, Any]]: + """获取最近的 LLM 调用记录。""" + with get_db_session(auto_commit=False) as session: + statement = select(ModelUsage).order_by(desc(col(ModelUsage.timestamp))).limit(limit) + records = session.exec(statement).all() + + activities = [] + for record in records: + activities.append( + { + "timestamp": record.timestamp.isoformat(), + "model": record.model_assign_name or record.model_name, + "request_type": record.request_type, + "tokens": record.total_tokens or 0, + "cost": record.cost or 0.0, + "time_cost": record.time_cost or 0.0, + "status": None, + } + ) + + return activities + + +def fetch_online_time_since(query_start_time: datetime) -> list[tuple[datetime, datetime]]: + """获取指定时间之后仍有覆盖的在线时间区间。""" + with get_db_session(auto_commit=False) as session: + statement = select(OnlineTime).where(col(OnlineTime.end_timestamp) >= query_start_time) + records = session.exec(statement).all() + return [(record.start_timestamp, record.end_timestamp) for record in records] + + +def fetch_model_usage_since(query_start_time: datetime) -> list[dict[str, object]]: + 
"""获取指定时间之后的 LLM 使用记录。""" + with get_db_session(auto_commit=False) as session: + statement = select(ModelUsage).where(col(ModelUsage.timestamp) >= query_start_time) + records = session.exec(statement).all() + return [ + { + "timestamp": record.timestamp, + "request_type": record.request_type, + "model_api_provider_name": record.model_api_provider_name, + "model_assign_name": record.model_assign_name, + "model_name": record.model_name, + "prompt_tokens": record.prompt_tokens, + "completion_tokens": record.completion_tokens, + "cost": record.cost, + "time_cost": record.time_cost, + } + for record in records + ] + + +def fetch_messages_since(query_start_time: datetime) -> list[Messages]: + """获取指定时间之后的消息记录。""" + with get_db_session(auto_commit=False) as session: + statement = select(Messages).where(col(Messages.timestamp) >= query_start_time) + return list(session.exec(statement).all()) + + +def fetch_tool_records_since(query_start_time: datetime) -> list[ToolRecord]: + """获取指定时间之后的工具调用记录。""" + with get_db_session(auto_commit=False) as session: + statement = select(ToolRecord).where(col(ToolRecord.timestamp) >= query_start_time) + return list(session.exec(statement).all()) + + +def get_earliest_statistics_time(fallback_time: datetime) -> datetime: + """获取统计数据中最早的记录时间。""" + try: + with get_db_session(auto_commit=False) as session: + start_times = [ + session.exec(select(func.min(ModelUsage.timestamp))).first(), + session.exec(select(func.min(Messages.timestamp))).first(), + session.exec(select(func.min(OnlineTime.start_timestamp))).first(), + session.exec(select(func.min(ToolRecord.timestamp))).first(), + ] + except Exception as e: + logger.warning(f"获取全量统计起始时间失败,将使用回退时间: {e}") + return fallback_time + + valid_start_times = [item for item in start_times if isinstance(item, datetime)] + if valid_start_times: + return min(valid_start_times) + return fallback_time diff --git a/src/webui/routers/chat/service.py b/src/webui/routers/chat/service.py index 7a35a8c3..1ccaf9f8 
100644 --- a/src/webui/routers/chat/service.py +++ b/src/webui/routers/chat/service.py @@ -360,11 +360,8 @@ class ChatConnectionManager: existing.virtual_config = virtual_config existing.sender = sender logger.debug( - "WebUI 聊天会话复用: session=%s, connection=%s, client_session=%s, channel=%s", - session_id, - connection_id, - client_session_id, - channel_key, + f"WebUI 聊天会话复用: session={session_id}, connection={connection_id}, " + f"client_session={client_session_id}, channel={channel_key}", ) return if existing_session_id is not None: @@ -387,12 +384,8 @@ class ChatConnectionManager: self.user_sessions.setdefault(user_id, set()).add(session_id) self._bind_channel(session_id, channel_key) logger.info( - "WebUI 聊天会话已连接: session=%s, connection=%s, client_session=%s, user=%s, channel=%s", - session_id, - connection_id, - client_session_id, - user_id, - channel_key, + f"WebUI 聊天会话已连接: session={session_id}, connection={connection_id}, " + f"client_session={client_session_id}, user={user_id}, channel={channel_key}", ) def disconnect(self, session_id: str) -> None: @@ -420,7 +413,7 @@ class ChatConnectionManager: if not user_session_ids: del self.user_sessions[session_connection.user_id] - logger.info("WebUI 聊天会话已断开: session=%s", session_id) + logger.info(f"WebUI 聊天会话已断开: session={session_id}") def disconnect_connection(self, connection_id: str) -> None: """断开物理连接下的全部逻辑聊天会话。 @@ -495,7 +488,7 @@ class ChatConnectionManager: try: await session_connection.sender(message) except Exception as exc: - logger.error("发送聊天消息失败: session=%s, error=%s", session_id, exc) + logger.error(f"发送聊天消息失败: session={session_id}, error={exc}") async def broadcast(self, message: Dict[str, Any]) -> None: """向全部逻辑聊天会话广播消息。 @@ -659,10 +652,8 @@ def resolve_initial_virtual_identity( group_name=group_name or "WebUI虚拟群聊", ) logger.info( - "虚拟身份模式已通过参数激活: %s @ %s, group_id=%s", - virtual_config.user_nickname, - virtual_config.platform, - virtual_group_id, + f"虚拟身份模式已通过参数激活: {virtual_config.user_nickname} @ 
" + f"{virtual_config.platform}, group_id={virtual_group_id}", ) return virtual_config except Exception as exc: diff --git a/src/webui/routers/statistics.py b/src/webui/routers/statistics.py index c101eba3..3c6b8034 100644 --- a/src/webui/routers/statistics.py +++ b/src/webui/routers/statistics.py @@ -1,349 +1,34 @@ -"""统计数据 API 路由""" - from datetime import datetime, timedelta -from typing import Any, Dict, List from fastapi import APIRouter, Depends, HTTPException -from pydantic import BaseModel, Field -from sqlalchemy import desc, func, or_ -from sqlmodel import col, select -from src.common.database.database import get_db_session -from src.common.database.database_model import ModelUsage, OnlineTime from src.common.logger import get_logger -from src.common.message_repository import count_messages +from src.services.statistics_service import get_dashboard_statistics, get_model_statistics, get_summary_statistics from src.webui.dependencies import require_auth +from src.webui.schemas.statistics import DashboardData logger = get_logger("webui.statistics") router = APIRouter(prefix="/statistics", tags=["statistics"], dependencies=[Depends(require_auth)]) -class StatisticsSummary(BaseModel): - """统计数据摘要""" - - total_requests: int = Field(0, description="总请求数") - total_cost: float = Field(0.0, description="总花费") - total_tokens: int = Field(0, description="总token数") - online_time: float = Field(0.0, description="在线时间(秒)") - total_messages: int = Field(0, description="总消息数") - total_replies: int = Field(0, description="总回复数") - avg_response_time: float = Field(0.0, description="平均响应时间") - cost_per_hour: float = Field(0.0, description="每小时花费") - tokens_per_hour: float = Field(0.0, description="每小时token数") - - -class ModelStatistics(BaseModel): - """模型统计""" - - model_name: str - request_count: int - total_cost: float - total_tokens: int - avg_response_time: float - - -class TimeSeriesData(BaseModel): - """时间序列数据""" - - timestamp: str - requests: int = 0 - cost: float = 0.0 
- tokens: int = 0 - - -class DashboardData(BaseModel): - """仪表盘数据""" - - summary: StatisticsSummary - model_stats: List[ModelStatistics] - hourly_data: List[TimeSeriesData] - daily_data: List[TimeSeriesData] - recent_activity: List[Dict[str, Any]] - - @router.get("/dashboard", response_model=DashboardData) -async def get_dashboard_data(hours: int = 24): - """ - 获取仪表盘统计数据 - - Args: - hours: 统计时间范围(小时),默认24小时 - - Returns: - 仪表盘数据 - """ +async def get_dashboard_data(hours: int = 24) -> DashboardData: + """获取仪表盘统计数据。""" try: - now = datetime.now() - start_time = now - timedelta(hours=hours) - - # 获取摘要数据 - summary = await _get_summary_statistics(start_time, now) - - # 获取模型统计 - model_stats = await _get_model_statistics(start_time) - - # 获取小时级时间序列数据 - hourly_data = await _get_hourly_statistics(start_time, now) - - # 获取日级时间序列数据(最近7天) - daily_start = now - timedelta(days=7) - daily_data = await _get_daily_statistics(daily_start, now) - - # 获取最近活动 - recent_activity = await _get_recent_activity(limit=10) - - return DashboardData( - summary=summary, - model_stats=model_stats, - hourly_data=hourly_data, - daily_data=daily_data, - recent_activity=recent_activity, - ) + return await get_dashboard_statistics(hours=hours) except Exception as e: logger.error(f"获取仪表盘数据失败: {e}") raise HTTPException(status_code=500, detail=f"获取统计数据失败: {str(e)}") from e -async def _get_summary_statistics(start_time: datetime, end_time: datetime) -> StatisticsSummary: - """获取摘要统计数据(优化:使用数据库聚合)""" - summary = StatisticsSummary( - total_requests=0, - total_cost=0.0, - total_tokens=0, - online_time=0.0, - total_messages=0, - total_replies=0, - avg_response_time=0.0, - cost_per_hour=0.0, - tokens_per_hour=0.0, - ) - - # 使用聚合查询替代全量加载 - with get_db_session() as session: - statement = select( - func.count().label("total_requests"), - func.sum(col(ModelUsage.cost)).label("total_cost"), - func.sum(col(ModelUsage.total_tokens)).label("total_tokens"), - 
func.avg(col(ModelUsage.time_cost)).label("avg_response_time"), - ).where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) - result = session.exec(statement).first() - - if result: - total_requests, total_cost, total_tokens, avg_response_time = result - summary.total_requests = total_requests or 0 - summary.total_cost = float(total_cost or 0.0) - summary.total_tokens = total_tokens or 0 - summary.avg_response_time = float(avg_response_time or 0.0) - - # 查询在线时间 - 这个数据量通常不大,保留原逻辑 - with get_db_session() as session: - statement = select(OnlineTime).where( - or_( - col(OnlineTime.start_timestamp) >= start_time, - col(OnlineTime.end_timestamp) >= start_time, - ) - ) - online_records = session.exec(statement).all() - - for record in online_records: - start = max(record.start_timestamp, start_time) - end = min(record.end_timestamp, end_time) - if end > start: - summary.online_time += (end - start).total_seconds() - - summary.total_messages = count_messages(start_time=start_time.timestamp(), end_time=end_time.timestamp()) - summary.total_replies = count_messages( - start_time=start_time.timestamp(), - end_time=end_time.timestamp(), - has_reply_to=True, - ) - - # 计算派生指标 - if summary.online_time > 0: - online_hours = summary.online_time / 3600.0 - summary.cost_per_hour = summary.total_cost / online_hours - summary.tokens_per_hour = summary.total_tokens / online_hours - - return summary - - -async def _get_model_statistics(start_time: datetime) -> List[ModelStatistics]: - """获取模型统计数据(优化:使用数据库聚合和分组)""" - # 使用GROUP BY聚合,避免全量加载 - statement = ( - select(ModelUsage) - .where(col(ModelUsage.timestamp) >= start_time) - .order_by(desc(col(ModelUsage.timestamp))) - .limit(200) - ) - - with get_db_session() as session: - rows = session.exec(statement).all() - - aggregates: Dict[str, Dict[str, float | int]] = {} - for record in rows: - model_name = record.model_assign_name or record.model_name or "unknown" - if model_name not in aggregates: - 
aggregates[model_name] = { - "request_count": 0, - "total_cost": 0.0, - "total_tokens": 0, - "total_time_cost": 0.0, - "time_cost_count": 0, - } - bucket = aggregates[model_name] - bucket["request_count"] = int(bucket["request_count"]) + 1 - bucket["total_cost"] = float(bucket["total_cost"]) + float(record.cost or 0.0) - bucket["total_tokens"] = int(bucket["total_tokens"]) + int(record.total_tokens or 0) - if record.time_cost: - bucket["total_time_cost"] = float(bucket["total_time_cost"]) + float(record.time_cost) - bucket["time_cost_count"] = int(bucket["time_cost_count"]) + 1 - - result: List[ModelStatistics] = [] - for model_name, bucket in sorted( - aggregates.items(), - key=lambda item: float(item[1]["request_count"]), - reverse=True, - )[:10]: - time_cost_count = int(bucket["time_cost_count"]) - avg_time_cost = float(bucket["total_time_cost"]) / time_cost_count if time_cost_count > 0 else 0.0 - result.append( - ModelStatistics( - model_name=model_name, - request_count=int(bucket["request_count"]), - total_cost=float(bucket["total_cost"]), - total_tokens=int(bucket["total_tokens"]), - avg_response_time=avg_time_cost, - ) - ) - - return result - - -async def _get_hourly_statistics(start_time: datetime, end_time: datetime) -> List[TimeSeriesData]: - """获取小时级统计数据(优化:使用数据库聚合)""" - # SQLite的日期时间函数进行小时分组 - # 使用strftime将timestamp格式化为小时级别 - hour_expr = func.strftime("%Y-%m-%dT%H:00:00", col(ModelUsage.timestamp)) - statement = ( - select( - hour_expr.label("hour"), - func.count().label("requests"), - func.sum(col(ModelUsage.cost)).label("cost"), - func.sum(col(ModelUsage.total_tokens)).label("tokens"), - ) - .where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) - .group_by(hour_expr) - ) - - with get_db_session() as session: - rows = session.exec(statement).all() - - # 转换为字典以快速查找 - data_dict = {row[0]: row for row in rows} - - # 填充所有小时(包括没有数据的) - result = [] - current = start_time.replace(minute=0, second=0, microsecond=0) - while 
current <= end_time: - hour_str = current.strftime("%Y-%m-%dT%H:00:00") - if hour_str in data_dict: - row = data_dict[hour_str] - result.append( - TimeSeriesData( - timestamp=hour_str, - requests=row[1] or 0, - cost=float(row[2] or 0.0), - tokens=row[3] or 0, - ) - ) - else: - result.append(TimeSeriesData(timestamp=hour_str, requests=0, cost=0.0, tokens=0)) - current += timedelta(hours=1) - - return result - - -async def _get_daily_statistics(start_time: datetime, end_time: datetime) -> List[TimeSeriesData]: - """获取日级统计数据(优化:使用数据库聚合)""" - # 使用strftime按日期分组 - day_expr = func.strftime("%Y-%m-%dT00:00:00", col(ModelUsage.timestamp)) - statement = ( - select( - day_expr.label("day"), - func.count().label("requests"), - func.sum(col(ModelUsage.cost)).label("cost"), - func.sum(col(ModelUsage.total_tokens)).label("tokens"), - ) - .where(col(ModelUsage.timestamp) >= start_time, col(ModelUsage.timestamp) <= end_time) - .group_by(day_expr) - ) - - with get_db_session() as session: - rows = session.exec(statement).all() - - # 转换为字典 - data_dict = {row[0]: row for row in rows} - - # 填充所有天 - result = [] - current = start_time.replace(hour=0, minute=0, second=0, microsecond=0) - while current <= end_time: - day_str = current.strftime("%Y-%m-%dT00:00:00") - if day_str in data_dict: - row = data_dict[day_str] - result.append( - TimeSeriesData( - timestamp=day_str, - requests=row[1] or 0, - cost=float(row[2] or 0.0), - tokens=row[3] or 0, - ) - ) - else: - result.append(TimeSeriesData(timestamp=day_str, requests=0, cost=0.0, tokens=0)) - current += timedelta(days=1) - - return result - - -async def _get_recent_activity(limit: int = 10) -> List[Dict[str, Any]]: - """获取最近活动""" - with get_db_session() as session: - statement = select(ModelUsage).order_by(desc(col(ModelUsage.timestamp))).limit(limit) - records = session.exec(statement).all() - - activities = [] - for record in records: - activities.append( - { - "timestamp": record.timestamp.isoformat(), - "model": 
record.model_assign_name or record.model_name, - "request_type": record.request_type, - "tokens": record.total_tokens or 0, - "cost": record.cost or 0.0, - "time_cost": record.time_cost or 0.0, - "status": None, - } - ) - - return activities - - @router.get("/summary") async def get_summary(hours: int = 24): - """ - 获取统计摘要 - - Args: - hours: 统计时间范围(小时) - """ + """获取统计摘要。""" try: now = datetime.now() start_time = now - timedelta(hours=hours) - summary = await _get_summary_statistics(start_time, now) - return summary + return await get_summary_statistics(start_time, now) except Exception as e: logger.error(f"获取统计摘要失败: {e}") raise HTTPException(status_code=500, detail=str(e)) from e @@ -351,17 +36,10 @@ async def get_summary(hours: int = 24): @router.get("/models") async def get_model_stats(hours: int = 24): - """ - 获取模型统计 - - Args: - hours: 统计时间范围(小时) - """ + """获取模型统计。""" try: - now = datetime.now() - start_time = now - timedelta(hours=hours) - stats = await _get_model_statistics(start_time) - return stats + start_time = datetime.now() - timedelta(hours=hours) + return await get_model_statistics(start_time) except Exception as e: logger.error(f"获取模型统计失败: {e}") raise HTTPException(status_code=500, detail=str(e)) from e diff --git a/src/webui/routers/system.py b/src/webui/routers/system.py index 837939c3..83df4902 100644 --- a/src/webui/routers/system.py +++ b/src/webui/routers/system.py @@ -4,13 +4,17 @@ 提供系统重启、状态查询等功能 """ -import os -import time from datetime import datetime +from importlib.metadata import PackageNotFoundError, version as get_package_version +from typing import Any, Dict, Optional from fastapi import APIRouter, Depends, HTTPException from pydantic import BaseModel +import httpx +import os +import time + from src.common.logger import get_logger from src.config.config import MMC_VERSION from src.webui.dependencies import require_auth @@ -20,6 +24,10 @@ logger = get_logger("webui_system") # 记录启动时间 _start_time = time.time() +_DASHBOARD_PACKAGE_NAME = 
"maibot-dashboard" +_PYPI_JSON_URL = f"https://pypi.org/pypi/{_DASHBOARD_PACKAGE_NAME}/json" +_PYPI_CACHE_TTL_SECONDS = 60 * 60 * 6 +_pypi_version_cache: Dict[str, Any] = {"checked_at": 0.0, "latest_version": None} class RestartResponse(BaseModel): @@ -38,6 +46,72 @@ class StatusResponse(BaseModel): start_time: str +class DashboardVersionResponse(BaseModel): + """WebUI 版本检查响应""" + + current_version: str + latest_version: Optional[str] = None + has_update: bool = False + package_name: str = _DASHBOARD_PACKAGE_NAME + pypi_url: str = f"https://pypi.org/project/{_DASHBOARD_PACKAGE_NAME}/" + + +def _get_installed_dashboard_version() -> str: + try: + return get_package_version(_DASHBOARD_PACKAGE_NAME) + except PackageNotFoundError: + return "unknown" + + +def _normalize_version(version: str) -> tuple[int, ...]: + clean_version = version.strip().lower().removeprefix("v") + numeric_part = clean_version.split("-", 1)[0].split("+", 1)[0] + parts = [] + for item in numeric_part.split("."): + number = "" + for char in item: + if not char.isdigit(): + break + number += char + parts.append(int(number) if number else 0) + return tuple(parts) + + +def _is_newer_version(latest_version: Optional[str], current_version: str) -> bool: + if not latest_version or not current_version or current_version == "unknown": + return False + + latest_parts = _normalize_version(latest_version) + current_parts = _normalize_version(current_version) + width = max(len(latest_parts), len(current_parts)) + return latest_parts + (0,) * (width - len(latest_parts)) > current_parts + (0,) * (width - len(current_parts)) + + +async def _get_latest_dashboard_version_from_pypi() -> Optional[str]: + now = time.time() + cached_version = _pypi_version_cache.get("latest_version") + checked_at = float(_pypi_version_cache.get("checked_at", 0.0)) + if cached_version and now - checked_at < _PYPI_CACHE_TTL_SECONDS: + return str(cached_version) + + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = 
await client.get(_PYPI_JSON_URL) + response.raise_for_status() + payload = response.json() + except Exception as e: + logger.debug(f"检查 WebUI PyPI 版本失败: {e}") + return str(cached_version) if cached_version else None + + latest_version = payload.get("info", {}).get("version") + if isinstance(latest_version, str) and latest_version.strip(): + _pypi_version_cache["checked_at"] = now + _pypi_version_cache["latest_version"] = latest_version.strip() + return latest_version.strip() + + return str(cached_version) if cached_version else None + + @router.post("/restart", response_model=RestartResponse) async def restart_maibot(): """ @@ -89,6 +163,19 @@ async def get_maibot_status(): raise HTTPException(status_code=500, detail=f"获取状态失败: {str(e)}") from e +@router.get("/dashboard-version", response_model=DashboardVersionResponse) +async def get_dashboard_version(current_version: Optional[str] = None): + """获取 WebUI 当前版本和 PyPI 最新版本。""" + resolved_current_version = current_version or _get_installed_dashboard_version() + latest_version = await _get_latest_dashboard_version_from_pypi() + + return DashboardVersionResponse( + current_version=resolved_current_version, + latest_version=latest_version, + has_update=_is_newer_version(latest_version, resolved_current_version), + ) + + # 可选:添加更多系统控制功能 diff --git a/src/webui/routers/websocket/manager.py b/src/webui/routers/websocket/manager.py index 81d6b929..54d8c72d 100644 --- a/src/webui/routers/websocket/manager.py +++ b/src/webui/routers/websocket/manager.py @@ -77,7 +77,7 @@ class UnifiedWebSocketManager: except asyncio.CancelledError: raise except Exception as exc: - logger.error("统一 WebSocket 发送失败: connection=%s, error=%s", connection.connection_id, exc) + logger.error(f"统一 WebSocket 发送失败: connection={connection.connection_id}, error={exc}") async def connect(self, connection_id: str, websocket: WebSocket) -> WebSocketConnection: """注册一个新的物理 WebSocket 连接。 @@ -108,7 +108,7 @@ class UnifiedWebSocketManager: try: await 
self._close_websocket(connection) except Exception as exc: - logger.debug("关闭统一 WebSocket 底层连接时出现异常: connection=%s, error=%s", connection_id, exc) + logger.debug(f"关闭统一 WebSocket 底层连接时出现异常: connection={connection_id}, error={exc}") await connection.send_queue.put(None) if connection.sender_task is not None: @@ -117,7 +117,7 @@ class UnifiedWebSocketManager: except asyncio.CancelledError: pass except Exception as exc: - logger.debug("等待发送协程退出时出现异常: connection=%s, error=%s", connection_id, exc) + logger.debug(f"等待发送协程退出时出现异常: connection={connection_id}, error={exc}") def get_connection(self, connection_id: str) -> Optional[WebSocketConnection]: """获取指定连接上下文。 diff --git a/src/webui/routers/websocket/unified.py b/src/webui/routers/websocket/unified.py index 98d2ffc8..767d65ad 100644 --- a/src/webui/routers/websocket/unified.py +++ b/src/webui/routers/websocket/unified.py @@ -544,7 +544,7 @@ async def websocket_endpoint(websocket: WebSocket, token: Optional[str] = Query( connection_id = uuid.uuid4().hex await websocket_manager.connect(connection_id=connection_id, websocket=websocket) - logger.info("统一 WebSocket 客户端已连接: connection=%s", connection_id) + logger.info(f"统一 WebSocket 客户端已连接: connection={connection_id}") await websocket_manager.send_event( connection_id, domain="system", @@ -565,17 +565,15 @@ async def websocket_endpoint(websocket: WebSocket, token: Optional[str] = Query( continue await handle_client_message(connection_id, cast(Dict[str, Any], raw_message)) except WebSocketDisconnect: - logger.info("统一 WebSocket 客户端已断开: connection=%s", connection_id) + logger.info(f"统一 WebSocket 客户端已断开: connection={connection_id}") except asyncio.CancelledError: - logger.warning("统一 WebSocket 连接处理被取消: connection=%s", connection_id) + logger.warning(f"统一 WebSocket 连接处理被取消: connection={connection_id}") raise except Exception as exc: - logger.error("统一 WebSocket 处理失败: connection=%s, error=%s", connection_id, exc, exc_info=True) + logger.error(f"统一 WebSocket 处理失败: 
connection={connection_id}, error={exc}", exc_info=True) finally: chat_manager.disconnect_connection(connection_id) await websocket_manager.disconnect(connection_id) logger.info( - "统一 WebSocket 连接清理完成: connection=%s, 剩余连接=%s", - connection_id, - len(websocket_manager.connections), + f"统一 WebSocket 连接清理完成: connection={connection_id}, 剩余连接={len(websocket_manager.connections)}", ) diff --git a/uv.lock b/uv.lock index dba2d9cf..4853ca9a 100644 --- a/uv.lock +++ b/uv.lock @@ -1511,7 +1511,7 @@ requires-dist = [ { name = "httpx", extras = ["socks"] }, { name = "jieba", specifier = ">=0.42.1" }, { name = "json-repair", specifier = ">=0.47.6" }, - { name = "maibot-dashboard", specifier = ">=1.0.5" }, + { name = "maibot-dashboard", specifier = ">=1.0.6" }, { name = "maibot-plugin-sdk", specifier = ">=2.4.0" }, { name = "maim-message", specifier = ">=0.6.2" }, { name = "matplotlib", specifier = ">=3.10.5" }, @@ -1549,11 +1549,11 @@ dev = [ [[package]] name = "maibot-dashboard" -version = "1.0.5" +version = "1.0.6" source = { registry = "https://pypi.tuna.tsinghua.edu.cn/simple" } -sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/b8/a7/eb1032664ea98b58a861412aca19b31066dc3368f1264a2a53970bd9385c/maibot_dashboard-1.0.5.tar.gz", hash = "sha256:3480723e42120defbaf8ebb952c45bc3e0cd9274a04c5acda0331e55e15ebdc1", size = 2477306, upload-time = "2026-05-05T10:40:34.327Z" } +sdist = { url = "https://pypi.tuna.tsinghua.edu.cn/packages/73/76/a2a47f902f20bbaa699584d7fa9676f591503e0d6954de65aa0a90c07000/maibot_dashboard-1.0.6.tar.gz", hash = "sha256:f383d3505a102554a51bf49d1fc56a8ba8c5db60a3c41b7eab4513a6fd0a1f88", size = 2485522, upload-time = "2026-05-06T10:44:36.42Z" } wheels = [ - { url = "https://pypi.tuna.tsinghua.edu.cn/packages/3a/a0/ad4f7c1d381875ca8d1aeedf5ff6e94692f64cf558479f9e845e47bca830/maibot_dashboard-1.0.5-py3-none-any.whl", hash = "sha256:67bfbb82a1ddd666d20cc958864db38df2e5493f77df0cb049ae83987b1dd65d", size = 2542631, upload-time = "2026-05-05T10:40:32.5Z" 
}, + { url = "https://pypi.tuna.tsinghua.edu.cn/packages/ae/14/a62631e60c9606a793d6740ef61fc0b8868cf8a79c9f192667026874799b/maibot_dashboard-1.0.6-py3-none-any.whl", hash = "sha256:36299d7039fbb98fd8aa1fb31d2bbc040d1018d9d87ebcf09194e4efb0cf9af7", size = 2552642, upload-time = "2026-05-06T10:44:34.216Z" }, ] [[package]]
管理 AI 模型厂商的 API 配置
配置可用的模型列表
+ 找到 {filteredProviders.length} 个结果 +
- 找到 {filteredProviders.length} 个结果 -
+ {hasUnsavedChanges + ? '当前有未保存的配置更改。确认后会先保存配置,然后重启麦麦使新配置生效。' + : '即将重启麦麦主程序。配置将在重启后生效。' + } +