fix:优化聊天流信息的展示和检索,优化chat_prompt无效的问题,优化部分群定义问题

This commit is contained in:
SengokuCola
2026-05-07 18:06:55 +08:00
parent 93cef02d92
commit b6808d4b73
21 changed files with 1219 additions and 165 deletions

View File

@@ -1,4 +1,4 @@
import { Activity, Boxes, Database, FileSearch, FileText, Hash, Home, MessageSquare, Network, Package, ScrollText, Settings, Sliders, Smile } from 'lucide-react'
import { Activity, Boxes, BrainCircuit, Database, FileSearch, FileText, Hash, Home, MessageSquare, Network, Package, ScrollText, Settings, Sliders, Smile } from 'lucide-react'
import type { MenuSection } from './types'
@@ -39,6 +39,7 @@ export const menuSections: MenuSection[] = [
title: 'sidebar.groups.system',
items: [
{ icon: FileSearch, label: 'sidebar.menu.logViewer', path: '/logs', searchDescription: 'search.items.logsDesc' },
{ icon: BrainCircuit, label: 'sidebar.menu.reasoningProcess', path: '/reasoning-process', searchDescription: 'search.items.reasoningProcessDesc' },
{ icon: Settings, label: 'sidebar.menu.settings', path: '/settings', searchDescription: 'search.items.settingsDesc' },
],
},

View File

@@ -40,6 +40,7 @@
"pluginConfig": "Plugin Management",
"mcpSettings": "MCP Settings",
"logViewer": "Log Viewer",
"reasoningProcess": "Reasoning Process",
"maisakaMonitor": "MaiSaka Chat Monitor",
"localChat": "Local Chat",
"settings": "Settings"
@@ -793,6 +794,7 @@
"pluginsDesc": "Browse and install plugins",
"logs": "Log Viewer",
"logsDesc": "View system logs",
"reasoningProcessDesc": "Browse Maisaka prompt reasoning logs",
"settings": "Settings",
"settingsDesc": "Configure system settings"
}

View File

@@ -40,6 +40,7 @@
"pluginConfig": "プラグイン管理",
"mcpSettings": "MCP 設定",
"logViewer": "ログビューア",
"reasoningProcess": "推論プロセス",
"maisakaMonitor": "MaiSaka チャット監視",
"localChat": "ローカルチャット",
"settings": "設定"
@@ -793,6 +794,7 @@
"pluginsDesc": "プラグインを閲覧してインストール",
"logs": "ログビューア",
"logsDesc": "システムログを表示",
"reasoningProcessDesc": "Maisaka prompt の推論ログを閲覧",
"settings": "設定",
"settingsDesc": "システム設定を構成"
}

View File

@@ -40,6 +40,7 @@
"pluginConfig": "플러그인 관리",
"mcpSettings": "MCP 설정",
"logViewer": "로그 뷰어",
"reasoningProcess": "추론 과정",
"maisakaMonitor": "MaiSaka 채팅 모니터",
"localChat": "로컬 채팅",
"settings": "설정"
@@ -793,6 +794,7 @@
"pluginsDesc": "플러그인 탐색 및 설치",
"logs": "로그 뷰어",
"logsDesc": "시스템 로그 보기",
"reasoningProcessDesc": "Maisaka prompt 추론 로그 보기",
"settings": "설정",
"settingsDesc": "시스템 설정 구성"
}

View File

@@ -40,6 +40,7 @@
"pluginConfig": "插件管理",
"mcpSettings": "MCP 设置",
"logViewer": "日志查看器",
"reasoningProcess": "推理过程",
"maisakaMonitor": "麦麦观察",
"localChat": "本地聊天室",
"settings": "系统设置"
@@ -793,6 +794,7 @@
"pluginsDesc": "浏览和安装插件",
"logs": "日志查看器",
"logsDesc": "查看系统日志",
"reasoningProcessDesc": "浏览 Maisaka prompt 推理记录",
"settings": "系统设置",
"settingsDesc": "配置系统参数"
}

View File

@@ -0,0 +1,67 @@
import { parseResponse, throwIfError } from '@/lib/api-helpers'
import { resolveApiPath } from '@/lib/api-base'
import { fetchWithAuth } from '@/lib/fetch-with-auth'
const API_BASE = '/api/webui/reasoning-process'
export type ReasoningPromptFile = {
stage: string
session_id: string
stem: string
timestamp: number | null
text_path: string | null
html_path: string | null
size: number
modified_at: number
}
export type ReasoningPromptListResponse = {
items: ReasoningPromptFile[]
total: number
page: number
page_size: number
stages: string[]
sessions: string[]
}
export type ReasoningPromptContentResponse = {
path: string
content: string
size: number
modified_at: number
}
export type ReasoningPromptListParams = {
stage?: string
session?: string
search?: string
page?: number
pageSize?: number
}
/**
 * Fetch a filtered, paginated listing of reasoning prompt files.
 *
 * Unset filters default to 'all' (no filtering server-side); paging defaults
 * to page 1 with 50 items per page. Responses are never served from cache.
 */
export async function listReasoningPromptFiles(
  params: ReasoningPromptListParams
): Promise<ReasoningPromptListResponse> {
  const query = new URLSearchParams({
    stage: params.stage ?? 'all',
    session: params.session ?? 'all',
    search: params.search ?? '',
    page: String(params.page ?? 1),
    page_size: String(params.pageSize ?? 50),
  })
  const response = await fetchWithAuth(`${API_BASE}/files?${query}`, { cache: 'no-store' })
  return throwIfError(await parseResponse<ReasoningPromptListResponse>(response))
}
/** Load the raw text content (plus size/mtime metadata) of one prompt file. */
export async function getReasoningPromptFile(
  path: string
): Promise<ReasoningPromptContentResponse> {
  const target = `${API_BASE}/file?path=${encodeURIComponent(path)}`
  const response = await fetchWithAuth(target, { cache: 'no-store' })
  return throwIfError(await parseResponse<ReasoningPromptContentResponse>(response))
}
/** Resolve the absolute URL used to embed the HTML rendering of a prompt file (e.g. in an iframe). */
export async function getReasoningPromptHtmlUrl(path: string): Promise<string> {
  const query = `path=${encodeURIComponent(path)}`
  return resolveApiPath(`${API_BASE}/html?${query}`)
}

View File

@@ -162,6 +162,12 @@ const logsRoute = createRoute({
component: lazyRouteComponent(() => import('./routes/logs'), 'LogViewerPage'),
})
const reasoningProcessRoute = createRoute({
getParentRoute: () => protectedRoute,
path: '/reasoning-process',
component: lazyRouteComponent(() => import('./routes/reasoning-process'), 'ReasoningProcessPage'),
})
// MaiSaka 聊天流监控路由
const plannerMonitorRoute = createRoute({
getParentRoute: () => protectedRoute,
@@ -289,6 +295,7 @@ const routeTree = rootRoute.addChildren([
pluginMirrorsRoute,
mcpSettingsRoute,
logsRoute,
reasoningProcessRoute,
plannerMonitorRoute,
chatRoute,
settingsRoute,

View File

@@ -17,9 +17,6 @@ export function PlannerMonitorPage() {
<Activity className="h-6 w-6 sm:h-7 sm:w-7" />
</h1>
<p className="text-muted-foreground mt-1 sm:mt-2 text-sm sm:text-base">
MaiSaka
</p>
</div>
</div>

View File

@@ -4,6 +4,7 @@
* 管理 WebSocket 订阅与事件流的状态。
*/
import { useCallback, useEffect, useState } from 'react'
import { openDB, type DBSchema, type IDBPDatabase } from 'idb'
import type { MaisakaMonitorEvent } from '@/lib/maisaka-monitor-client'
import { maisakaMonitorClient } from '@/lib/maisaka-monitor-client'
@@ -34,9 +35,14 @@ export interface SessionInfo {
eventCount: number
}
/** 最大保留的时间线条目数 */
const MAX_TIMELINE_ENTRIES = 500
/** 前端内存中最多恢复/展示的时间线条目数,避免一次渲染过多节点。 */
const MAX_TIMELINE_ENTRIES = 3000
/** IndexedDB 中最多持久化的时间线条目数。 */
const MAX_PERSISTED_TIMELINE_ENTRIES = 10000
const PERSIST_PRUNE_INTERVAL = 200
const BACKGROUND_COLLECTION_STORAGE_KEY = 'maisaka-monitor-background-collection'
const MONITOR_DB_NAME = 'maisaka-monitor-db'
const MONITOR_DB_VERSION = 1
function resolveSessionDisplayName({
fallbackName,
@@ -81,6 +87,39 @@ let monitorSubscriptionStarted = false
let monitorSubscriptionPromise: Promise<void> | null = null
let monitorUnsubscribe: (() => Promise<void>) | null = null
const storeListeners = new Set<() => void>()
let persistSnapshotTimer: ReturnType<typeof setTimeout> | null = null
let monitorDbPromise: Promise<IDBPDatabase<MaisakaMonitorDb>> | null = null
let persistedEntryCountSincePrune = 0
let pendingPersistEntries: TimelineEntry[] = []
let pendingPersistSessionIds = new Set<string>()
let pendingPersistMeta = false
interface PersistedTimelineEntry extends TimelineEntry {
persistedAt: number
}
interface MonitorMetaRecord {
key: string
value: unknown
}
interface MaisakaMonitorDb extends DBSchema {
timeline: {
key: string
value: PersistedTimelineEntry
indexes: {
'by-timestamp': number
}
}
sessions: {
key: string
value: SessionInfo
}
meta: {
key: string
value: MonitorMetaRecord
}
}
function notifyStoreListeners() {
storeListeners.forEach((listener) => listener())
@@ -98,6 +137,163 @@ function loadBackgroundCollectionPreference() {
return backgroundCollectionEnabled
}
/**
 * Lazily open (and memoize) the monitor IndexedDB.
 * Returns null when IndexedDB is unavailable (SSR or unsupported browser).
 */
function getMonitorDb() {
  if (typeof window === 'undefined' || !window.indexedDB) {
    return null
  }
  // ??= guarantees the database is opened at most once per page lifetime.
  monitorDbPromise ??= openDB<MaisakaMonitorDb>(MONITOR_DB_NAME, MONITOR_DB_VERSION, {
    upgrade(db) {
      // Timeline entries keyed by id, with a secondary index for ordering/pruning by timestamp.
      const timelineStore = db.createObjectStore('timeline', { keyPath: 'id' })
      timelineStore.createIndex('by-timestamp', 'timestamp')
      db.createObjectStore('sessions', { keyPath: 'sessionId' })
      db.createObjectStore('meta', { keyPath: 'key' })
    },
  })
  return monitorDbPromise
}
/** Project a persisted record back to a plain TimelineEntry (drops persistence-only persistedAt). */
function toTimelineEntry(record: PersistedTimelineEntry): TimelineEntry {
  const { id, type, data, timestamp, sessionId } = record
  return { id, type, data, timestamp, sessionId }
}
/**
 * Restore cached timeline/session/meta state from IndexedDB at module load.
 * Only the newest MAX_TIMELINE_ENTRIES timeline records are kept in memory.
 * Any failure is logged and ignored — the monitor simply starts empty.
 */
async function loadMonitorSnapshot() {
  if (typeof window === 'undefined') {
    return
  }
  try {
    const dbPromise = getMonitorDb()
    if (!dbPromise) {
      return
    }
    const db = await dbPromise
    const [timelineRecords, sessionRecords, selectedSessionMeta, entryCounterMeta] = await Promise.all([
      // Read via the timestamp index so slice(-N) keeps the newest entries.
      db.getAllFromIndex('timeline', 'by-timestamp'),
      db.getAll('sessions'),
      db.get('meta', 'selectedSession'),
      db.get('meta', 'entryCounter'),
    ])
    cachedTimeline = timelineRecords
      .slice(-MAX_TIMELINE_ENTRIES)
      .map(toTimelineEntry)
    cachedSessions = new Map(sessionRecords.map((session) => [session.sessionId, session]))
    cachedSelectedSession = typeof selectedSessionMeta?.value === 'string' ? selectedSessionMeta.value : null
    // Fall back to the restored timeline length so freshly generated ids stay unique.
    entryCounter = typeof entryCounterMeta?.value === 'number' ? entryCounterMeta.value : cachedTimeline.length
    notifyStoreListeners()
  } catch (error) {
    console.warn('读取 MaiSaka 观察 IndexedDB 缓存失败,已忽略:', error)
  }
}
/**
 * Delete the oldest persisted timeline records so that at most
 * MAX_PERSISTED_TIMELINE_ENTRIES remain in the 'timeline' store.
 */
async function prunePersistedTimeline(db: IDBPDatabase<MaisakaMonitorDb>) {
  // Keys come back ordered by the timestamp index, oldest first.
  const keys = await db.getAllKeysFromIndex('timeline', 'by-timestamp')
  const overflowCount = keys.length - MAX_PERSISTED_TIMELINE_ENTRIES
  if (overflowCount <= 0) {
    return
  }
  const tx = db.transaction('timeline', 'readwrite')
  for (const key of keys.slice(0, overflowCount)) {
    await tx.store.delete(key)
  }
  await tx.done
}
/**
 * Write the pending batch (timeline entries, touched sessions, meta) to IndexedDB.
 *
 * The pending buffers are swapped out before any await, so events arriving
 * mid-flush accumulate into the next batch. If the write then fails, the
 * swapped-out batch is dropped — persistence is best-effort (warning logged).
 * After every PERSIST_PRUNE_INTERVAL persisted entries, old records are pruned.
 */
async function flushMonitorSnapshot() {
  try {
    const dbPromise = getMonitorDb()
    if (!dbPromise) {
      return
    }
    // Swap out pending state synchronously before awaiting the DB.
    const entries = pendingPersistEntries
    const sessionIds = Array.from(pendingPersistSessionIds)
    const shouldPersistMeta = pendingPersistMeta
    pendingPersistEntries = []
    pendingPersistSessionIds = new Set()
    pendingPersistMeta = false
    if (entries.length === 0 && sessionIds.length === 0 && !shouldPersistMeta) {
      return
    }
    const db = await dbPromise
    const tx = db.transaction(['timeline', 'sessions', 'meta'], 'readwrite')
    const persistedAt = Date.now()
    for (const entry of entries) {
      await tx.objectStore('timeline').put({ ...entry, persistedAt })
    }
    for (const sessionId of sessionIds) {
      // Persist the latest in-memory session info (it may have changed since queueing).
      const session = cachedSessions.get(sessionId)
      if (session) {
        await tx.objectStore('sessions').put(session)
      }
    }
    // Meta is rewritten on every flush so the snapshot stays self-consistent.
    await tx.objectStore('meta').put({ key: 'selectedSession', value: cachedSelectedSession })
    await tx.objectStore('meta').put({ key: 'entryCounter', value: entryCounter })
    await tx.done
    persistedEntryCountSincePrune += entries.length
    if (persistedEntryCountSincePrune >= PERSIST_PRUNE_INTERVAL) {
      persistedEntryCountSincePrune = 0
      await prunePersistedTimeline(db)
    }
  } catch (error) {
    console.warn('保存 MaiSaka 观察 IndexedDB 缓存失败,已忽略:', error)
  }
}
/**
 * Wipe everything persisted for the monitor (timeline, sessions, meta) in one
 * transaction. Errors are logged and ignored.
 */
async function clearPersistedMonitorSnapshot() {
  try {
    const dbPromise = getMonitorDb()
    if (!dbPromise) {
      return
    }
    const db = await dbPromise
    const tx = db.transaction(['timeline', 'sessions', 'meta'], 'readwrite')
    await Promise.all([
      tx.objectStore('timeline').clear(),
      tx.objectStore('sessions').clear(),
      tx.objectStore('meta').clear(),
    ])
    await tx.done
  } catch (error) {
    console.warn('清空 MaiSaka 观察 IndexedDB 缓存失败,已忽略:', error)
  }
}
/**
 * Queue an optional timeline entry / session id for persistence and (re)arm the
 * 300 ms debounce timer that flushes the batch via flushMonitorSnapshot.
 * Meta (selected session + entry counter) is flagged on every call, even when
 * called with no arguments.
 */
function schedulePersistMonitorSnapshot(entry?: TimelineEntry, sessionId?: string) {
  if (typeof window === 'undefined') {
    return
  }
  if (entry) {
    pendingPersistEntries.push(entry)
  }
  if (sessionId) {
    pendingPersistSessionIds.add(sessionId)
  }
  pendingPersistMeta = true
  // Debounce: restart the timer on every call so bursts flush only once.
  if (persistSnapshotTimer !== null) {
    window.clearTimeout(persistSnapshotTimer)
  }
  persistSnapshotTimer = window.setTimeout(() => {
    persistSnapshotTimer = null
    void flushMonitorSnapshot()
  }, 300)
}
void loadMonitorSnapshot()
function shouldKeepMonitorActive() {
return activeConsumerCount > 0 || backgroundCollectionEnabled
}
@@ -172,13 +368,14 @@ function handleMonitorEvent(event: MaisakaMonitorEvent) {
return
}
appendTimelineEntry({
const entry: TimelineEntry = {
id: `evt_${++entryCounter}_${Date.now()}`,
type: event.type,
data: event.data,
timestamp,
sessionId,
})
}
appendTimelineEntry(entry)
updateSessionInfo(event, sessionId, timestamp)
@@ -186,6 +383,7 @@ function handleMonitorEvent(event: MaisakaMonitorEvent) {
cachedSelectedSession = sessionId
}
schedulePersistMonitorSnapshot(entry, sessionId)
notifyStoreListeners()
}
@@ -263,13 +461,22 @@ export function useMaisakaMonitor() {
const clearTimeline = useCallback(() => {
cachedTimeline = []
cachedSessions = new Map()
cachedSelectedSession = null
setTimeline([])
setSessions(new Map())
setSelectedSessionState(null)
pendingPersistEntries = []
pendingPersistSessionIds = new Set()
pendingPersistMeta = false
void clearPersistedMonitorSnapshot()
notifyStoreListeners()
}, [])
const setSelectedSession = useCallback((sessionId: string | null) => {
cachedSelectedSession = sessionId
setSelectedSessionState(sessionId)
schedulePersistMonitorSnapshot()
notifyStoreListeners()
}, [])

View File

@@ -0,0 +1,380 @@
import { useEffect, useState } from 'react'
import {
Clock,
Code2,
FileCode2,
FileText,
RefreshCw,
Search,
} from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { ScrollArea } from '@/components/ui/scroll-area'
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from '@/components/ui/select'
import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'
import {
getReasoningPromptFile,
getReasoningPromptHtmlUrl,
listReasoningPromptFiles,
type ReasoningPromptFile,
} from '@/lib/reasoning-process-api'
import { cn } from '@/lib/utils'
const PAGE_SIZE = 50
/**
 * Format a file's logical timestamp for list/detail display.
 *
 * @param timestamp Epoch value parsed from the file name, or null when absent.
 *                  (Presumably milliseconds — TODO confirm against the backend.)
 * @param modifiedAt Filesystem mtime in seconds; used as the fallback
 *                   (multiplied by 1000 to get milliseconds).
 * @returns Localized zh-CN date-time string (2-digit month/day/time fields).
 */
function formatTime(timestamp: number | null, modifiedAt: number): string {
  // `??` instead of truthiness: only a null timestamp falls back to mtime.
  // The previous `timestamp ? …` also discarded a legitimate 0 value, which
  // contradicts the `number | null` contract where null is the sole sentinel.
  const value = timestamp ?? modifiedAt * 1000
  return new Date(value).toLocaleString('zh-CN', {
    month: '2-digit',
    day: '2-digit',
    hour: '2-digit',
    minute: '2-digit',
    second: '2-digit',
  })
}
/**
 * Render a byte count as a short human-readable size:
 * plain bytes below 1 KiB, one-decimal KB below 1 MiB, otherwise one-decimal MB.
 */
function formatSize(size: number): string {
  const KIB = 1024
  const MIB = KIB * 1024
  if (size >= MIB) return `${(size / MIB).toFixed(1)} MB`
  if (size >= KIB) return `${(size / KIB).toFixed(1)} KB`
  return `${size} B`
}
/**
 * Reasoning-process browser page.
 *
 * Lists prompt log files (backed by the /api/webui/reasoning-process endpoints),
 * with stage/session/search filters and pagination on the left, and a text/HTML
 * preview of the selected file on the right.
 *
 * NOTE(review): several JSX elements below have empty text content (h1, the
 * "all" SelectItems, the pager buttons, the empty-state div, the text tab
 * label) — their labels appear to have been lost in transit; confirm against
 * the original source before shipping.
 */
export function ReasoningProcessPage() {
  // Current page of files plus the filter option sources returned by the API.
  const [items, setItems] = useState<ReasoningPromptFile[]>([])
  const [stages, setStages] = useState<string[]>([])
  const [sessions, setSessions] = useState<string[]>([])
  // Active filters; 'all' disables the corresponding filter server-side.
  const [stage, setStage] = useState('all')
  const [session, setSession] = useState('all')
  const [search, setSearch] = useState('')
  const [page, setPage] = useState(1)
  // Bumping refreshKey re-runs the listing effect without touching filters.
  const [refreshKey, setRefreshKey] = useState(0)
  const [total, setTotal] = useState(0)
  // Currently selected file and its preview state.
  const [selected, setSelected] = useState<ReasoningPromptFile | null>(null)
  const [textContent, setTextContent] = useState('')
  const [activePreview, setActivePreview] = useState<'text' | 'html'>('text')
  const [htmlPreviewUrl, setHtmlPreviewUrl] = useState('')
  const [loading, setLoading] = useState(false)
  const [contentLoading, setContentLoading] = useState(false)
  const [error, setError] = useState<string | null>(null)

  const totalPages = Math.max(1, Math.ceil(total / PAGE_SIZE))

  // Reload the listing whenever a filter, the page, or refreshKey changes.
  // `ignore` guards against state updates from a stale in-flight request.
  // NOTE(review): `search` fires a request per keystroke (no debounce) — confirm acceptable.
  useEffect(() => {
    let ignore = false
    async function loadFiles() {
      setLoading(true)
      setError(null)
      try {
        const data = await listReasoningPromptFiles({
          stage,
          session,
          search,
          page,
          pageSize: PAGE_SIZE,
        })
        if (ignore) return
        setItems(data.items)
        setStages(data.stages)
        setSessions(data.sessions)
        setTotal(data.total)
        // Keep the current selection if it still exists in the new page;
        // otherwise auto-select the first item (or clear when empty).
        setSelected((current) => {
          if (
            current &&
            data.items.some(
              (item) =>
                item.stem === current.stem &&
                item.stage === current.stage &&
                item.session_id === current.session_id
            )
          ) {
            return current
          }
          return data.items[0] ?? null
        })
      } catch (err) {
        if (!ignore) setError(err instanceof Error ? err.message : '加载推理过程失败')
      } finally {
        if (!ignore) setLoading(false)
      }
    }
    loadFiles()
    return () => {
      ignore = true
    }
  }, [page, refreshKey, search, session, stage])

  // Load preview content whenever the selection changes: text body (if any),
  // HTML preview URL (if any), and pick the default tab accordingly.
  useEffect(() => {
    let ignore = false
    async function loadContent() {
      if (!selected?.text_path) {
        setTextContent('')
        return
      }
      setContentLoading(true)
      try {
        const data = await getReasoningPromptFile(selected.text_path)
        if (!ignore) setTextContent(data.content)
      } catch (err) {
        if (!ignore) {
          // Errors are surfaced inline in the text pane rather than as a banner.
          setTextContent(err instanceof Error ? err.message : '读取文本失败')
        }
      } finally {
        if (!ignore) setContentLoading(false)
      }
    }
    async function loadHtmlPreviewUrl() {
      if (!selected?.html_path) {
        setHtmlPreviewUrl('')
        return
      }
      const url = await getReasoningPromptHtmlUrl(selected.html_path)
      if (!ignore) setHtmlPreviewUrl(url)
    }
    // Default to the HTML tab only when there is no text variant at all.
    if (selected?.html_path && !selected.text_path) {
      setActivePreview('html')
    } else {
      setActivePreview('text')
    }
    loadContent()
    loadHtmlPreviewUrl()
    return () => {
      ignore = true
    }
  }, [selected])

  // Apply a filter mutation and jump back to page 1 in one place.
  function resetToFirstPage(nextAction: () => void) {
    nextAction()
    setPage(1)
  }

  return (
    <div className="flex h-full min-h-0 flex-col gap-3 overflow-hidden p-3 lg:p-4">
      {/* Page header with refresh button */}
      <div className="flex flex-col gap-2 lg:flex-row lg:items-center lg:justify-between">
        <div>
          <h1 className="text-xl font-semibold tracking-normal text-foreground"></h1>
          <p className="text-sm text-muted-foreground"> logs/maisaka_prompt prompt </p>
        </div>
        <Button
          variant="outline"
          size="sm"
          onClick={() => setRefreshKey((current) => current + 1)}
          disabled={loading}
        >
          <RefreshCw className={cn('h-4 w-4', loading && 'animate-spin')} />
        </Button>
      </div>
      {/* Filter bar: stage select, session select, free-text search */}
      <div className="grid flex-shrink-0 grid-cols-1 gap-2 md:grid-cols-[180px_240px_1fr]">
        <Select
          value={stage}
          onValueChange={(value) =>
            resetToFirstPage(() => {
              setStage(value)
              // Session options depend on the stage, so reset the session filter too.
              setSession('all')
            })
          }
        >
          <SelectTrigger>
            <SelectValue placeholder="阶段" />
          </SelectTrigger>
          <SelectContent>
            <SelectItem value="all"></SelectItem>
            {stages.map((item) => (
              <SelectItem key={item} value={item}>
                {item}
              </SelectItem>
            ))}
          </SelectContent>
        </Select>
        <Select
          value={session}
          onValueChange={(value) => resetToFirstPage(() => setSession(value))}
        >
          <SelectTrigger>
            <SelectValue placeholder="会话" />
          </SelectTrigger>
          <SelectContent>
            <SelectItem value="all"></SelectItem>
            {sessions.map((item) => (
              <SelectItem key={item} value={item}>
                {item}
              </SelectItem>
            ))}
          </SelectContent>
        </Select>
        <div className="relative">
          <Search className="pointer-events-none absolute left-3 top-1/2 h-4 w-4 -translate-y-1/2 text-muted-foreground" />
          <Input
            value={search}
            onChange={(event) => resetToFirstPage(() => setSearch(event.target.value))}
            className="pl-9"
            placeholder="搜索阶段、会话或文件名"
          />
        </div>
      </div>
      {error && (
        <div className="rounded-md border border-destructive/30 bg-destructive/10 px-3 py-2 text-sm text-destructive">
          {error}
        </div>
      )}
      {/* Main split: file list (left) and preview pane (right) */}
      <div className="grid min-h-0 flex-1 grid-cols-1 gap-3 lg:grid-cols-[360px_1fr]">
        <div className="flex min-h-0 flex-col overflow-hidden rounded-md border bg-background">
          <div className="flex h-11 flex-shrink-0 items-center justify-between border-b px-3 text-sm text-muted-foreground">
            <span>{total} </span>
            <span>
              {page} / {totalPages}
            </span>
          </div>
          <ScrollArea className="min-h-0 flex-1">
            <div className="space-y-1 p-2">
              {items.map((item) => {
                {/* Identity is the (stage, session_id, stem) triple — paths may differ per format. */}
                const active = selected?.stage === item.stage && selected?.session_id === item.session_id && selected?.stem === item.stem
                return (
                  <button
                    key={`${item.stage}/${item.session_id}/${item.stem}`}
                    type="button"
                    onClick={() => setSelected(item)}
                    className={cn(
                      'flex w-full flex-col gap-2 rounded-md border px-3 py-2 text-left text-sm transition-colors',
                      active
                        ? 'border-primary bg-primary/10 text-foreground'
                        : 'border-transparent hover:border-border hover:bg-muted/60'
                    )}
                  >
                    <div className="flex items-center justify-between gap-2">
                      <Badge variant="secondary" className="max-w-[150px] truncate">
                        {item.stage}
                      </Badge>
                      <span className="flex items-center gap-1 text-xs text-muted-foreground">
                        <Clock className="h-3.5 w-3.5" />
                        {formatTime(item.timestamp, item.modified_at)}
                      </span>
                    </div>
                    <div className="truncate font-medium">{item.session_id}</div>
                    <div className="flex items-center justify-between gap-2 text-xs text-muted-foreground">
                      <span className="truncate">{item.stem}</span>
                      <span className="shrink-0">{formatSize(item.size)}</span>
                    </div>
                  </button>
                )
              })}
              {!loading && items.length === 0 && (
                <div className="px-3 py-10 text-center text-sm text-muted-foreground">
                </div>
              )}
            </div>
          </ScrollArea>
          {/* Pager: previous / next page */}
          <div className="flex h-12 flex-shrink-0 items-center justify-between border-t px-3">
            <Button
              variant="outline"
              size="sm"
              disabled={page <= 1 || loading}
              onClick={() => setPage((current) => Math.max(1, current - 1))}
            >
            </Button>
            <Button
              variant="outline"
              size="sm"
              disabled={page >= totalPages || loading}
              onClick={() => setPage((current) => Math.min(totalPages, current + 1))}
            >
            </Button>
          </div>
        </div>
        {/* Preview pane: header with metadata/format badges, then text/HTML tabs */}
        <div className="flex min-h-0 flex-col overflow-hidden rounded-md border bg-background">
          <div className="flex min-h-14 flex-shrink-0 flex-col gap-1 border-b px-4 py-3 md:flex-row md:items-center md:justify-between">
            <div className="min-w-0">
              <div className="truncate text-sm font-medium">
                {selected ? `${selected.stage}/${selected.session_id}/${selected.stem}` : '未选择记录'}
              </div>
              <div className="text-xs text-muted-foreground">
                {selected ? `${formatSize(selected.size)} · ${formatTime(selected.timestamp, selected.modified_at)}` : '从左侧列表选择一条记录'}
              </div>
            </div>
            {selected && (
              <div className="flex items-center gap-2 text-xs text-muted-foreground">
                {selected.text_path && (
                  <span className="inline-flex items-center gap-1">
                    <FileText className="h-3.5 w-3.5" />
                    txt
                  </span>
                )}
                {selected.html_path && (
                  <span className="inline-flex items-center gap-1">
                    <FileCode2 className="h-3.5 w-3.5" />
                    html
                  </span>
                )}
              </div>
            )}
          </div>
          <Tabs
            value={activePreview}
            onValueChange={(value) => setActivePreview(value as 'text' | 'html')}
            className="flex min-h-0 flex-1 flex-col"
          >
            <div className="flex flex-shrink-0 border-b px-3 py-2">
              <TabsList>
                <TabsTrigger value="text" disabled={!selected?.text_path}>
                  <FileText className="mr-1 h-4 w-4" />
                </TabsTrigger>
                <TabsTrigger value="html" disabled={!selected?.html_path}>
                  <Code2 className="mr-1 h-4 w-4" />
                  HTML
                </TabsTrigger>
              </TabsList>
            </div>
            <TabsContent value="text" className="m-0 min-h-0 flex-1 overflow-hidden">
              <ScrollArea className="h-full">
                <pre className="min-h-full whitespace-pre-wrap break-words p-4 font-mono text-xs leading-5 text-foreground">
                  {contentLoading ? '正在读取...' : textContent || '没有文本内容'}
                </pre>
              </ScrollArea>
            </TabsContent>
            <TabsContent value="html" className="m-0 min-h-0 flex-1 overflow-hidden">
              {selected?.html_path && htmlPreviewUrl ? (
                <iframe
                  title="推理过程 HTML 预览"
                  src={htmlPreviewUrl}
                  sandbox=""
                  className="h-full w-full border-0 bg-white"
                />
              ) : (
                <div className="flex h-full items-center justify-center text-sm text-muted-foreground">
                  HTML
                </div>
              )}
            </TabsContent>
          </Tabs>
        </div>
      </div>
    </div>
  )
}

View File

@@ -0,0 +1,89 @@
from types import SimpleNamespace
from src.chat.message_receive.chat_manager import chat_manager
from src.common.utils.utils_config import ChatConfigUtils, ExpressionConfigUtils
from src.common.utils.utils_session import SessionUtils
from src.config.config import global_config
def test_get_chat_prompt_for_chat_merges_multiple_matching_prompts(monkeypatch):
    """All chat_prompts entries matching the session's group are merged with
    newlines (in config order); entries for other groups must be ignored."""
    session_id = SessionUtils.calculate_session_id("qq", group_id="1036092828")
    monkeypatch.setattr(
        global_config.chat,
        "chat_prompts",
        [
            # Two rules target the same group: both should apply, in order.
            {"platform": "qq", "item_id": "1036092828", "rule_type": "group", "prompt": "你也是群管理员,可以适当进行管理"},
            {"platform": "qq", "item_id": "1036092828", "rule_type": "group", "prompt": "这个群是技术实验群,请你专心讨论技术"},
            # Different item_id: must not appear in the result.
            {"platform": "qq", "item_id": "other", "rule_type": "group", "prompt": "不应该生效"},
        ],
    )
    # No routed chat stream registered: matching falls back to the raw session id.
    monkeypatch.setattr(chat_manager, "get_session_by_session_id", lambda _session_id: None)

    result = ChatConfigUtils.get_chat_prompt_for_chat(session_id, True)

    assert result == "你也是群管理员,可以适当进行管理\n这个群是技术实验群,请你专心讨论技术"
def test_get_chat_prompt_for_chat_matches_routed_session_by_chat_stream(monkeypatch):
    """A session routed through a specific bot account (account_id) must still
    match a group rule when its resolved chat stream points at that group."""
    session_id = SessionUtils.calculate_session_id("qq", group_id="1036092828", account_id="bot-a")
    monkeypatch.setattr(
        global_config.chat,
        "chat_prompts",
        [
            {"platform": "qq", "item_id": "1036092828", "rule_type": "group", "prompt": "路由会话也应该生效"},
        ],
    )
    # The chat manager resolves the routed session id back to its group stream.
    monkeypatch.setattr(
        chat_manager,
        "get_session_by_session_id",
        lambda _session_id: SimpleNamespace(platform="qq", group_id="1036092828", user_id=None),
    )

    result = ChatConfigUtils.get_chat_prompt_for_chat(session_id, True)

    assert result == "路由会话也应该生效"
def test_expression_learning_list_matches_routed_session_by_chat_stream(monkeypatch):
    """Expression learning rules keyed by group id also apply to routed sessions;
    the configured (use_expression, enable_learning, enable_jargon_learning)
    flags must be returned instead of the defaults."""
    session_id = SessionUtils.calculate_session_id("qq", group_id="1036092828", account_id="bot-a")
    monkeypatch.setattr(
        global_config.expression,
        "learning_list",
        [
            {
                "platform": "qq",
                "item_id": "1036092828",
                "rule_type": "group",
                "use_expression": False,
                "enable_learning": False,
                "enable_jargon_learning": True,
            }
        ],
    )
    # Route the session id back to the configured group via the chat manager.
    monkeypatch.setattr(
        chat_manager,
        "get_session_by_session_id",
        lambda _session_id: SimpleNamespace(platform="qq", group_id="1036092828", user_id=None),
    )

    assert ExpressionConfigUtils.get_expression_config_for_chat(session_id) == (False, False, True)
def test_talk_value_rules_match_routed_session_by_chat_stream(monkeypatch):
    """A talk_value rule keyed by group id must override the global talk_value
    for a routed session whose chat stream resolves to that group."""
    session_id = SessionUtils.calculate_session_id("qq", group_id="1036092828", account_id="bot-a")
    # Global default 0.1 plus an always-active (00:00-23:59) group override of 0.7.
    monkeypatch.setattr(global_config.chat, "talk_value", 0.1)
    monkeypatch.setattr(global_config.chat, "enable_talk_value_rules", True)
    monkeypatch.setattr(
        global_config.chat,
        "talk_value_rules",
        [
            {"platform": "qq", "item_id": "1036092828", "rule_type": "group", "time": "00:00-23:59", "value": 0.7}
        ],
    )
    # Route the session id back to the configured group via the chat manager.
    monkeypatch.setattr(
        chat_manager,
        "get_session_by_session_id",
        lambda _session_id: SimpleNamespace(platform="qq", group_id="1036092828", user_id=None),
    )

    assert ChatConfigUtils.get_talk_value(session_id, True) == 0.7

View File

@@ -39,6 +39,50 @@ def test_resolve_expression_group_scope_returns_related_sessions(monkeypatch: py
assert has_global_share is False
def test_resolve_expression_group_scope_matches_routed_sessions(monkeypatch: pytest.MonkeyPatch) -> None:
current_session_id = SessionUtils.calculate_session_id("qq", group_id="10001", account_id="bot-a")
related_session_id = SessionUtils.calculate_session_id("qq", group_id="10002", account_id="bot-a")
monkeypatch.setattr(
selector_module,
"global_config",
SimpleNamespace(
expression=SimpleNamespace(
expression_groups=[
SimpleNamespace(
expression_groups=[
_build_target("qq", "10001"),
_build_target("qq", "10002"),
]
)
]
)
),
)
monkeypatch.setattr(
selector_module.ChatConfigUtils,
"_get_chat_stream",
lambda session_id: SimpleNamespace(platform="qq", group_id="10001", user_id=None)
if session_id == current_session_id
else None,
)
target_session_ids = {
"10001": current_session_id,
"10002": related_session_id,
}
monkeypatch.setattr(
selector_module.ChatConfigUtils,
"get_target_session_ids",
lambda target_item: {target_session_ids[target_item.item_id]},
)
selector = MaisakaExpressionSelector()
related_session_ids, has_global_share = selector._resolve_expression_group_scope(current_session_id)
assert related_session_ids == {current_session_id, related_session_id}
assert has_global_share is False
def test_resolve_expression_group_scope_uses_star_as_global_share(monkeypatch: pytest.MonkeyPatch) -> None:
current_session_id = SessionUtils.calculate_session_id("qq", group_id="10001")

View File

@@ -11,8 +11,7 @@ from src.chat.message_receive.message import SessionMessage
from src.common.database.database import get_db_session
from src.common.database.database_model import Expression
from src.common.logger import get_logger
from src.common.utils.utils_config import ExpressionConfigUtils
from src.common.utils.utils_session import SessionUtils
from src.common.utils.utils_config import ChatConfigUtils, ExpressionConfigUtils
from src.config.config import global_config
from src.learners.learner_utils_old import weighted_sample
from src.maisaka.context_messages import LLMContextMessage
@@ -65,14 +64,9 @@ class MaisakaExpressionSelector:
if not platform or not item_id:
continue
rule_type = target_item.rule_type
target_session_id = SessionUtils.calculate_session_id(
platform,
group_id=item_id if rule_type == "group" else None,
user_id=None if rule_type == "group" else item_id,
)
group_session_ids.add(target_session_id)
if target_session_id == session_id:
target_session_ids = ChatConfigUtils.get_target_session_ids(target_item)
group_session_ids.update(target_session_ids)
if ChatConfigUtils.target_matches_session(target_item, session_id):
contains_current_session = True
if contains_global_share_marker:

View File

@@ -30,7 +30,7 @@ from src.common.data_models.message_component_data_model import (
VoiceComponent,
)
from src.common.logger import get_logger
from src.common.utils.utils_session import SessionUtils
from src.common.utils.utils_config import ChatConfigUtils
from src.config.config import global_config
from src.config.model_configs import ModelInfo
from src.core.types import ActionInfo
@@ -211,46 +211,7 @@ class BaseMaisakaReplyGenerator:
@staticmethod
def _get_chat_prompt_for_chat(chat_id: str, is_group_chat: Optional[bool]) -> str:
"""根据聊天流 ID 获取匹配的额外 prompt。"""
if not global_config.chat.chat_prompts:
return ""
for chat_prompt_item in global_config.chat.chat_prompts:
if hasattr(chat_prompt_item, "platform"):
platform = str(chat_prompt_item.platform or "").strip()
item_id = str(chat_prompt_item.item_id or "").strip()
rule_type = str(chat_prompt_item.rule_type or "").strip()
prompt_content = str(chat_prompt_item.prompt or "").strip()
elif isinstance(chat_prompt_item, str):
parts = chat_prompt_item.split(":", 3)
if len(parts) != 4:
continue
platform, item_id, rule_type, prompt_content = parts
platform = platform.strip()
item_id = item_id.strip()
rule_type = rule_type.strip()
prompt_content = prompt_content.strip()
else:
continue
if not platform or not item_id or not prompt_content:
continue
if rule_type == "group":
config_is_group = True
config_chat_id = SessionUtils.calculate_session_id(platform, group_id=item_id)
elif rule_type == "private":
config_is_group = False
config_chat_id = SessionUtils.calculate_session_id(platform, user_id=item_id)
else:
continue
if config_is_group != is_group_chat:
continue
if config_chat_id == chat_id:
return prompt_content
return ""
return ChatConfigUtils.get_chat_prompt_for_chat(chat_id, is_group_chat)
def _build_group_chat_attention_block(self, session_id: str) -> str:
"""构建当前聊天场景下的额外注意事项块。"""

View File

@@ -1,7 +1,7 @@
from typing import Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.common.utils.utils_config import ExpressionConfigUtils
logger = get_logger("common_utils")
@@ -11,29 +11,7 @@ class TempMethodsExpression:
@staticmethod
def _find_expression_config_item(chat_stream_id: Optional[str] = None):
if not global_config.expression.learning_list:
return None
if chat_stream_id:
for config_item in global_config.expression.learning_list:
if not config_item.platform and not config_item.item_id:
continue
stream_id = TempMethodsExpression._get_stream_id(
config_item.platform,
str(config_item.item_id),
(config_item.rule_type == "group"),
)
if stream_id is None:
continue
if stream_id != chat_stream_id:
continue
return config_item
for config_item in global_config.expression.learning_list:
if not config_item.platform and not config_item.item_id:
return config_item
return None
return ExpressionConfigUtils._find_expression_config_item(chat_stream_id)
@staticmethod
def get_expression_config_for_chat(chat_stream_id: Optional[str] = None) -> tuple[bool, bool, bool]:
@@ -46,15 +24,7 @@ class TempMethodsExpression:
Returns:
tuple: (是否使用表达, 是否学习表达, 是否启用 jargon 学习)
"""
config_item = TempMethodsExpression._find_expression_config_item(chat_stream_id)
if config_item is None:
return True, True, True
return (
config_item.use_expression,
config_item.enable_learning,
config_item.enable_jargon_learning,
)
return ExpressionConfigUtils.get_expression_config_for_chat(chat_stream_id)
@staticmethod
def _get_stream_id(

View File

@@ -150,9 +150,11 @@ class BaseImageDataModel(BaseDatabaseDataModel[Images]):
file_ext = self.file_name.split(".")[-1].lower()
if file_ext != self.image_format:
logger.warning(
f"[初始化] {self.file_name} 文件扩展名与实际格式不符: ext`{file_ext}`!=`{self.image_format}`"
)
log_message = f"[初始化] {self.file_name} 文件扩展名与实际格式不符: ext`{file_ext}`!=`{self.image_format}`"
if file_ext == "tmp":
logger.debug(log_message)
else:
logger.warning(log_message)
self._rename_file_to_match_format()
return True

View File

@@ -1,4 +1,4 @@
from typing import Optional
from typing import Iterator, Optional
import time
@@ -18,16 +18,8 @@ class ExpressionConfigUtils:
for config_item in global_config.expression.learning_list:
if not config_item.platform and not config_item.item_id:
continue
stream_id = ExpressionConfigUtils._get_stream_id(
config_item.platform,
str(config_item.item_id),
(config_item.rule_type == "group"),
)
if stream_id is None:
continue
if stream_id != session_id:
continue
return config_item
if ChatConfigUtils.target_matches_session(config_item, session_id):
return config_item
for config_item in global_config.expression.learning_list:
if not config_item.platform and not config_item.item_id:
@@ -84,6 +76,180 @@ class ExpressionConfigUtils:
class ChatConfigUtils:
@staticmethod
def _iter_matching_chat_prompts(session_id: str, is_group_chat: Optional[bool]) -> Iterator[str]:
    """Yield every configured extra-prompt string that targets the given session.

    Each entry of ``global_config.chat.chat_prompts`` may be either a structured
    object exposing ``platform``/``item_id``/``rule_type``/``prompt`` attributes,
    or a legacy colon-separated string ``"platform:item_id:rule_type:prompt"``.
    Matching is attempted first against the live chat stream (authoritative when
    loaded), then falls back to recomputing the session id a rule would map to.

    Args:
        session_id: Session id of the chat being rendered.
        is_group_chat: True/False restricts matching to group/private rules;
            None accepts both.

    Yields:
        The prompt text of every matching entry, in configuration order.
    """
    try:
        # Imported lazily to avoid circular imports at module load time.
        from src.chat.message_receive.chat_manager import chat_manager
        from src.common.utils.utils_session import SessionUtils
        chat_stream = chat_manager.get_session_by_session_id(session_id)
        session_utils = SessionUtils
    except Exception as e:
        # NOTE(review): a chat_manager failure also disables SessionUtils even
        # if only the first import failed — presumably acceptable; confirm.
        logger.debug(f"解析额外 Prompt 聊天流失败: session_id={session_id} error={e}")
        chat_stream = None
        session_utils = None
    for chat_prompt_item in global_config.chat.chat_prompts:
        if hasattr(chat_prompt_item, "platform"):
            # Structured config object.
            platform = str(chat_prompt_item.platform or "").strip()
            item_id = str(chat_prompt_item.item_id or "").strip()
            rule_type = str(chat_prompt_item.rule_type or "").strip()
            prompt_content = str(chat_prompt_item.prompt or "").strip()
        elif isinstance(chat_prompt_item, str):
            # Legacy colon-separated form; maxsplit=3 keeps any colons that
            # appear inside the prompt text itself.
            parts = chat_prompt_item.split(":", 3)
            if len(parts) != 4:
                continue
            platform, item_id, rule_type, prompt_content = parts
            platform = platform.strip()
            item_id = item_id.strip()
            rule_type = rule_type.strip()
            prompt_content = prompt_content.strip()
        else:
            continue
        if not platform or not item_id or not prompt_content:
            continue
        if rule_type == "group":
            config_is_group = True
            target_attr = "group_id"
        elif rule_type == "private":
            config_is_group = False
            target_attr = "user_id"
        else:
            # Unknown rule types are silently ignored.
            continue
        if is_group_chat is not None and config_is_group != is_group_chat:
            continue
        if chat_stream is not None:
            # Prefer matching against the live stream's platform/target id.
            chat_stream_platform = str(chat_stream.platform or "").strip()
            chat_stream_target_id = str(getattr(chat_stream, target_attr) or "").strip()
            if chat_stream_platform == platform and chat_stream_target_id == item_id:
                yield prompt_content
                continue
        if session_utils is None:
            continue
        try:
            # Fallback: recompute the session id this rule would produce and
            # compare it with the requested one.
            if rule_type == "group":
                config_chat_id = session_utils.calculate_session_id(platform, group_id=item_id)
            else:
                config_chat_id = session_utils.calculate_session_id(platform, user_id=item_id)
        except Exception as e:
            logger.debug(f"生成额外 Prompt 聊天流 ID 失败: platform={platform} item_id={item_id} error={e}")
            continue
        if config_chat_id == session_id:
            yield prompt_content
@staticmethod
def get_chat_prompt_for_chat(session_id: str, is_group_chat: Optional[bool]) -> str:
    """Collect every extra prompt configured for this session, newline-joined.

    Returns an empty string when the session id is missing, no prompts are
    configured, or no rule matches.
    """
    if not (session_id and global_config.chat.chat_prompts):
        return ""
    matched = [
        prompt
        for prompt in ChatConfigUtils._iter_matching_chat_prompts(session_id, is_group_chat)
    ]
    if matched:
        logger.debug(f"匹配到 {len(matched)} 条聊天额外 Prompt: session_id={session_id}")
        return "\n".join(matched)
    return ""
@staticmethod
def _target_values(target_item) -> tuple[str, str, str]:
    """Return the stripped (platform, item_id, rule_type) triple of a config target."""
    platform_value, item_value, rule_value = (
        str(getattr(target_item, field) or "").strip()
        for field in ("platform", "item_id", "rule_type")
    )
    return platform_value, item_value, rule_value
@staticmethod
def _get_chat_stream(session_id: str):
    """Look up the live chat stream for a session id; returns None when unavailable."""
    try:
        from src.chat.message_receive.chat_manager import chat_manager
        stream = chat_manager.get_session_by_session_id(session_id)
    except Exception as e:
        logger.debug(f"获取聊天流失败: session_id={session_id} error={e}")
        stream = None
    return stream
@staticmethod
def _get_stream_id(platform: str, id_str: str, is_group: bool = False) -> Optional[str]:
    """Compute the session id for a platform/target pair; None on failure."""
    try:
        from src.common.utils.utils_session import SessionUtils
        # Route the id through the keyword matching its rule kind.
        target_kwargs = {"group_id": str(id_str)} if is_group else {"user_id": str(id_str)}
        return SessionUtils.calculate_session_id(platform, **target_kwargs)
    except Exception as e:
        logger.error(f"生成聊天流 ID 失败: {e}")
        return None
@staticmethod
def target_matches_session(target_item, session_id: str, is_group_chat: Optional[bool] = None) -> bool:
    """Check whether a platform/item_id/rule_type config target addresses this session.

    The live chat stream, when loaded, is authoritative; otherwise the session
    id the rule would produce is recomputed and compared.
    """
    if not session_id:
        return False
    platform, item_id, rule_type = ChatConfigUtils._target_values(target_item)
    if not (platform and item_id):
        return False
    rule_map = {"group": (True, "group_id"), "private": (False, "user_id")}
    if rule_type not in rule_map:
        return False
    config_is_group, target_attr = rule_map[rule_type]
    if is_group_chat is not None and config_is_group != is_group_chat:
        return False
    stream = ChatConfigUtils._get_chat_stream(session_id)
    if stream is None:
        # No live stream: fall back to comparing recomputed session ids.
        return ChatConfigUtils._get_stream_id(platform, item_id, config_is_group) == session_id
    stream_platform = str(stream.platform or "").strip()
    stream_target = str(getattr(stream, target_attr) or "").strip()
    return stream_platform == platform and stream_target == item_id
@staticmethod
def get_target_session_ids(target_item) -> set[str]:
    """Resolve a config target to every known session id.

    The recomputed (routeless) id is always kept as a compatibility fallback;
    live sessions whose platform/target match are added on top.
    """
    platform, item_id, rule_type = ChatConfigUtils._target_values(target_item)
    if not platform or not item_id:
        return set()
    rule_map = {"group": (True, "group_id"), "private": (False, "user_id")}
    if rule_type not in rule_map:
        return set()
    is_group, target_attr = rule_map[rule_type]
    matched_ids: set[str] = set()
    fallback_id = ChatConfigUtils._get_stream_id(platform, item_id, is_group)
    if fallback_id:
        matched_ids.add(fallback_id)
    try:
        from src.chat.message_receive.chat_manager import chat_manager
        for known_id, stream in chat_manager.sessions.items():
            same_platform = str(stream.platform or "").strip() == platform
            same_target = str(getattr(stream, target_attr) or "").strip() == item_id
            if same_platform and same_target:
                matched_ids.add(known_id)
    except Exception as e:
        logger.debug(f"解析配置目标已知聊天流失败: platform={platform} item_id={item_id} error={e}")
    return matched_ids
@staticmethod
def _resolve_is_group_chat(session_id: Optional[str]) -> Optional[bool]:
if not session_id:
@@ -117,16 +283,10 @@ class ChatConfigUtils:
# 优先匹配会话相关的规则
if session_id:
from src.common.utils.utils_session import SessionUtils
for rule in global_config.chat.talk_value_rules:
if not rule.platform and not rule.item_id:
continue # 一起留空表示全局
if rule.rule_type == "group":
rule_session_id = SessionUtils.calculate_session_id(rule.platform, group_id=str(rule.item_id))
else:
rule_session_id = SessionUtils.calculate_session_id(rule.platform, user_id=str(rule.item_id))
if rule_session_id != session_id:
if not ChatConfigUtils.target_matches_session(rule, session_id, is_group_chat):
continue # 不匹配的会话 ID跳过
parsed_range = ChatConfigUtils.parse_range(rule.time)
if not parsed_range:

View File

@@ -45,6 +45,7 @@ class MainSystem:
self.app: MessageServer = get_global_api()
self.server: Server = get_global_server()
self.webui_task: asyncio.Task[None] | None = None
self.webui_server: WebUIServer | None = None # 独立的 WebUI 服务器
def _setup_webui_server(self) -> None:
@@ -69,16 +70,23 @@ class MainSystem:
enable_stage_status_board()
logger.info(t("startup.waking_up", nickname=global_config.bot.nickname))
await self._auto_update_webui_dashboard()
# 设置独立的 WebUI 服务器
self._setup_webui_server()
# 其他初始化任务
await asyncio.gather(self._init_components())
self.webui_task = asyncio.create_task(self._run_webui_startup_sequence(), name="webui_startup")
try:
await self._init_components()
except Exception:
self.webui_task.cancel()
await asyncio.gather(self.webui_task, return_exceptions=True)
raise
logger.info(t("startup.initialization_completed_banner", nickname=global_config.bot.nickname))
async def _run_webui_startup_sequence(self) -> None:
"""按顺序检查 WebUI 更新并启动 WebUI同时允许主初始化并行执行。"""
await self._auto_update_webui_dashboard()
self._setup_webui_server()
if self.webui_server:
await self.webui_server.start()
async def _auto_update_webui_dashboard(self) -> None:
"""启动时自动检查并更新 WebUI dashboard。"""
if not global_config.webui.enabled:
@@ -166,7 +174,9 @@ class MainSystem:
]
# 如果 WebUI 服务器已初始化,添加到任务列表
if self.webui_server:
if self.webui_task:
tasks.append(self.webui_task)
elif self.webui_server:
tasks.append(self.webui_server.start())
await asyncio.gather(*tasks)

View File

@@ -10,7 +10,7 @@ from rich.console import RenderableType
from src.common.data_models.llm_service_data_models import LLMGenerationOptions
from src.common.logger import get_logger
from src.common.prompt_i18n import load_prompt
from src.common.utils.utils_session import SessionUtils
from src.common.utils.utils_config import ChatConfigUtils
from src.config.config import global_config
from src.core.tooling import ToolAvailabilityContext, ToolRegistry
from src.llm_models.model_client.base_client import BaseClient
@@ -384,49 +384,7 @@ class MaisakaChatLoopService:
@staticmethod
def _get_chat_prompt_for_chat(chat_id: str, is_group_chat: Optional[bool]) -> str:
"""根据聊天流 ID 获取匹配的额外提示。"""
if not global_config.chat.chat_prompts:
return ""
for chat_prompt_item in global_config.chat.chat_prompts:
if hasattr(chat_prompt_item, "platform"):
platform = str(chat_prompt_item.platform or "").strip()
item_id = str(chat_prompt_item.item_id or "").strip()
rule_type = str(chat_prompt_item.rule_type or "").strip()
prompt_content = str(chat_prompt_item.prompt or "").strip()
elif isinstance(chat_prompt_item, str):
parts = chat_prompt_item.split(":", 3)
if len(parts) != 4:
continue
platform, item_id, rule_type, prompt_content = parts
platform = platform.strip()
item_id = item_id.strip()
rule_type = rule_type.strip()
prompt_content = prompt_content.strip()
else:
continue
if not platform or not item_id or not prompt_content:
continue
if rule_type == "group":
config_is_group = True
config_chat_id = SessionUtils.calculate_session_id(platform, group_id=item_id)
elif rule_type == "private":
config_is_group = False
config_chat_id = SessionUtils.calculate_session_id(platform, user_id=item_id)
else:
continue
if is_group_chat is not None and config_is_group != is_group_chat:
continue
if config_chat_id == chat_id:
logger.debug(f"匹配到 Maisaka 聊天额外提示chat_id: {chat_id}, prompt: {prompt_content[:50]}...")
return prompt_content
return ""
return ChatConfigUtils.get_chat_prompt_for_chat(chat_id, is_group_chat)
def set_extra_tools(self, tools: Sequence[ToolDefinitionInput]) -> None:
"""设置额外工具定义。

View File

@@ -0,0 +1,197 @@
"""推理过程日志浏览接口。"""
from pathlib import Path
from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import FileResponse
from pydantic import BaseModel, Field
from src.webui.dependencies import require_auth
# Every endpoint in this router requires an authenticated WebUI session.
router = APIRouter(prefix="/reasoning-process", tags=["reasoning-process"], dependencies=[Depends(require_auth)])
# parents[3]: src/webui/routers/<this file> -> repository root (module path
# src.webui.routers.reasoning_process).
PROJECT_ROOT = Path(__file__).resolve().parents[3]
# Root directory holding the Maisaka prompt reasoning logs.
PROMPT_LOG_ROOT = (PROJECT_ROOT / "logs" / "maisaka_prompt").resolve()
# Only plain-text and rendered-HTML logs may be listed or served.
ALLOWED_SUFFIXES = {".txt", ".html"}
class ReasoningPromptFile(BaseModel):
    """A single reasoning-process log entry (one .txt/.html pair sharing a stem)."""

    # Pipeline stage: first directory level under the log root.
    stage: str
    # Chat session the prompt belongs to: second directory level.
    session_id: str
    # File name without suffix; shared by the .txt/.html variants.
    stem: str
    # Parsed from the stem when it is purely numeric, else None.
    timestamp: int | None = None
    # Root-relative POSIX path of the .txt variant, if present.
    text_path: str | None = None
    # Root-relative POSIX path of the .html variant, if present.
    html_path: str | None = None
    # Combined size in bytes of all files sharing this stem.
    size: int = 0
    # Latest mtime (epoch seconds) among the files sharing this stem.
    modified_at: float = 0
class ReasoningPromptListResponse(BaseModel):
    """Paged listing of reasoning-process log entries."""

    # Entries for the requested page only (after filtering).
    items: list[ReasoningPromptFile]
    # Total entry count after filtering, before paging.
    total: int
    # 1-based page number echoed back from the request.
    page: int
    # Page size echoed back from the request.
    page_size: int
    # All stage names present on disk (unfiltered), sorted.
    stages: list[str] = Field(default_factory=list)
    # All session ids present on disk (unfiltered), sorted.
    sessions: list[str] = Field(default_factory=list)
class ReasoningPromptContentResponse(BaseModel):
    """Text content of one reasoning-process .txt log."""

    # Path relative to the prompt-log root, POSIX separators.
    path: str
    # File contents decoded as UTF-8 (undecodable bytes replaced).
    content: str
    # File size in bytes.
    size: int
    # Last modification time, epoch seconds.
    modified_at: float
def _to_safe_relative_path(relative_path: str) -> Path:
safe_path = Path(relative_path)
if safe_path.is_absolute() or ".." in safe_path.parts:
raise HTTPException(status_code=400, detail="路径不合法")
return safe_path
def _resolve_prompt_log_path(relative_path: str, allowed_suffixes: set[str]) -> Path:
    """Resolve a client path against PROMPT_LOG_ROOT with escape/type/existence checks."""
    resolved = (PROMPT_LOG_ROOT / _to_safe_relative_path(relative_path)).resolve()
    try:
        resolved.relative_to(PROMPT_LOG_ROOT)
    except ValueError as exc:
        # A symlink or odd component resolved outside the log root.
        raise HTTPException(status_code=400, detail="路径不合法") from exc
    if resolved.suffix.lower() not in allowed_suffixes:
        raise HTTPException(status_code=400, detail="不支持的文件类型")
    if not resolved.is_file():
        raise HTTPException(status_code=404, detail="文件不存在")
    return resolved
def _relative_posix_path(path: Path) -> str:
    """Express a path relative to the prompt-log root using forward slashes."""
    relative = path.relative_to(PROMPT_LOG_ROOT)
    return relative.as_posix()
def _collect_prompt_files() -> tuple[list[ReasoningPromptFile], list[str], list[str]]:
    """Scan PROMPT_LOG_ROOT and merge sibling .txt/.html files into entries.

    Returns:
        A tuple of (entries sorted newest-first, sorted stage names,
        sorted session ids). All three are empty when the log root is missing.
    """
    if not PROMPT_LOG_ROOT.is_dir():
        return [], [], []
    # One record per (stage, session, stem); a .txt/.html pair shares a record.
    records: dict[tuple[str, str, str], dict[str, object]] = {}
    stages: set[str] = set()
    sessions: set[str] = set()
    for file_path in PROMPT_LOG_ROOT.rglob("*"):
        if not file_path.is_file() or file_path.suffix.lower() not in ALLOWED_SUFFIXES:
            continue
        try:
            relative_path = file_path.relative_to(PROMPT_LOG_ROOT)
        except ValueError:
            continue
        parts = relative_path.parts
        # Expected layout: <stage>/<session_id>/<file>; skip anything shallower.
        if len(parts) < 3:
            continue
        stage, session_id = parts[0], parts[1]
        stem = file_path.stem
        key = (stage, session_id, stem)
        stat = file_path.stat()
        stages.add(stage)
        sessions.add(session_id)
        record = records.setdefault(
            key,
            {
                "stage": stage,
                "session_id": session_id,
                "stem": stem,
                # Digit-only stems are treated as timestamps; others get None.
                "timestamp": int(stem) if stem.isdigit() else None,
                "text_path": None,
                "html_path": None,
                "size": 0,
                "modified_at": 0.0,
            },
        )
        # Aggregate size across both variants; keep the newest mtime.
        record["size"] = int(record["size"]) + stat.st_size
        record["modified_at"] = max(float(record["modified_at"]), stat.st_mtime)
        if file_path.suffix.lower() == ".txt":
            record["text_path"] = _relative_posix_path(file_path)
        elif file_path.suffix.lower() == ".html":
            record["html_path"] = _relative_posix_path(file_path)
    items = [ReasoningPromptFile(**record) for record in records.values()]
    # Newest first: primary key mtime, tie-break on the numeric stem.
    items.sort(key=lambda item: (item.modified_at, item.timestamp or 0), reverse=True)
    return items, sorted(stages), sorted(sessions)
@router.get("/files", response_model=ReasoningPromptListResponse)
async def list_reasoning_prompt_files(
    stage: str = Query("all"),
    session: str = Query("all"),
    search: str = Query(""),
    page: int = Query(1, ge=1),
    page_size: int = Query(50, ge=10, le=200),
):
    """List reasoning-process logs under logs/maisaka_prompt, filtered and paged."""
    entries, all_stages, all_sessions = _collect_prompt_files()
    if stage != "all":
        entries = [entry for entry in entries if entry.stage == stage]
    if session != "all":
        entries = [entry for entry in entries if entry.session_id == session]
    needle = search.strip().lower()
    if needle:
        # Case-insensitive substring match on stage, session id, or stem.
        def matches(entry: ReasoningPromptFile) -> bool:
            return any(needle in value.lower() for value in (entry.stage, entry.session_id, entry.stem))

        entries = [entry for entry in entries if matches(entry)]
    total = len(entries)
    offset = (page - 1) * page_size
    return ReasoningPromptListResponse(
        items=entries[offset : offset + page_size],
        total=total,
        page=page,
        page_size=page_size,
        stages=all_stages,
        sessions=all_sessions,
    )
@router.get("/file", response_model=ReasoningPromptContentResponse)
async def get_reasoning_prompt_file(path: str = Query(...)):
    """Return the UTF-8 text of one reasoning-process .txt log."""
    resolved = _resolve_prompt_log_path(path, {".txt"})
    file_stat = resolved.stat()
    text = resolved.read_text(encoding="utf-8", errors="replace")
    return ReasoningPromptContentResponse(
        path=_relative_posix_path(resolved),
        content=text,
        size=file_stat.st_size,
        modified_at=file_stat.st_mtime,
    )
@router.get("/html")
async def get_reasoning_prompt_html(path: str = Query(...)):
    """Serve one reasoning-process .html log for in-browser preview."""
    resolved = _resolve_prompt_log_path(path, {".html"})
    # Keep preview pages out of search indexes.
    response_headers = {"X-Robots-Tag": "noindex, nofollow"}
    return FileResponse(
        resolved,
        media_type="text/html; charset=utf-8",
        headers=response_headers,
    )

View File

@@ -20,6 +20,7 @@ from src.webui.routers.memory import router as memory_router
from src.webui.routers.model import router as model_router
from src.webui.routers.person import router as person_router
from src.webui.routers.plugin import router as plugin_router
from src.webui.routers.reasoning_process import router as reasoning_process_router
from src.webui.routers.statistics import router as statistics_router
from src.webui.routers.system import router as system_router
from src.webui.routers.websocket.auth import router as ws_auth_router
@@ -46,6 +47,7 @@ router.include_router(emoji_router)
router.include_router(plugin_router)
# 注册系统控制路由
router.include_router(system_router)
router.include_router(reasoning_process_router)
# 注册模型列表获取路由
router.include_router(model_router)
# 注册长期记忆管理路由