Merge pull request #1653 from Mai-with-u/dev

Dev
This commit is contained in:
SengokuCola
2026-05-07 21:53:12 +08:00
committed by GitHub
35 changed files with 1825 additions and 473 deletions

View File

@@ -33,6 +33,29 @@ export interface SessionStartEvent {
timestamp: number
}
/** Live stage/progress update for a single monitored session. */
export interface StageStatusEvent {
// Id of the session this status belongs to.
session_id: string
// Optional human-readable session name.
session_name?: string
// Current pipeline stage name.
stage: string
// Free-form detail text for the current stage.
detail: string
// Display text for the internal round counter.
round_text: string
// Agent state keyword (e.g. "running").
agent_state: string
// Unix seconds when the current stage started.
stage_started_at: number
// Unix seconds of the latest status update.
updated_at: number
timestamp: number
}
/** Emitted when a session's stage status should be cleared from the UI. */
export interface StageRemovedEvent {
session_id: string
session_name?: string
timestamp: number
}
/** Bulk snapshot of stage statuses for all sessions. */
export interface StageSnapshotEvent {
entries: StageStatusEvent[]
timestamp: number
}
export interface MessageIngestedEvent {
session_id: string
speaker_name: string
@@ -41,6 +64,15 @@ export interface MessageIngestedEvent {
timestamp: number
}
/** Emitted when the bot sends a message out to a session. */
export interface MessageSentEvent {
session_id: string
// Display name of the sender.
speaker_name: string
// Message text; may be empty for non-text messages.
content: string
message_id: string
// Optional origin tag for the message — semantics defined by the backend.
source_kind?: string
timestamp: number
}
export interface CycleStartEvent {
session_id: string
cycle_id: number
@@ -143,9 +175,12 @@ export interface PlannerFinalizedEvent {
request: MaisakaRequestBlock | null
planner: MaisakaPlannerBlock | null
tools: MaisakaFinalizedToolResult[]
interrupted?: boolean
final_state: {
time_records: Record<string, number>
agent_state: string
end_reason?: string
end_detail?: string
}
}
@@ -154,6 +189,8 @@ export interface CycleEndEvent {
cycle_id: number
time_records: Record<string, number>
agent_state: string
end_reason?: string
end_detail?: string
timestamp: number
}
@@ -181,7 +218,11 @@ export interface ReplierResponseEvent {
export type MaisakaMonitorEvent =
| { type: 'session.start'; data: SessionStartEvent }
| { type: 'stage.status'; data: StageStatusEvent }
| { type: 'stage.removed'; data: StageRemovedEvent }
| { type: 'stage.snapshot'; data: StageSnapshotEvent }
| { type: 'message.ingested'; data: MessageIngestedEvent }
| { type: 'message.sent'; data: MessageSentEvent }
| { type: 'cycle.start'; data: CycleStartEvent }
| { type: 'timing_gate.result'; data: TimingGateResultEvent }
| { type: 'planner.request'; data: PlannerRequestEvent }

View File

@@ -22,6 +22,7 @@ export type ReasoningPromptListResponse = {
page_size: number
stages: string[]
sessions: string[]
selected_session: string
}
export type ReasoningPromptContentResponse = {
@@ -43,8 +44,8 @@ export async function listReasoningPromptFiles(
params: ReasoningPromptListParams
): Promise<ReasoningPromptListResponse> {
const queryParams = new URLSearchParams()
queryParams.set('stage', params.stage ?? 'all')
queryParams.set('session', params.session ?? 'all')
queryParams.set('stage', params.stage ?? 'planner')
queryParams.set('session', params.session ?? 'auto')
queryParams.set('search', params.search ?? '')
queryParams.set('page', String(params.page ?? 1))
queryParams.set('page_size', String(params.pageSize ?? 50))

View File

@@ -51,6 +51,50 @@ export interface DashboardVersionStatus {
pypi_url: string
}
/** Statistics for one cache directory on disk. */
export interface CacheDirectoryStats {
// Stable identifier (e.g. 'images', 'emoji', 'logs').
key: string
// Human-readable label for display.
label: string
// Filesystem path of the directory.
path: string
// Whether the directory currently exists on disk.
exists: boolean
file_count: number
// Total size in bytes.
total_size: number
// Number of related database records.
db_records: number
}
/** One database file on disk. */
export interface DatabaseFileStats {
path: string
exists: boolean
// Size in bytes.
size: number
}
/** Row count for one database table. */
export interface DatabaseTableStats {
name: string
rows: number
}
/** Aggregate database storage statistics. */
export interface DatabaseStorageStats {
files: DatabaseFileStats[]
tables: DatabaseTableStats[]
// Total size in bytes across all database files.
total_size: number
}
/** Full local-cache statistics payload returned by the backend. */
export interface LocalCacheStats {
directories: CacheDirectoryStats[]
database: DatabaseStorageStats
}
/** Result of a cleanup request for one cache target. */
export interface LocalCacheCleanupResult {
success: boolean
message: string
target: 'images' | 'emoji' | 'log_files' | 'database_logs'
removed_files: number
removed_bytes: number
removed_records: number
}
/** Allowed cleanup targets, derived from the result type. */
export type LocalCacheCleanupTarget = LocalCacheCleanupResult['target']
/** Database tables eligible for log cleanup. */
export type LogCleanupTable = 'llm_usage' | 'tool_records' | 'mai_messages'
/**
* 检查 WebUI 是否有 PyPI 新版本
*/
@@ -70,3 +114,38 @@ export async function getDashboardVersionStatus(
return await response.json()
}
/**
 * Fetch local cache statistics (directories + database) from the backend.
 *
 * @returns the parsed LocalCacheStats payload
 * @throws Error carrying the server's `detail` (or a fallback message) on a non-2xx response
 */
export async function getLocalCacheStats(): Promise<LocalCacheStats> {
  const res = await fetchWithAuth('/api/webui/system/local-cache', {
    headers: getAuthHeaders(),
    method: 'GET',
  })
  if (res.ok) {
    return await res.json()
  }
  const body = await res.json()
  throw new Error(body.detail || '获取本地缓存统计失败')
}
/**
 * Ask the backend to clean up one local-cache target.
 *
 * @param target which cache area to clean ('images' | 'emoji' | 'log_files' | 'database_logs')
 * @param tables database tables to purge; only meaningful for 'database_logs'
 * @returns the cleanup result reported by the server
 * @throws Error carrying the server's `detail` (or a fallback message) on a non-2xx response
 */
export async function cleanupLocalCache(
  target: LocalCacheCleanupTarget,
  tables: LogCleanupTable[] = []
): Promise<LocalCacheCleanupResult> {
  const payload = JSON.stringify({ target, tables })
  const response = await fetchWithAuth('/api/webui/system/local-cache/cleanup', {
    body: payload,
    headers: {
      ...getAuthHeaders(),
      'Content-Type': 'application/json',
    },
    method: 'POST',
  })
  if (response.ok) {
    return await response.json()
  }
  const error = await response.json()
  throw new Error(error.detail || '清理本地缓存失败')
}

View File

@@ -6,6 +6,7 @@
*/
import {
Activity,
AlertCircle,
ArrowRight,
Bot,
Brain,
@@ -38,13 +39,14 @@ import type {
CycleStartEvent,
MaisakaToolCall,
MessageIngestedEvent,
MessageSentEvent,
PlannerFinalizedEvent,
PlannerResponseEvent,
ReplierResponseEvent,
TimingGateResultEvent,
ToolExecutionEvent,
} from '@/lib/maisaka-monitor-client'
import type { SessionInfo, TimelineEntry } from './use-maisaka-monitor'
import type { SessionInfo, StageStatusInfo, TimelineEntry } from './use-maisaka-monitor'
import { useMaisakaMonitor } from './use-maisaka-monitor'
// ─── 工具函数 ──────────────────────────────────────────────────
@@ -78,11 +80,13 @@ function formatRelativeTime(ts: number): string {
function SessionSidebar({
sessions,
stageStatuses,
selectedSession,
onSelect,
collapsed,
}: {
sessions: Map<string, SessionInfo>
stageStatuses: Map<string, StageStatusInfo>
selectedSession: string | null
onSelect: (id: string) => void
collapsed: boolean
@@ -110,31 +114,36 @@ function SessionSidebar({
return (
<div className={cn('flex flex-col gap-1', collapsed ? 'items-center p-2' : 'p-2')}>
{sortedSessions.map((session) => (
{sortedSessions.map((session) => {
const status = stageStatuses.get(session.sessionId)
return (
<button
key={session.sessionId}
onClick={() => onSelect(session.sessionId)}
title={session.sessionName}
className={cn(
'rounded-lg text-left text-sm transition-colors',
'max-w-full overflow-hidden rounded-lg text-left text-sm transition-colors',
'hover:bg-accent/50',
collapsed
? 'flex h-10 w-10 items-center justify-center p-0'
: 'flex w-full flex-col items-start gap-0.5 px-2.5 py-2',
: 'flex w-full min-w-0 flex-col items-start gap-0.5 px-2.5 py-2',
selectedSession === session.sessionId && 'bg-accent text-accent-foreground',
)}
>
<div className={cn('flex w-full items-center', collapsed ? 'justify-center' : 'justify-between gap-2')}>
<div className={cn('flex min-w-0 items-center gap-2', !collapsed && 'flex-1')}>
<span className="flex h-7 w-7 shrink-0 items-center justify-center rounded-md bg-primary/10 text-xs font-semibold text-primary">
<div className={cn('flex w-full min-w-0 items-center', collapsed ? 'justify-center' : 'justify-between gap-2')}>
<div className={cn('flex min-w-0 items-center gap-2 overflow-hidden', !collapsed && 'flex-1')}>
<span className="relative flex h-7 w-7 shrink-0 items-center justify-center rounded-md bg-primary/10 text-xs font-semibold text-primary">
{getSessionInitial(session)}
{status && (
<span className="absolute -right-0.5 -top-0.5 h-2.5 w-2.5 rounded-full bg-emerald-500 ring-2 ring-background" />
)}
</span>
{false && session.isGroupChat !== undefined && (
<Badge variant="outline" className="h-4 shrink-0 px-1 text-[10px]">
{session.isGroupChat ? '群' : '私'}
</Badge>
)}
{!collapsed && <span className="min-w-0 flex-1 truncate font-medium" title={session.sessionName}>
{!collapsed && <span className="block min-w-0 flex-1 overflow-hidden text-ellipsis whitespace-nowrap font-medium" title={session.sessionName}>
{session.sessionName}
</span>}
</div>
@@ -142,17 +151,58 @@ function SessionSidebar({
{session.eventCount}
</Badge>}
</div>
{!collapsed && <span className="text-xs text-muted-foreground">
{formatRelativeTime(session.lastActivity)}
</span>}
{!collapsed && (
<div className="flex w-full min-w-0 items-center justify-between gap-2 overflow-hidden text-xs text-muted-foreground">
<span className="shrink-0">{formatRelativeTime(session.lastActivity)}</span>
{status && <span className="min-w-0 truncate text-primary">{status.stage}</span>}
</div>
)}
</button>
))}
)
})}
</div>
)
}
// ─── 单条时间线事件渲染 ──────────────────────────────────────
/**
 * Compact status banner for the selected session: current stage, round text,
 * agent state and a relative "last updated" time; renders a muted placeholder
 * box when no status is available.
 */
function StageStatusPanel({ status }: { status?: StageStatusInfo }) {
// No status yet — render an empty placeholder container.
if (!status) {
return (
<div className="mb-3 rounded-md border bg-muted/30 px-3 py-2 text-sm text-muted-foreground">
</div>
)
}
return (
<div className="mb-3 rounded-md border bg-background px-3 py-2">
<div className="flex flex-wrap items-center gap-2">
<Badge variant="default" className="gap-1">
<Activity className="h-3 w-3" />
{status.stage || '未知阶段'}
</Badge>
{status.roundText && (
<Badge variant="secondary" className="text-[10px]">
{status.roundText}
</Badge>
)}
{status.agentState && (
<Badge variant={status.agentState === 'running' ? 'default' : 'outline'} className="text-[10px]">
{status.agentState}
</Badge>
)}
<span className="ml-auto text-xs text-muted-foreground">
{formatRelativeTime(status.updatedAt)}
</span>
</div>
{status.detail && (
<p className="mt-1 text-sm text-muted-foreground">{status.detail}</p>
)}
</div>
)
}
function MessageIngestedCard({ data }: { data: MessageIngestedEvent }) {
return (
<div className="flex items-start gap-3">
@@ -172,6 +222,26 @@ function MessageIngestedCard({ data }: { data: MessageIngestedEvent }) {
)
}
/**
 * Timeline card for an outgoing bot message (emerald accent).
 * Falls back to '麦麦' as the speaker name and a non-text placeholder when
 * the message has no text content.
 */
function MessageSentCard({ data }: { data: MessageSentEvent }) {
return (
<div className="flex items-start gap-3 rounded-md border border-emerald-500/30 bg-emerald-500/5 px-3 py-2">
<div className="mt-1 flex h-7 w-7 shrink-0 items-center justify-center rounded-full bg-emerald-500/15 text-emerald-500">
<Bot className="h-3.5 w-3.5" />
</div>
<div className="flex-1 min-w-0">
<div className="mb-1 flex items-center gap-2">
<span className="font-medium text-sm">{data.speaker_name || '麦麦'}</span>
<Badge variant="outline" className="text-[10px]"></Badge>
<span className="text-xs text-muted-foreground">{formatTimestamp(data.timestamp)}</span>
</div>
<p className="text-sm text-foreground/80 whitespace-pre-wrap wrap-break-word leading-relaxed">
{data.content || '[非文本消息]'}
</p>
</div>
</div>
)
}
function CycleStartCard({ data }: { data: CycleStartEvent }) {
return (
<div className="flex items-center gap-3">
@@ -201,7 +271,7 @@ function TimingGateCard({ data }: { data: TimingGateResultEvent }) {
const Icon = config.icon
return (
<div className="flex items-start gap-3">
<div className="flex items-start gap-3 rounded-md border bg-background px-3 py-2 shadow-sm">
<div className="mt-1 flex h-7 w-7 shrink-0 items-center justify-center rounded-full bg-amber-500/15 text-amber-500">
<Timer className="h-3.5 w-3.5" />
</div>
@@ -246,6 +316,38 @@ function openPromptHtml(uri: string) {
window.open(normalized, '_blank', 'noopener,noreferrer')
}
/**
 * Heuristic: was this planner cycle interrupted?
 *
 * True when the event carries an explicit `interrupted` flag, or when the
 * planner block looks like a synthetic interruption record: content starting
 * with "Planner " while prompt/completion token counts are zero and no tool
 * calls were made.
 */
function isPlannerInterrupted(data: PlannerFinalizedEvent) {
  if (data.interrupted === true) {
    return true
  }
  const planner = data.planner
  const text = planner?.content?.trim() ?? ''
  const looksSynthetic =
    text.startsWith('Planner ') &&
    planner?.prompt_tokens === 0 &&
    planner?.completion_tokens === 0 &&
    planner?.tool_calls.length === 0
  return looksSynthetic
}
/**
 * Timeline card shown instead of the full planner output when a planner
 * cycle was interrupted (amber accent). Shows the cycle id, the planner's
 * duration when available, and the planner content or a default notice.
 */
function PlannerInterruptedCard({ data }: { data: PlannerFinalizedEvent }) {
const planner = data.planner
return (
<div className="rounded-md border border-amber-500/35 bg-amber-500/5 px-3 py-2">
<div className="flex items-center gap-2 text-sm">
<AlertCircle className="h-4 w-4 shrink-0 text-amber-500" />
<span className="font-medium">Planner </span>
<Badge variant="outline" className="ml-auto text-[10px]">
#{data.cycle_id}
</Badge>
{planner && planner.duration_ms > 0 && (
<span className="text-xs text-muted-foreground">{formatMs(planner.duration_ms)}</span>
)}
</div>
<p className="mt-1 text-sm text-muted-foreground">
{planner?.content || '收到新消息,已停止当前思考并准备重新决策。'}
</p>
</div>
)
}
function PlannerResponseCard({ data }: { data: PlannerResponseEvent }) {
return (
<div className="flex items-start gap-3">
@@ -330,11 +432,26 @@ function PlannerToolCallsBlock({ data }: { data: PlannerFinalizedEvent }) {
duration_ms: 0,
summary: '',
}))
const isFinishTool = (toolName?: string) => toolName?.trim().toLowerCase() === 'finish'
const finishTools = displayTools.filter((tool) => isFinishTool(tool.tool_name))
const regularTools = displayTools.filter((tool) => !isFinishTool(tool.tool_name))
if (displayTools.length <= 0) {
return null
}
if (regularTools.length <= 0 && finishTools.length > 0) {
return (
<div className="rounded-md border border-emerald-500/30 bg-emerald-500/5 px-3 py-2">
<div className="flex items-center gap-2 text-sm">
<CheckCircle2 className="h-4 w-4 shrink-0 text-emerald-500" />
<span className="font-medium"></span>
<span className="text-muted-foreground"></span>
</div>
</div>
)
}
return (
<Card className="border-l-4 border-l-teal-500/60">
<CardHeader className="py-3 px-4 space-y-2">
@@ -342,11 +459,18 @@ function PlannerToolCallsBlock({ data }: { data: PlannerFinalizedEvent }) {
<Wrench className="h-4 w-4 text-teal-500" />
<CardTitle className="text-sm font-medium">Planner </CardTitle>
<Badge variant="secondary" className="ml-auto text-[10px]">
{displayTools.length}
{regularTools.length}
</Badge>
</div>
{finishTools.length > 0 && (
<div className="flex items-center gap-2 rounded-md border border-emerald-500/30 bg-emerald-500/5 px-2.5 py-1.5 text-xs">
<CheckCircle2 className="h-3.5 w-3.5 shrink-0 text-emerald-500" />
<span className="font-medium"></span>
<span className="text-muted-foreground"></span>
</div>
)}
<div className="space-y-2">
{displayTools.map((tool, idx) => (
{regularTools.map((tool, idx) => (
<div
key={`${tool.tool_call_id || tool.tool_name}-${idx}`}
className="rounded-md border bg-muted/40 px-2.5 py-2 text-xs"
@@ -410,30 +534,62 @@ function ToolExecutionCard({ data }: { data: ToolExecutionEvent }) {
)
}
/**
 * Human-readable description of why a think cycle ended.
 *
 * Prefers the event's own `end_detail` text (trimmed); otherwise maps the
 * `end_reason` code to a canned Chinese description, with a generic fallback
 * for unknown codes.
 */
function getCycleEndReasonText(data: CycleEndEvent) {
  const detail = data.end_detail?.trim()
  if (detail) {
    return detail
  }
  const reason = data.end_reason ?? ''
  // Prefixed form carries the pausing tool's name after the colon.
  if (reason.startsWith('tool_pause:')) {
    return `工具 ${reason.slice('tool_pause:'.length)} 要求暂停当前思考循环。`
  }
  switch (reason) {
    case 'finish':
      return 'Planner 调用 finish结束本轮思考并等待新消息。'
    case 'timing_no_reply':
      return 'Timing Gate 选择 no_reply本轮不会进入 Planner。'
    case 'max_rounds':
      return '已达到内部思考轮次上限,本轮处理结束。'
    case 'planner_interrupted':
      return 'Planner 被新消息打断,当前轮结束。'
    case 'tool_pause':
      return '工具要求暂停当前思考循环。'
    case 'empty_planner_response':
      return 'Planner 没有返回文本或工具调用,本轮思考结束。'
    case 'tool_continue':
      return 'Planner 工具执行完成,继续下一轮内部思考。'
    default:
      return '本轮思考完成。'
  }
}
/**
 * Short badge label for a cycle-end reason code; generic fallback for
 * unknown codes. Both the bare and tool-named `tool_pause` forms share
 * one label.
 */
function getCycleEndReasonLabel(data: CycleEndEvent) {
  const reason = data.end_reason ?? ''
  if (reason === 'tool_pause' || reason.startsWith('tool_pause:')) {
    return '工具暂停'
  }
  const labels: Record<string, string> = {
    finish: 'finish 结束',
    timing_no_reply: 'no_reply 结束',
    max_rounds: '轮次上限',
    planner_interrupted: 'Planner 打断',
    empty_planner_response: '空响应',
    tool_continue: '继续下一轮',
  }
  return labels[reason] ?? '循环结束'
}
function CycleEndCard({ data }: { data: CycleEndEvent }) {
const totalTime = Object.values(data.time_records).reduce((a, b) => a + b, 0)
return (
<div className="flex items-center gap-3">
<div className="mt-0.5 flex h-7 w-7 shrink-0 items-center justify-center rounded-full bg-slate-500/15 text-slate-500">
<CircleDot className="h-3.5 w-3.5" />
</div>
<div className="flex items-center gap-2 flex-wrap">
<span className="text-sm text-muted-foreground"></span>
<Badge variant="outline" className="text-[10px]">
{formatMs(totalTime * 1000)}
</Badge>
{Object.entries(data.time_records).map(([name, duration]) => (
<span key={name} className="text-[10px] text-muted-foreground">
{name}: {formatMs(duration * 1000)}
</span>
))}
<Badge
variant={data.agent_state === 'running' ? 'default' : 'secondary'}
className="text-[10px]"
>
{data.agent_state}
</Badge>
<div className="my-1 space-y-1.5">
<div className="flex items-center gap-3">
<Separator className="flex-1" />
<div className="flex items-center gap-2 rounded-full border bg-background px-3 py-1">
<CircleDot className="h-3.5 w-3.5 text-slate-500" />
<span className="text-xs text-muted-foreground">{getCycleEndReasonLabel(data)}</span>
<Badge variant="outline" className="text-[10px]">
#{data.cycle_id}
</Badge>
<span className="text-[10px] text-muted-foreground">{formatMs(totalTime * 1000)}</span>
<Badge
variant={data.agent_state === 'running' ? 'default' : 'secondary'}
className="text-[10px]"
>
{data.agent_state}
</Badge>
</div>
<Separator className="flex-1" />
</div>
<p className="text-center text-xs text-muted-foreground">{getCycleEndReasonText(data)}</p>
</div>
)
}
@@ -551,6 +707,8 @@ function TimelineEventRenderer({
switch (entry.type) {
case 'message.ingested':
return <MessageIngestedCard data={entry.data as MessageIngestedEvent} />
case 'message.sent':
return <MessageSentCard data={entry.data as MessageSentEvent} />
case 'cycle.start':
if (!showCycleMarkers) return null
return <CycleStartCard data={entry.data as CycleStartEvent} />
@@ -559,6 +717,12 @@ function TimelineEventRenderer({
case 'planner.response':
return <PlannerResponseCard data={entry.data as PlannerResponseEvent} />
case 'planner.finalized':
if (isPlannerInterrupted(entry.data as PlannerFinalizedEvent)) {
return <PlannerInterruptedCard data={entry.data as PlannerFinalizedEvent} />
}
if ((entry.data as PlannerFinalizedEvent).timing_gate?.result?.action === 'no_reply') {
return null
}
return (
<div className="space-y-2">
<PlannerFinalizedCard data={entry.data as PlannerFinalizedEvent} />
@@ -583,6 +747,7 @@ export function MaisakaMonitor() {
const {
timeline,
sessions,
stageStatuses,
selectedSession,
setSelectedSession,
connected,
@@ -629,7 +794,7 @@ export function MaisakaMonitor() {
// 统计当前会话的各事件类型计数
const stats = {
messages: timeline.filter((e) => e.type === 'message.ingested').length,
messages: timeline.filter((e) => e.type === 'message.ingested' || e.type === 'message.sent').length,
cycles: timeline.filter((e) => e.type === 'cycle.start').length,
toolCalls: timeline.reduce((count, entry) => {
if (entry.type === 'tool.execution') {
@@ -641,6 +806,7 @@ export function MaisakaMonitor() {
return count
}, 0),
}
const selectedStageStatus = selectedSession ? stageStatuses.get(selectedSession) : undefined
return (
<div className="flex h-[calc(100vh-180px)] gap-4">
@@ -674,6 +840,7 @@ export function MaisakaMonitor() {
<ScrollArea className="flex-1">
<SessionSidebar
sessions={sessions}
stageStatuses={stageStatuses}
selectedSession={selectedSession}
onSelect={setSelectedSession}
collapsed={sidebarCollapsed}
@@ -742,6 +909,8 @@ export function MaisakaMonitor() {
</div>
{/* 时间线 */}
<StageStatusPanel status={selectedStageStatus} />
<Card className="flex-1 overflow-hidden">
<ScrollArea
className="h-full"
@@ -759,19 +928,32 @@ export function MaisakaMonitor() {
</div>
) : (
(() => {
const continuedTimingGateCycles = new Set<string>()
const noReplyTimingGateCycles = new Set<string>()
return timeline.map((entry) => {
if (entry.type === 'timing_gate.result') {
const data = entry.data as TimingGateResultEvent
if (data.action === 'continue') {
continuedTimingGateCycles.add(buildCycleKey(data.session_id, data.cycle_id))
if (data.action === 'no_reply') {
noReplyTimingGateCycles.add(buildCycleKey(data.session_id, data.cycle_id))
}
}
if (entry.type === 'planner.response' || entry.type === 'planner.finalized') {
const data = entry.data as PlannerResponseEvent | PlannerFinalizedEvent
if (!continuedTimingGateCycles.has(buildCycleKey(data.session_id, data.cycle_id))) {
const cycleKey = buildCycleKey(data.session_id, data.cycle_id)
if (entry.type === 'planner.finalized' && isPlannerInterrupted(data as PlannerFinalizedEvent)) {
const rendered = <TimelineEventRenderer entry={entry} showCycleMarkers={showCycleMarkers} />
if (!rendered) return null
return (
<div
key={entry.id}
className="animate-in fade-in-0 slide-in-from-bottom-2 duration-300"
>
{rendered}
</div>
)
}
if (noReplyTimingGateCycles.has(cycleKey)) {
return null
}
}
@@ -784,9 +966,6 @@ export function MaisakaMonitor() {
className="animate-in fade-in-0 slide-in-from-bottom-2 duration-300"
>
{rendered}
{entry.type === 'cycle.end' && (
<Separator className="mt-3" />
)}
</div>
)
})
@@ -799,3 +978,4 @@ export function MaisakaMonitor() {
</div>
)
}

View File

@@ -35,6 +35,17 @@ export interface SessionInfo {
eventCount: number
}
/** Normalized, camelCase stage status kept in the frontend store (wire form is snake_case). */
export interface StageStatusInfo {
sessionId: string
sessionName?: string
stage: string
detail: string
roundText: string
agentState: string
// Unix seconds when the current stage started.
stageStartedAt: number
// Unix seconds of the latest update; used for freshness comparison.
updatedAt: number
}
/** 前端内存中最多恢复/展示的时间线条目数,避免一次渲染过多节点。 */
const MAX_TIMELINE_ENTRIES = 3000
/** IndexedDB 中最多持久化的时间线条目数。 */
@@ -78,6 +89,7 @@ function resolveSessionDisplayName({
let entryCounter = 0
let cachedTimeline: TimelineEntry[] = []
let cachedSessions: Map<string, SessionInfo> = new Map()
let cachedStageStatuses: Map<string, StageStatusInfo> = new Map()
let cachedSelectedSession: string | null = null
let cachedConnected = false
let backgroundCollectionEnabled = false
@@ -121,6 +133,23 @@ interface MaisakaMonitorDb extends DBSchema {
}
}
/**
 * Normalize a raw wire payload (snake_case, unknown-typed) into a
 * StageStatusInfo, or null when the payload lacks a usable session_id.
 * Missing/mistyped string fields fall back to '' (session_name to
 * undefined); missing/mistyped timestamps fall back to "now" in seconds.
 */
function toStageStatusInfo(raw: Record<string, unknown>): StageStatusInfo | null {
  // Small coercion helpers: keep the value only when it has the right type.
  const str = (value: unknown) => (typeof value === 'string' ? value : '')
  const num = (value: unknown) => (typeof value === 'number' ? value : Date.now() / 1000)
  const sessionId = str(raw.session_id)
  if (!sessionId) {
    return null
  }
  return {
    sessionId,
    sessionName: typeof raw.session_name === 'string' ? raw.session_name : undefined,
    stage: str(raw.stage),
    detail: str(raw.detail),
    roundText: str(raw.round_text),
    agentState: str(raw.agent_state),
    stageStartedAt: num(raw.stage_started_at),
    updatedAt: num(raw.updated_at),
  }
}
function notifyStoreListeners() {
storeListeners.forEach((listener) => listener())
}
@@ -359,15 +388,80 @@ function updateSessionInfo(event: MaisakaMonitorEvent, sessionId: string, timest
cachedSessions = next
}
/**
 * Apply a stage-related monitor event to the module-level
 * `cachedStageStatuses` map. Handles three event types:
 * - 'stage.snapshot': merges every valid entry from the snapshot;
 * - 'stage.status': merges a single status;
 * - 'stage.removed': drops the session's status.
 * Each branch replaces the map with a fresh copy (immutably) so React
 * consumers comparing references see the change. Other event types are
 * ignored.
 */
function updateStageStatus(event: MaisakaMonitorEvent) {
// Only overwrite an existing entry when the incoming status is not older.
const applyStatusIfFresh = (next: Map<string, StageStatusInfo>, status: StageStatusInfo) => {
const existing = next.get(status.sessionId)
if (existing && status.updatedAt < existing.updatedAt) {
return
}
next.set(status.sessionId, status)
}
if (event.type === 'stage.snapshot') {
const rawEntries = (event.data as unknown as Record<string, unknown>).entries
if (!Array.isArray(rawEntries)) {
return
}
const next = new Map(cachedStageStatuses)
for (const rawEntry of rawEntries) {
// Skip null/non-object entries defensively before normalizing.
if (!rawEntry || typeof rawEntry !== 'object') {
continue
}
const status = toStageStatusInfo(rawEntry as Record<string, unknown>)
if (status) {
applyStatusIfFresh(next, status)
}
}
cachedStageStatuses = next
return
}
if (event.type === 'stage.status') {
const status = toStageStatusInfo(event.data as unknown as Record<string, unknown>)
if (!status) {
return
}
const next = new Map(cachedStageStatuses)
applyStatusIfFresh(next, status)
cachedStageStatuses = next
return
}
if (event.type === 'stage.removed') {
const dataRecord = event.data as unknown as Record<string, unknown>
const sessionId = typeof dataRecord.session_id === 'string' ? dataRecord.session_id : ''
if (!sessionId) {
return
}
const next = new Map(cachedStageStatuses)
next.delete(sessionId)
cachedStageStatuses = next
}
}
function handleMonitorEvent(event: MaisakaMonitorEvent) {
const dataRecord = event.data as unknown as Record<string, unknown>
const sessionId = dataRecord.session_id as string
const timestamp = dataRecord.timestamp as number
if (event.type === 'stage.snapshot') {
updateStageStatus(event)
notifyStoreListeners()
return
}
if (!sessionId || typeof timestamp !== 'number') {
return
}
if (event.type === 'stage.status' || event.type === 'stage.removed') {
updateStageStatus(event)
updateSessionInfo(event, sessionId, timestamp)
schedulePersistMonitorSnapshot(undefined, sessionId)
notifyStoreListeners()
return
}
const entry: TimelineEntry = {
id: `evt_${++entryCounter}_${Date.now()}`,
type: event.type,
@@ -435,6 +529,7 @@ function stopMonitorSubscriptionIfIdle() {
export function useMaisakaMonitor() {
const [timeline, setTimeline] = useState<TimelineEntry[]>(cachedTimeline)
const [sessions, setSessions] = useState<Map<string, SessionInfo>>(new Map(cachedSessions))
const [stageStatuses, setStageStatuses] = useState<Map<string, StageStatusInfo>>(new Map(cachedStageStatuses))
const [selectedSession, setSelectedSessionState] = useState<string | null>(cachedSelectedSession)
const [connected, setConnected] = useState(cachedConnected)
const [backgroundCollection, setBackgroundCollection] = useState(loadBackgroundCollectionPreference)
@@ -445,6 +540,7 @@ export function useMaisakaMonitor() {
const syncFromStore = () => {
setTimeline(cachedTimeline)
setSessions(new Map(cachedSessions))
setStageStatuses(new Map(cachedStageStatuses))
setSelectedSessionState(cachedSelectedSession)
setConnected(cachedConnected)
setBackgroundCollection(backgroundCollectionEnabled)
@@ -462,9 +558,11 @@ export function useMaisakaMonitor() {
const clearTimeline = useCallback(() => {
cachedTimeline = []
cachedSessions = new Map()
cachedStageStatuses = new Map()
cachedSelectedSession = null
setTimeline([])
setSessions(new Map())
setStageStatuses(new Map())
setSelectedSessionState(null)
pendingPersistEntries = []
pendingPersistSessionIds = new Set()
@@ -504,6 +602,7 @@ export function useMaisakaMonitor() {
timeline: filteredTimeline,
allTimeline: timeline,
sessions,
stageStatuses,
selectedSession,
setSelectedSession,
connected,

View File

@@ -29,6 +29,7 @@ import {
import { cn } from '@/lib/utils'
const PAGE_SIZE = 50
const AUTO_SESSION = 'auto'
function formatTime(timestamp: number | null, modifiedAt: number): string {
const value = timestamp ? timestamp : modifiedAt * 1000
@@ -51,8 +52,8 @@ export function ReasoningProcessPage() {
const [items, setItems] = useState<ReasoningPromptFile[]>([])
const [stages, setStages] = useState<string[]>([])
const [sessions, setSessions] = useState<string[]>([])
const [stage, setStage] = useState('all')
const [session, setSession] = useState('all')
const [stage, setStage] = useState('planner')
const [session, setSession] = useState(AUTO_SESSION)
const [search, setSearch] = useState('')
const [page, setPage] = useState(1)
const [refreshKey, setRefreshKey] = useState(0)
@@ -85,6 +86,9 @@ export function ReasoningProcessPage() {
setItems(data.items)
setStages(data.stages)
setSessions(data.sessions)
if (data.selected_session && data.selected_session !== session) {
setSession(data.selected_session)
}
setTotal(data.total)
setSelected((current) => {
if (
@@ -185,7 +189,8 @@ export function ReasoningProcessPage() {
onValueChange={(value) =>
resetToFirstPage(() => {
setStage(value)
setSession('all')
setSession(AUTO_SESSION)
setSelected(null)
})
}
>
@@ -193,7 +198,11 @@ export function ReasoningProcessPage() {
<SelectValue placeholder="阶段" />
</SelectTrigger>
<SelectContent>
<SelectItem value="all"></SelectItem>
{!stages.includes(stage) && (
<SelectItem value={stage}>
{stage}
</SelectItem>
)}
{stages.map((item) => (
<SelectItem key={item} value={item}>
{item}
@@ -205,12 +214,15 @@ export function ReasoningProcessPage() {
<Select
value={session}
onValueChange={(value) => resetToFirstPage(() => setSession(value))}
disabled={sessions.length === 0 && loading}
>
<SelectTrigger>
<SelectValue placeholder="会话" />
</SelectTrigger>
<SelectContent>
<SelectItem value="all"></SelectItem>
{session === AUTO_SESSION && (
<SelectItem value={AUTO_SESSION}></SelectItem>
)}
{sessions.map((item) => (
<SelectItem key={item} value={item}>
{item}

View File

@@ -0,0 +1,313 @@
import { Database, HardDrive, Image, RefreshCw, Sparkles, Trash2 } from 'lucide-react'
import { useCallback, useEffect, useMemo, useState } from 'react'
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
} from '@/components/ui/alert-dialog'
import { Button } from '@/components/ui/button'
import { Checkbox } from '@/components/ui/checkbox'
import { useToast } from '@/hooks/use-toast'
import {
cleanupLocalCache,
getLocalCacheStats,
type CacheDirectoryStats,
type LocalCacheCleanupTarget,
type LocalCacheStats,
type LogCleanupTable,
} from '@/lib/system-api'
const LOG_CLEANUP_OPTIONS: Array<{
table: LogCleanupTable
label: string
description: string
}> = [
{ table: 'llm_usage', label: 'llm_usage', description: '记录 LLM 调用统计信息' },
{ table: 'tool_records', label: 'tool_records', description: '记录工具使用记录' },
{ table: 'mai_messages', label: 'mai_messages', description: '清理收到的消息' },
]
/**
 * Render a byte count as a short human-readable string (base-1024 units,
 * B through TB). Non-finite or non-positive inputs render as '0 B'.
 * One decimal place is shown only for values below 10 in units above bytes.
 */
function formatBytes(bytes: number): string {
  if (!Number.isFinite(bytes) || bytes <= 0) {
    return '0 B'
  }
  const UNITS = ['B', 'KB', 'MB', 'GB', 'TB'] as const
  // Pick the largest unit that keeps the value >= 1, capped at TB.
  const idx = Math.min(Math.floor(Math.log(bytes) / Math.log(1024)), UNITS.length - 1)
  const scaled = bytes / 1024 ** idx
  const digits = scaled >= 10 || idx === 0 ? 0 : 1
  return `${scaled.toFixed(digits)} ${UNITS[idx]}`
}
/**
 * Icon for a cache directory card, chosen by cache key:
 * images → Image, emoji/emoji_thumbnails → Sparkles, anything else → HardDrive.
 */
function CacheIcon({ cacheKey }: { cacheKey: string }) {
if (cacheKey === 'images') {
return <Image className="h-4 w-4 text-primary" />
}
if (cacheKey === 'emoji' || cacheKey === 'emoji_thumbnails') {
return <Sparkles className="h-4 w-4 text-primary" />
}
return <HardDrive className="h-4 w-4 text-primary" />
}
/**
 * Card showing one cache directory's stats (file count, size, db records,
 * existence) with an optional confirm-dialog cleanup button.
 * Only the 'images', 'emoji' and 'logs' directories map to a cleanup target;
 * other keys render without the cleanup button.
 * NOTE(review): several label elements below are empty — looks like display
 * text was lost in extraction; confirm against the original file.
 */
function DirectoryCard({
item,
cleanupDisabled,
onCleanup,
}: {
item: CacheDirectoryStats
cleanupDisabled: boolean
onCleanup: (target: 'images' | 'emoji' | 'log_files') => void
}) {
// Map directory key to its backend cleanup target; null disables cleanup.
const cleanupTarget = item.key === 'images' ? 'images' : item.key === 'emoji' ? 'emoji' : item.key === 'logs' ? 'log_files' : null
// Log cleanup only removes files; other targets also purge db records.
const cleanupDescription = cleanupTarget === 'log_files'
? '这会删除 logs 目录中的日志文件。操作不可撤销。'
: '这会删除对应目录中的文件,并移除数据库里的相关记录。操作不可撤销。'
return (
<div className="rounded-lg border bg-card p-4">
<div className="flex flex-col gap-3 sm:flex-row sm:items-start sm:justify-between">
<div className="min-w-0 space-y-2">
<div className="flex items-center gap-2">
<CacheIcon cacheKey={item.key} />
<h4 className="font-semibold">{item.label}</h4>
</div>
<p className="break-all text-xs text-muted-foreground">{item.path}</p>
</div>
{cleanupTarget && (
<AlertDialog>
<AlertDialogTrigger asChild>
<Button variant="outline" size="sm" className="gap-2" disabled={cleanupDisabled}>
<Trash2 className="h-4 w-4" />
</Button>
</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>{item.label}</AlertDialogTitle>
<AlertDialogDescription>
{cleanupDescription}
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel></AlertDialogCancel>
<AlertDialogAction onClick={() => onCleanup(cleanupTarget)}></AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
)}
</div>
<div className="mt-4 grid grid-cols-2 gap-3 sm:grid-cols-4">
<div>
<div className="text-xs text-muted-foreground"></div>
<div className="text-lg font-semibold">{item.file_count}</div>
</div>
<div>
<div className="text-xs text-muted-foreground"></div>
<div className="text-lg font-semibold">{formatBytes(item.total_size)}</div>
</div>
<div>
<div className="text-xs text-muted-foreground"></div>
<div className="text-lg font-semibold">{item.db_records}</div>
</div>
<div>
<div className="text-xs text-muted-foreground"></div>
<div className="text-lg font-semibold">{item.exists ? '存在' : '未创建'}</div>
</div>
</div>
</div>
)
}
/** 将 unknown 异常值转换为可展示给用户的错误描述。 */
function describeCleanupError(error: unknown): string {
  return error instanceof Error ? error.message : '请稍后重试'
}

/**
 * 设置页的「本地缓存」标签页。
 *
 * 展示 data 目录与数据库的缓存统计,并提供:
 * - 各缓存目录(images / emoji / log_files)的清理入口;
 * - 数据库日志表的多选清理入口。
 */
export function LocalCacheTab() {
  const { toast } = useToast()
  const [stats, setStats] = useState<LocalCacheStats | null>(null)
  const [isLoading, setIsLoading] = useState(false)
  // 当前正在执行的清理目标;非空时禁用全部清理按钮,避免并发清理。
  const [cleanupTarget, setCleanupTarget] = useState<LocalCacheCleanupTarget | null>(null)
  const [selectedLogTables, setSelectedLogTables] = useState<LogCleanupTable[]>([])

  // 表名 -> 行数 的查找表,避免渲染期间反复线性扫描 tables。
  const tableRows = useMemo(() => {
    const rows = new Map<string, number>()
    for (const table of stats?.database.tables ?? []) {
      rows.set(table.name, table.rows)
    }
    return rows
  }, [stats?.database.tables])

  // 勾选的日志表合计行数;与 tableRows 一致地用 useMemo 缓存,避免每次渲染重复归约。
  const selectedLogRows = useMemo(
    () => selectedLogTables.reduce((total, table) => total + (tableRows.get(table) ?? 0), 0),
    [selectedLogTables, tableRows],
  )

  /** 重新拉取本地缓存统计;失败时弹出错误提示。 */
  const refreshStats = useCallback(async () => {
    setIsLoading(true)
    try {
      setStats(await getLocalCacheStats())
    } catch (error) {
      toast({
        title: '获取本地缓存失败',
        description: describeCleanupError(error),
        variant: 'destructive',
      })
    } finally {
      setIsLoading(false)
    }
  }, [toast])

  /** 清理指定缓存目录,完成后刷新统计并提示清理结果。 */
  const handleDirectoryCleanup = async (target: 'images' | 'emoji' | 'log_files') => {
    setCleanupTarget(target)
    try {
      const result = await cleanupLocalCache(target)
      await refreshStats()
      toast({
        title: result.message,
        description: `删除 ${result.removed_files} 个文件,释放 ${formatBytes(result.removed_bytes)},移除 ${result.removed_records} 条记录。`,
      })
    } catch (error) {
      toast({
        title: '清理失败',
        description: describeCleanupError(error),
        variant: 'destructive',
      })
    } finally {
      setCleanupTarget(null)
    }
  }

  /** 清理勾选的数据库日志表;成功后清空勾选并刷新统计。 */
  const handleLogCleanup = async () => {
    setCleanupTarget('database_logs')
    try {
      const result = await cleanupLocalCache('database_logs', selectedLogTables)
      setSelectedLogTables([])
      await refreshStats()
      toast({
        title: result.message,
        description: `已清理 ${result.removed_records} 条数据库记录。`,
      })
    } catch (error) {
      toast({
        title: '数据库清理失败',
        description: describeCleanupError(error),
        variant: 'destructive',
      })
    } finally {
      setCleanupTarget(null)
    }
  }

  /** 切换某个日志表的勾选状态(幂等:重复勾选不会产生重复项)。 */
  const toggleLogTable = (table: LogCleanupTable, checked: boolean) => {
    setSelectedLogTables((current) => {
      if (checked) {
        return current.includes(table) ? current : [...current, table]
      }
      return current.filter((item) => item !== table)
    })
  }

  // 首次挂载即拉取一次统计。
  useEffect(() => {
    void refreshStats()
  }, [refreshStats])

  return (
    <div className="space-y-4 sm:space-y-6">
      <div className="rounded-lg border bg-card p-4 sm:p-6">
        <div className="flex flex-col gap-3 sm:flex-row sm:items-center sm:justify-between">
          <div>
            <h3 className="flex items-center gap-2 text-base font-semibold sm:text-lg">
              <HardDrive className="h-5 w-5" />
            </h3>
            <p className="mt-1 text-xs text-muted-foreground sm:text-sm">
              data
            </p>
          </div>
          <Button variant="outline" onClick={refreshStats} disabled={isLoading} className="gap-2">
            <RefreshCw className={`h-4 w-4 ${isLoading ? 'animate-spin' : ''}`} />
          </Button>
        </div>
      </div>
      <div className="grid gap-4">
        {(stats?.directories ?? []).map((item) => (
          <DirectoryCard
            key={item.key}
            item={item}
            cleanupDisabled={cleanupTarget !== null || isLoading}
            onCleanup={handleDirectoryCleanup}
          />
        ))}
      </div>
      <div className="rounded-lg border bg-card p-4 sm:p-6">
        <div className="flex flex-col gap-3 sm:flex-row sm:items-start sm:justify-between">
          <div>
            <h3 className="flex items-center gap-2 text-base font-semibold sm:text-lg">
              <Database className="h-5 w-5" />
            </h3>
            <p className="mt-1 text-xs text-muted-foreground sm:text-sm">
            </p>
          </div>
          <AlertDialog>
            <AlertDialogTrigger asChild>
              <Button variant="outline" className="gap-2" disabled={cleanupTarget !== null || isLoading}>
                <Trash2 className="h-4 w-4" />
              </Button>
            </AlertDialogTrigger>
            <AlertDialogContent>
              <AlertDialogHeader>
                <AlertDialogTitle></AlertDialogTitle>
                <AlertDialogDescription>
                  {formatBytes(stats?.database.total_size ?? 0)}
                </AlertDialogDescription>
              </AlertDialogHeader>
              <div className="space-y-3">
                {LOG_CLEANUP_OPTIONS.map((option) => {
                  const rows = tableRows.get(option.table) ?? 0
                  const checked = selectedLogTables.includes(option.table)
                  const checkboxId = `log-cleanup-${option.table}`
                  return (
                    <label
                      key={option.table}
                      htmlFor={checkboxId}
                      className="flex cursor-pointer items-start gap-3 rounded-md border p-3 hover:bg-muted/50"
                    >
                      <Checkbox
                        id={checkboxId}
                        checked={checked}
                        onCheckedChange={(value) => toggleLogTable(option.table, value === true)}
                        className="mt-0.5"
                      />
                      <span className="min-w-0 flex-1">
                        <span className="block text-sm font-medium">{option.label}</span>
                        <span className="block text-xs text-muted-foreground">{option.description}</span>
                        <span className="mt-1 block text-xs text-muted-foreground"> {rows} </span>
                      </span>
                    </label>
                  )
                })}
              </div>
              <div className="rounded-md bg-muted/50 p-3 text-xs text-muted-foreground">
                {selectedLogTables.length} {selectedLogRows}
              </div>
              <AlertDialogFooter>
                <AlertDialogCancel></AlertDialogCancel>
                <AlertDialogAction onClick={handleLogCleanup} disabled={selectedLogTables.length === 0}>
                </AlertDialogAction>
              </AlertDialogFooter>
            </AlertDialogContent>
          </AlertDialog>
        </div>
      </div>
    </div>
  )
}

View File

@@ -1,4 +1,4 @@
import { Info, Palette, Settings, Shield } from 'lucide-react'
import { HardDrive, Info, Palette, Settings, Shield } from 'lucide-react'
import { useTranslation } from 'react-i18next'
import { ScrollArea } from '@/components/ui/scroll-area'
@@ -6,6 +6,7 @@ import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'
import { AboutTab } from './AboutTab'
import { AppearanceTab } from './AppearanceTab'
import { LocalCacheTab } from './LocalCacheTab'
import { OtherTab } from './OtherTab'
import { SecurityTab } from './SecurityTab'
@@ -23,7 +24,7 @@ export function SettingsPage() {
{/* 标签页 */}
<Tabs defaultValue="appearance" className="w-full">
<TabsList className="grid w-full grid-cols-2 sm:grid-cols-4 gap-0.5 sm:gap-1 h-auto p-1">
<TabsList className="grid w-full grid-cols-2 sm:grid-cols-5 gap-0.5 sm:gap-1 h-auto p-1">
<TabsTrigger value="appearance" className="gap-1 sm:gap-2 text-xs sm:text-sm px-2 sm:px-3 py-2">
<Palette className="h-3.5 w-3.5 sm:h-4 sm:w-4" strokeWidth={2} fill="none" />
<span>{t('settings.tabs.appearance')}</span>
@@ -32,6 +33,10 @@ export function SettingsPage() {
<Shield className="h-3.5 w-3.5 sm:h-4 sm:w-4" strokeWidth={2} fill="none" />
<span>{t('settings.tabs.security')}</span>
</TabsTrigger>
<TabsTrigger value="local-cache" className="gap-1 sm:gap-2 text-xs sm:text-sm px-2 sm:px-3 py-2">
<HardDrive className="h-3.5 w-3.5 sm:h-4 sm:w-4" strokeWidth={2} fill="none" />
<span></span>
</TabsTrigger>
<TabsTrigger value="other" className="gap-1 sm:gap-2 text-xs sm:text-sm px-2 sm:px-3 py-2">
<Settings className="h-3.5 w-3.5 sm:h-4 sm:w-4" strokeWidth={2} fill="none" />
<span>{t('settings.tabs.other')}</span>
@@ -51,6 +56,10 @@ export function SettingsPage() {
<SecurityTab />
</TabsContent>
<TabsContent value="local-cache" className="mt-0">
<LocalCacheTab />
</TabsContent>
<TabsContent value="other" className="mt-0">
<OtherTab />
</TabsContent>

View File

@@ -10,6 +10,7 @@
在当前场景中,不同的人正在互动({bot_name}也是一位参与的用户),用户也可能与你进行聊天互动,你的任务不是生成对用户可见的发言,而是进行分析来指导AI进行动作。
“分析”应该体现你对当前局面的判断、你的建议、你的下一步计划,以及你为什么这样想。默认直接输出你当前的最新分析,不要重复之前的分析内容。最新分析应尽量具体,贴近上下文。
你需要先搜集能够帮助{bot_name}进行下一步行动的信息,然后再给出思考
如果获取的信息无命中、被过滤、或证据不足,不要编造信息。
{group_chat_attention_block}
@@ -22,16 +23,16 @@
- 其他定义的工具,你可以视情况合适使用
工具使用规则:
1. 你可以使用多个工具。
2. 如果存在工具可以帮助你执行某些动作,完成某些目标,直接使用该工具来完成任务
3. 如果看到 `<system-reminder>` 中列出了 deferred tools而你需要其中某个工具先调用 tool_search() 搜索该工具,等它在后续轮次变为可用后再正常调用。
1. 你可以一次使用多个工具。
2. 如果工具执行出现问题,尝试解决或使用替代方案
3. 你可以进行多次工具搜索,聚合不同的信息源,进行多种操作来辅助你
4. 如果存在工具可以帮助你执行某些动作,完成某些目标,直接使用该工具来完成任务
5. 如果看到 `<system-reminder>` 中列出了 deferred tools而你需要其中某个工具先调用 tool_search() 搜索该工具,等它在后续轮次变为可用后再正常调用。
长期记忆使用建议:
1. 仅当历史信息会明显影响当前回复时,才考虑调用 `query_memory()`。
2. 适合检索:过去事件、之前聊过的内容、长期偏好、先前承诺、任务进展、近期线索;不适合检索:寒暄、即时情绪回应、轻松接话、只看最近消息就能回答的内容。
3. 群聊里更克制;私聊里如果对方提到“之前”“上次”“最近”“还记得吗”“我喜欢”“我说过”等类似的信号,可以更积极考虑检索。
4. 模式上:`search` 查事实或偏好,`time` 查某段时间,`episode` 查某次经历,`aggregate` 查整体情况;拿不准时用 `hybrid`。
5. 如果无命中、被过滤、或证据不足,就不要编造。
现在,请你输出你对{bot_name}发言的分析,你必须先输出文本内容的分析,然后再进行工具调用:

View File

@@ -337,30 +337,11 @@ async def test_text_to_stream_triggers_real_chat_summary_writeback(
else None
),
)
monkeypatch.setattr(
memory_flow_service_module.global_config.memory,
"chat_summary_writeback_enabled",
True,
raising=False,
)
monkeypatch.setattr(
memory_flow_service_module.global_config.memory,
"chat_summary_writeback_message_threshold",
2,
raising=False,
)
monkeypatch.setattr(
memory_flow_service_module.global_config.memory,
"chat_summary_writeback_context_length",
10,
raising=False,
)
monkeypatch.setattr(
memory_flow_service_module.global_config.memory,
"person_fact_writeback_enabled",
False,
raising=False,
)
integration_config = memory_flow_service_module.global_config.a_memorix.integration
monkeypatch.setattr(integration_config, "chat_summary_writeback_enabled", True, raising=False)
monkeypatch.setattr(integration_config, "chat_summary_writeback_message_threshold", 2, raising=False)
monkeypatch.setattr(integration_config, "chat_summary_writeback_context_length", 10, raising=False)
monkeypatch.setattr(integration_config, "person_fact_writeback_enabled", False, raising=False)
await kernel.initialize()

View File

@@ -5,6 +5,14 @@ import pytest
from src.services import memory_flow_service as memory_flow_module
def _fake_global_config(**integration_values):
return SimpleNamespace(
a_memorix=SimpleNamespace(
integration=SimpleNamespace(**integration_values),
)
)
def test_person_fact_parse_fact_list_deduplicates_and_filters_short_items():
raw = '["他喜欢猫", "他喜欢猫", "", "", "他会弹吉他"]'
@@ -38,6 +46,43 @@ def test_person_fact_resolve_target_person_for_private_chat(monkeypatch):
assert person.person_id == "qq:123"
@pytest.mark.asyncio
async def test_person_fact_writeback_skips_bot_only_fact_without_user_evidence(monkeypatch):
stored_facts: list[tuple[str, str, str]] = []
class FakePerson:
person_id = "person-1"
person_name = "测试用户"
nickname = "测试用户"
is_known = True
service = memory_flow_module.PersonFactWritebackService.__new__(memory_flow_module.PersonFactWritebackService)
service._resolve_target_person = lambda message: FakePerson()
async def fake_extract_facts(person, reply_text, user_evidence_text):
del person, reply_text, user_evidence_text
return ["测试用户喜欢辣椒"]
async def fake_store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str, **kwargs):
del kwargs
stored_facts.append((person_name, memory_content, chat_id))
service._extract_facts = fake_extract_facts
monkeypatch.setattr(memory_flow_module, "store_person_memory_from_answer", fake_store_person_memory_from_answer)
monkeypatch.setattr(memory_flow_module, "find_messages", lambda **kwargs: [])
message = SimpleNamespace(
processed_plain_text="我记得你喜欢辣椒。",
session_id="session-1",
reply_to="",
session=SimpleNamespace(platform="qq", user_id="bot-1", group_id=""),
)
await service._handle_message(message)
assert stored_facts == []
@pytest.mark.asyncio
async def test_chat_summary_writeback_service_triggers_when_threshold_reached(monkeypatch):
events: list[tuple[str, object]] = []
@@ -45,12 +90,10 @@ async def test_chat_summary_writeback_service_triggers_when_threshold_reached(mo
monkeypatch.setattr(
memory_flow_module,
"global_config",
SimpleNamespace(
memory=SimpleNamespace(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
)
_fake_global_config(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
),
)
monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5)
@@ -94,12 +137,10 @@ async def test_chat_summary_writeback_service_skips_when_threshold_not_reached(m
monkeypatch.setattr(
memory_flow_module,
"global_config",
SimpleNamespace(
memory=SimpleNamespace(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=6,
chat_summary_writeback_context_length=9,
)
_fake_global_config(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=6,
chat_summary_writeback_context_length=9,
),
)
monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5)
@@ -135,12 +176,10 @@ async def test_chat_summary_writeback_service_restores_previous_trigger_count(mo
monkeypatch.setattr(
memory_flow_module,
"global_config",
SimpleNamespace(
memory=SimpleNamespace(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
)
_fake_global_config(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
),
)
monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 8)
@@ -178,12 +217,10 @@ async def test_chat_summary_writeback_service_falls_back_to_current_count_for_le
monkeypatch.setattr(
memory_flow_module,
"global_config",
SimpleNamespace(
memory=SimpleNamespace(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
)
_fake_global_config(
chat_summary_writeback_enabled=True,
chat_summary_writeback_message_threshold=3,
chat_summary_writeback_context_length=7,
),
)
monkeypatch.setattr(memory_flow_module, "count_messages", lambda **kwargs: 5)

View File

@@ -0,0 +1,115 @@
from types import SimpleNamespace
import pytest
from src.A_memorix.core.utils.person_profile_service import PersonProfileService
class FakeMetadataStore:
    """Stub metadata store exposing only the hooks PersonProfileService reads.

    One stable person_fact paragraph and one chat_summary paragraph are baked
    in; everything else returns the "nothing stored" shape of the real store.
    """

    def __init__(self) -> None:
        # Records every snapshot upsert so tests can inspect what was persisted.
        self.snapshots: list[dict] = []

    @staticmethod
    def get_latest_person_profile_snapshot(person_id: str):
        # No cached snapshot: force the service to rebuild the profile.
        del person_id
        return None

    @staticmethod
    def get_relations(**kwargs):
        del kwargs
        return []

    @staticmethod
    def get_paragraphs_by_source(source: str):
        # Only the canned person_fact source has a stored paragraph.
        if source != "person_fact:person-1":
            return []
        return [
            {
                "hash": "person-fact-1",
                "content": "测试用户喜欢猫。",
                "source": source,
                "metadata": {"source_type": "person_fact"},
                "created_at": 2.0,
                "updated_at": 2.0,
            }
        ]

    @staticmethod
    def get_paragraph(hash_value: str):
        # Dispatch table over the two known paragraph hashes; unknown -> None.
        known = {
            "chat-summary-1": {
                "hash": "chat-summary-1",
                "content": "机器人建议测试用户以后叫星灯。",
                "source": "chat_summary:session-1",
                "metadata": {"source_type": "chat_summary"},
                "word_count": 1,
            },
            "person-fact-1": {
                "hash": "person-fact-1",
                "content": "测试用户喜欢猫。",
                "source": "person_fact:person-1",
                "metadata": {"source_type": "person_fact"},
                "word_count": 1,
            },
        }
        return known.get(hash_value)

    @staticmethod
    def get_paragraph_stale_relation_marks_batch(paragraph_hashes):
        del paragraph_hashes
        return {}

    @staticmethod
    def get_relation_status_batch(relation_hashes):
        del relation_hashes
        return {}

    @staticmethod
    def get_person_profile_override(person_id: str):
        del person_id
        return None

    def upsert_person_profile_snapshot(self, **kwargs):
        # Keep the raw kwargs for assertions, then echo the stored row back
        # with a fixed updated_at, mimicking the real store's return shape.
        self.snapshots.append(kwargs)
        echoed = {
            key: kwargs[key]
            for key in (
                "person_id",
                "profile_text",
                "aliases",
                "relation_edges",
                "vector_evidence",
                "evidence_ids",
                "expires_at",
                "source_note",
            )
        }
        echoed["updated_at"] = 1.0
        return echoed
class FakeRetriever:
    """Stub vector retriever that always yields a single chat-summary hit."""

    async def retrieve(self, query: str, top_k: int):
        # Arguments are irrelevant: the canned hit is returned unconditionally.
        del query, top_k
        hit = SimpleNamespace(
            hash_value="chat-summary-1",
            result_type="paragraph",
            score=0.95,
            content="机器人建议测试用户以后叫星灯。",
            metadata={"source_type": "chat_summary"},
        )
        return [hit]
@pytest.mark.asyncio
async def test_person_profile_keeps_chat_summary_as_recent_interaction_not_stable_profile():
    """Chat-summary evidence must land under "近期相关互动:", not the stable profile."""
    # Fakes provide one stable person_fact paragraph plus one chat_summary
    # vector hit (the "星灯" nickname suggestion).
    metadata_store = FakeMetadataStore()
    service = PersonProfileService(metadata_store=metadata_store, retriever=FakeRetriever())
    # Bypass alias resolution; only profile assembly is under test here.
    service.get_person_aliases = lambda person_id: (["测试用户"], "测试用户", [])
    payload = await service.query_person_profile(person_id="person-1", top_k=6, force_refresh=True)
    assert payload["success"] is True
    profile_text = payload["profile_text"]
    # Everything before the "近期相关互动:" header is the stable profile section.
    stable_section = profile_text.split("近期相关互动:", 1)[0]
    # The person_fact stays in the stable section...
    assert "测试用户喜欢猫" in stable_section
    # ...while the chat-summary content must not leak into it.
    assert "星灯" not in stable_section
    # The chat summary is still surfaced, but only as a recent interaction.
    assert "近期相关互动:" in profile_text
    assert "星灯" in profile_text

View File

@@ -184,7 +184,6 @@ def test_timing_gate_invalid_tool_hint_only_visible_to_timing_gate() -> None:
def test_forced_timing_trigger_bypasses_message_frequency_threshold() -> None:
runtime = SimpleNamespace(
_STATE_WAIT="wait",
_agent_state="stop",
_message_turn_scheduled=False,
_internal_turn_queue=asyncio.Queue(),

View File

@@ -493,7 +493,10 @@ class TestSDK:
"timeout_ms": timeout_ms,
}
)
return SimpleNamespace(error=None, payload={"result": {"ok": True}})
return SimpleNamespace(
error=None,
payload={"success": True, "result": {"success": True, "result": {"ok": True}}},
)
class DummyPlugin:
def _set_context(self, ctx):
@@ -508,9 +511,36 @@ class TestSDK:
plugin.ctx._plugin_id = "forged_plugin"
result = await plugin.ctx.call_capability("send.text", text="hello", stream_id="stream-1")
assert result == {"ok": True}
assert result is True
assert runner._rpc_client.calls[0]["plugin_id"] == "owner_plugin"
assert runner._rpc_client.calls[0]["method"] == "cap.request"
assert runner._rpc_client.calls[0]["method"] == "cap.call"
@pytest.mark.asyncio
async def test_runner_injected_context_unwraps_llm_available_models(self):
"""Runner 应为 SDK 解开 cap.call 响应外层,避免模型列表被规整成空列表。"""
from src.plugin_runtime.runner.runner_main import PluginRunner
class DummyRPCClient:
async def send_request(self, method, plugin_id="", payload=None, timeout_ms=30000):
assert method == "cap.call"
assert plugin_id == "owner_plugin"
assert payload == {"capability": "llm.get_available_models", "args": {}}
return SimpleNamespace(
error=None,
payload={"success": True, "result": {"success": True, "models": ["utils", "replyer"]}},
)
class DummyPlugin:
def _set_context(self, ctx):
self.ctx = ctx
runner = PluginRunner(host_address="dummy", session_token="token", plugin_dirs=[])
runner._rpc_client = DummyRPCClient()
plugin = DummyPlugin()
runner._inject_context("owner_plugin", plugin)
assert await plugin.ctx.llm.get_available_models() == ["utils", "replyer"]
@pytest.mark.asyncio
async def test_runner_applies_initial_plugin_config(self, tmp_path):
@@ -671,7 +701,7 @@ class TestSDK:
if method == "cap.call":
bootstrap_methods = [call["method"] for call in self.calls[:-1]]
assert "plugin.bootstrap" in bootstrap_methods
return SimpleNamespace(error=None, payload={"success": True})
return SimpleNamespace(error=None, payload={"success": True, "result": {"success": True}})
return SimpleNamespace(error=None, payload={"accepted": True})
async def disconnect(self):
@@ -702,11 +732,15 @@ class TestSDK:
instance=plugin,
version="1.0.0",
capabilities_required=["send.text"],
dependencies=[],
manifest=SimpleNamespace(plugin_dependencies=[], llm_provider_client_types=[]),
component_handlers={},
llm_provider_handlers={},
)
monkeypatch.setattr(runner, "_install_log_handler", lambda: None)
monkeypatch.setattr(runner, "_uninstall_log_handler", lambda: asyncio.sleep(0))
monkeypatch.setattr(runner._loader, "discover_and_load", lambda plugin_dirs: [meta])
monkeypatch.setattr(runner._loader, "discover_and_load", lambda plugin_dirs, **kwargs: [meta])
await runner.run()

View File

@@ -340,11 +340,51 @@ class PersonProfileService:
"type": "paragraph",
"score": 1.1,
"content": content[:220],
"metadata": {},
"source": str(row.get("source", "") or source),
"metadata": dict(row.get("metadata", {}) or {}),
}
)
return self._filter_stale_paragraph_evidence(evidence)
@staticmethod
def _source_type_from_source(source: str) -> str:
token = str(source or "").strip()
if token.startswith("chat_summary:"):
return "chat_summary"
if token.startswith("person_fact:"):
return "person_fact"
return ""
    def _enrich_paragraph_evidence_metadata(
        self,
        paragraph_hash: str,
        metadata: Dict[str, Any],
    ) -> Tuple[Dict[str, Any], str]:
        """Merge a retrieval hit's metadata with the stored paragraph's metadata.

        Returns ``(merged_metadata, source)``. On key conflicts the hit's own
        metadata wins over the stored paragraph's; ``source`` and
        ``source_type`` are backfilled from the paragraph when the hit lacks
        them.
        """
        merged = dict(metadata or {})
        source = str(merged.get("source", "") or "").strip()
        # Best-effort lookup: a missing or failing store must not break
        # evidence assembly, so any exception degrades to "no paragraph".
        try:
            paragraph = self.metadata_store.get_paragraph(paragraph_hash)
        except Exception:
            paragraph = None
        if isinstance(paragraph, dict):
            paragraph_metadata = paragraph.get("metadata", {}) or {}
            if isinstance(paragraph_metadata, dict):
                # Hit metadata (right operand) overrides stored paragraph metadata.
                merged = {**paragraph_metadata, **merged}
            source = source or str(paragraph.get("source", "") or "").strip()
        # Prefer an explicit source_type field; otherwise infer it from the
        # source prefix (chat_summary:/person_fact:).
        source_type = str(merged.get("source_type", "") or "").strip() or self._source_type_from_source(source)
        if source_type:
            merged["source_type"] = source_type
        if source:
            merged["source"] = source
        return merged, source
@staticmethod
def _is_chat_summary_evidence(item: Dict[str, Any]) -> bool:
metadata = item.get("metadata", {}) if isinstance(item.get("metadata"), dict) else {}
source_type = str(metadata.get("source_type", "") or "").strip()
source = str(item.get("source", "") or metadata.get("source", "") or "").strip()
return source_type == "chat_summary" or source.startswith("chat_summary:")
def _filter_stale_paragraph_evidence(
self,
evidence: List[Dict[str, Any]],
@@ -417,7 +457,8 @@ class PersonProfileService:
"type": "paragraph",
"score": 0.0,
"content": str(para.get("content", ""))[:180],
"metadata": {},
"source": str(para.get("source", "") or ""),
"metadata": dict(para.get("metadata", {}) or {}),
}
)
return self._filter_stale_paragraph_evidence(fallback[:top_k])
@@ -443,13 +484,18 @@ class PersonProfileService:
if not h or h in seen_hash:
continue
seen_hash.add(h)
metadata, source = self._enrich_paragraph_evidence_metadata(
h,
dict(getattr(item, "metadata", {}) or {}),
)
evidence.append(
{
"hash": h,
"type": str(getattr(item, "result_type", "")),
"score": float(getattr(item, "score", 0.0) or 0.0),
"content": str(getattr(item, "content", "") or "")[:220],
"metadata": dict(getattr(item, "metadata", {}) or {}),
"source": source,
"metadata": metadata,
}
)
evidence.sort(key=lambda x: x.get("score", 0.0), reverse=True)
@@ -475,7 +521,7 @@ class PersonProfileService:
lines.append(f"记忆特征: {'; '.join(memory_traits[:6])}")
if relation_edges:
lines.append("关系证据:")
lines.append("稳定关系证据:")
for rel in relation_edges[:6]:
s = rel.get("subject", "")
p = rel.get("predicate", "")
@@ -483,9 +529,19 @@ class PersonProfileService:
conf = float(rel.get("confidence", 0.0))
lines.append(f"- {s} {p} {o} (conf={conf:.2f})")
if vector_evidence:
lines.append("向量证据摘要:")
for item in vector_evidence[:4]:
stable_evidence = [item for item in vector_evidence if not self._is_chat_summary_evidence(item)]
recent_interactions = [item for item in vector_evidence if self._is_chat_summary_evidence(item)]
if stable_evidence:
lines.append("稳定人物事实:")
for item in stable_evidence[:4]:
content = str(item.get("content", "")).strip()
if content:
lines.append(f"- {content}")
if recent_interactions:
lines.append("近期相关互动:")
for item in recent_interactions[:2]:
content = str(item.get("content", "")).strip()
if content:
lines.append(f"- {content}")

View File

@@ -43,6 +43,7 @@ SUMMARY_PROMPT_TEMPLATE = """
请完成以下任务:
1. **生成总结**:以第三人称或机器人的视角,简洁明了地总结这段对话的主要内容、发生的事件或讨论的主题。
2. **提取实体与关系**:识别并提取对话中提到的重要实体以及它们之间的关系。
3. **区分事实来源**:用户自己明确表达的稳定人物事实可以记录;机器人发言只能作为上下文,不能单独作为用户画像事实来源。
请严格以 JSON 格式输出,格式如下:
{{
@@ -54,6 +55,7 @@ SUMMARY_PROMPT_TEMPLATE = """
}}
注意:总结应具有叙事性,能够作为长程记忆的一部分。直接使用实体的实际名称,不要使用 e1/e2 等代号。
不要把机器人提出的建议、猜测、玩笑、承诺或复述,写成用户的稳定偏好、身份或长期事实。
"""

View File

@@ -57,7 +57,7 @@ MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute(
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0-pre.14"
CONFIG_VERSION: str = "8.10.13"
CONFIG_VERSION: str = "8.10.15"
MODEL_CONFIG_VERSION: str = "1.16.0"
logger = get_logger("config")

View File

@@ -2767,15 +2767,6 @@ class DebugConfig(ConfigBase):
__ui_label__ = "其他"
__ui_icon__ = "more-horizontal"
enable_maisaka_stage_board: bool = Field(
default=False,
json_schema_extra={
"x-widget": "switch",
"x-icon": "layout-dashboard",
},
)
"""是否启用 Maisaka 阶段看板"""
show_maisaka_thinking: bool = Field(
default=True,
json_schema_extra={

View File

@@ -16,7 +16,6 @@ from src.config.config import config_manager, global_config
from src.emoji_system.emoji_manager import emoji_manager
from src.learners.expression_auto_check_task import ExpressionAutoCheckTask
from src.manager.async_task_manager import async_task_manager
from src.maisaka.display.stage_status_board import disable_stage_status_board, enable_stage_status_board
from src.plugin_runtime.integration import get_plugin_runtime_manager
from src.prompt.prompt_manager import prompt_manager
from src.services.memory_flow_service import memory_automation_service
@@ -66,8 +65,6 @@ class MainSystem:
async def initialize(self) -> None:
"""初始化系统组件"""
if global_config.debug.enable_maisaka_stage_board:
enable_stage_status_board()
logger.info(t("startup.waking_up", nickname=global_config.bot.nickname))
self.webui_task = asyncio.create_task(self._run_webui_startup_sequence(), name="webui_startup")
@@ -191,7 +188,6 @@ async def main() -> None:
await system.initialize()
await system.schedule_tasks()
finally:
disable_stage_status_board()
emoji_manager.shutdown()
await memory_automation_service.shutdown()
await a_memorix_host_service.stop()

View File

@@ -31,7 +31,6 @@ async def handle_tool(
success=True,
content="当前对话继续进入下一轮思考和工具执行。",
metadata={
"pause_execution": True,
"timing_action": "continue",
},
)

View File

@@ -376,6 +376,7 @@ async def _select_emoji_with_sub_agent(
context_message_limit=_EMOJI_SUB_AGENT_CONTEXT_LIMIT,
system_prompt=system_prompt,
extra_messages=[prompt_message, candidate_message],
request_kind="emotion",
model_task_name=model_task_name,
)
selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2)

View File

@@ -45,6 +45,14 @@ REQUEST_TYPE_BY_REQUEST_KIND = {
"planner": "maisaka_planner",
"timing_gate": "maisaka_timing_gate",
}
PROMPT_PREVIEW_CATEGORY_BY_REQUEST_KIND = {
"planner": "planner",
"timing_gate": "timing_gate",
"reply_effect_judge": "reply_effect_judge",
"expression_selector": "expression_selector",
"emotion": "emotion",
"sub_agent": "sub_agent",
}
CONTEXT_SELECTION_CACHE_STABILITY_RATIO = 2.0
@@ -234,6 +242,15 @@ class MaisakaChatLoopService:
f"maisaka_{normalized_request_kind}" if normalized_request_kind else "maisaka_planner",
)
@staticmethod
def _resolve_prompt_preview_category(request_kind: str) -> str:
"""根据请求类型决定 Prompt 预览落盘目录,避免子代理混入 planner。"""
normalized_request_kind = str(request_kind or "").strip().lower()
if not normalized_request_kind:
return "planner"
return PROMPT_PREVIEW_CATEGORY_BY_REQUEST_KIND.get(normalized_request_kind, normalized_request_kind)
def _get_llm_chat_client(self, request_kind: str) -> LLMServiceClient:
"""获取当前请求类型对应的 LLM 客户端。"""
@@ -544,7 +561,7 @@ class MaisakaChatLoopService:
if global_config.debug.show_maisaka_thinking:
prompt_section_result = PromptCLIVisualizer.build_prompt_section_result(
built_messages,
category="planner" if request_kind != "timing_gate" else "timing_gate",
category=self._resolve_prompt_preview_category(request_kind),
chat_id=self._session_id,
request_kind=request_kind,
selection_reason=selection_reason,

View File

@@ -11,8 +11,6 @@ from .display_utils import (
from .prompt_cli_renderer import PromptCLIVisualizer
from .prompt_preview_logger import PromptPreviewLogger
from .stage_status_board import (
disable_stage_status_board,
enable_stage_status_board,
remove_stage_status,
update_stage_status,
)
@@ -21,8 +19,6 @@ __all__ = [
"PromptCLIVisualizer",
"PromptPreviewLogger",
"build_tool_call_summary_lines",
"disable_stage_status_board",
"enable_stage_status_board",
"format_token_count",
"format_tool_call_for_display",
"get_request_panel_style",

View File

@@ -8,6 +8,8 @@ _REQUEST_PANEL_STYLE_MAP: dict[str, tuple[str, str]] = {
"timing_gate": ("MaiSaka 大模型请求 - Timing Gate 子代理", "bright_magenta"),
"replyer": ("MaiSaka 回复器 Prompt", "bright_yellow"),
"emotion": ("MaiSaka Emotion Tool Prompt", "bright_cyan"),
"expression_selector": ("MaiSaka 表达选择子代理 Prompt", "bright_yellow"),
"reply_effect_judge": ("MaiSaka 回复效果评分器 Prompt", "bright_red"),
"sub_agent": ("MaiSaka 大模型请求 - 子代理", "bright_blue"),
}

View File

@@ -1,54 +1,20 @@
"""Maisaka 阶段状态看板"""
"""Maisaka 阶段状态广播"""
from __future__ import annotations
from pathlib import Path
from typing import Any, Optional
from typing import Any
import json
import os
import subprocess
import sys
import asyncio
import threading
import time
class MaisakaStageStatusBoard:
"""维护 Maisaka 阶段状态,并在独立终端中展示"""
"""维护 Maisaka 阶段状态,并推送给 WebUI 麦麦观察"""
def __init__(self) -> None:
self._lock = threading.Lock()
self._enabled = False
self._entries: dict[str, dict[str, Any]] = {}
self._viewer_process: Optional[subprocess.Popen[Any]] = None
self._state_file = Path("temp") / "maisaka_stage_status.json"
self._state_file.parent.mkdir(parents=True, exist_ok=True)
def enable(self) -> None:
"""启用阶段状态看板。"""
with self._lock:
if self._enabled:
return
self._enabled = True
self._write_state_locked()
self._ensure_viewer_process_locked()
def disable(self) -> None:
"""禁用阶段状态看板。"""
with self._lock:
self._enabled = False
self._entries.clear()
self._write_state_locked()
process = self._viewer_process
self._viewer_process = None
if process is not None and process.poll() is None:
try:
process.terminate()
except Exception:
pass
def update(
self,
@@ -62,16 +28,15 @@ class MaisakaStageStatusBoard:
) -> None:
"""更新一个会话的阶段状态。"""
now = time.time()
with self._lock:
if not self._enabled:
return
now = time.time()
current = self._entries.get(session_id, {})
previous_stage = str(current.get("stage") or "").strip()
stage_started_at = float(current.get("stage_started_at") or now)
if previous_stage != stage:
stage_started_at = now
self._entries[session_id] = {
payload = {
"session_id": session_id,
"session_name": session_name,
"stage": stage,
@@ -80,62 +45,53 @@ class MaisakaStageStatusBoard:
"agent_state": agent_state,
"stage_started_at": stage_started_at,
"updated_at": now,
"timestamp": now,
}
self._write_state_locked()
self._entries[session_id] = payload
self._schedule_stage_status_event(payload)
def remove(self, session_id: str) -> None:
"""移除一个会话的阶段状态。"""
with self._lock:
if not self._enabled:
return
self._entries.pop(session_id, None)
self._write_state_locked()
removed = self._entries.pop(session_id, None)
def _write_state_locked(self) -> None:
payload = {
"enabled": self._enabled,
"host_pid": os.getpid(),
"updated_at": time.time(),
"entries": list(self._entries.values()),
}
tmp_file = self._state_file.with_suffix(".tmp")
tmp_file.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
tmp_file.replace(self._state_file)
self._schedule_stage_removed_event(session_id, removed)
def _ensure_viewer_process_locked(self) -> None:
if not sys.platform.startswith("win"):
def snapshot(self) -> list[dict[str, Any]]:
"""返回当前所有聊天流的阶段状态快照。"""
with self._lock:
return [dict(entry) for entry in self._entries.values()]
@staticmethod
def _schedule_stage_status_event(payload: dict[str, Any]) -> None:
try:
from src.maisaka.monitor_events import emit_stage_status
asyncio.get_running_loop().create_task(emit_stage_status(**payload))
except RuntimeError:
return
if self._viewer_process is not None and self._viewer_process.poll() is None:
@staticmethod
def _schedule_stage_removed_event(session_id: str, removed: dict[str, Any] | None) -> None:
try:
from src.maisaka.monitor_events import emit_stage_removed
asyncio.get_running_loop().create_task(
emit_stage_removed(
session_id=session_id,
session_name=str((removed or {}).get("session_name") or ""),
)
)
except RuntimeError:
return
creationflags = getattr(subprocess, "CREATE_NEW_CONSOLE", 0)
viewer_script = Path(__file__).resolve().with_name("stage_status_viewer.py")
self._viewer_process = subprocess.Popen(
[
sys.executable,
str(viewer_script),
str(self._state_file.resolve()),
],
creationflags=creationflags,
cwd=str(Path.cwd()),
)
_stage_board = MaisakaStageStatusBoard()
def enable_stage_status_board() -> None:
"""启用控制台阶段状态看板。"""
_stage_board.enable()
def disable_stage_status_board() -> None:
"""禁用控制台阶段状态看板。"""
_stage_board.disable()
def update_stage_status(
*,
session_id: str,
@@ -145,7 +101,7 @@ def update_stage_status(
round_text: str = "",
agent_state: str = "",
) -> None:
"""更新控制台阶段状态。"""
"""更新 WebUI 麦麦观察中的阶段状态。"""
_stage_board.update(
session_id=session_id,
@@ -158,6 +114,12 @@ def update_stage_status(
def remove_stage_status(session_id: str) -> None:
"""移除控制台阶段状态。"""
"""移除 WebUI 麦麦观察中的阶段状态。"""
_stage_board.remove(session_id)
def get_stage_status_snapshot() -> list[dict[str, Any]]:
"""获取当前阶段状态快照。"""
return _stage_board.snapshot()

View File

@@ -1,93 +0,0 @@
"""Maisaka 阶段状态看板查看器。"""
from __future__ import annotations
from pathlib import Path
from typing import Any
import json
import os
import sys
import time
import traceback
def _clear_screen() -> None:
    """Clear the terminal using the platform-appropriate shell command."""
    command = "cls" if sys.platform.startswith("win") else "clear"
    os.system(command)
def _load_state(state_file: Path) -> dict[str, Any]:
if not state_file.exists():
return {}
try:
return json.loads(state_file.read_text(encoding="utf-8"))
except Exception:
return {}
def _render(state: dict[str, Any]) -> str:
entries = state.get("entries")
if not isinstance(entries, list):
entries = []
lines = ["Maisaka 阶段看板", "=" * 72, ""]
if not entries:
lines.append("当前没有活跃会话。")
return "\n".join(lines)
entries = sorted(
[entry for entry in entries if isinstance(entry, dict)],
key=lambda item: str(item.get("session_name") or item.get("session_id") or ""),
)
now = time.time()
for entry in entries:
session_name = str(entry.get("session_name") or entry.get("session_id") or "").strip() or "unknown"
session_id = str(entry.get("session_id") or "").strip()
stage = str(entry.get("stage") or "").strip() or "未知"
detail = str(entry.get("detail") or "").strip() or "-"
round_text = str(entry.get("round_text") or "").strip()
agent_state = str(entry.get("agent_state") or "").strip() or "-"
stage_started_at = float(entry.get("stage_started_at") or now)
elapsed = max(0.0, now - stage_started_at)
lines.append(f"Chat: {session_name}")
if session_id and session_id != session_name:
lines.append(f"ID: {session_id}")
lines.append(f"阶段: {stage}")
if round_text:
lines.append(f"轮次: {round_text}")
lines.append(f"详情: {detail}")
lines.append(f"状态: {agent_state}")
lines.append(f"阶段耗时: {elapsed:.1f}s")
lines.append("-" * 72)
return "\n".join(lines)
def main() -> int:
    """Poll the shared state file and repaint the console dashboard.

    Usage: ``stage_status_viewer.py <state_file>``.

    Returns:
        0 when the board was disabled (normal shutdown), 1 on missing
        argument or after an unrecoverable render/IO error.
    """
    if len(sys.argv) < 2:
        return 1
    state_file = Path(sys.argv[1]).resolve()
    # Crashes are dumped next to the state file for post-mortem inspection.
    log_file = state_file.with_name("maisaka_stage_status_viewer.log")
    last_render = ""
    while True:
        try:
            state = _load_state(state_file)
            if not state.get("enabled", False):
                # Owner cleared the "enabled" flag (or the file is gone/corrupt):
                # exit cleanly.
                return 0
            rendered = _render(state)
            if rendered != last_render:
                # Only repaint when content changed to avoid console flicker.
                _clear_screen()
                print(rendered, flush=True)
                last_render = rendered
            time.sleep(0.5)
        except Exception:
            # Best-effort: record the traceback, back off briefly, then exit
            # with a failure code.
            log_file.write_text(traceback.format_exc(), encoding="utf-8")
            time.sleep(3)
            return 1


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -367,6 +367,47 @@ async def emit_session_start(
})
async def emit_stage_status(
    *,
    session_id: str,
    session_name: str,
    stage: str,
    detail: str = "",
    round_text: str = "",
    agent_state: str = "",
    stage_started_at: float,
    updated_at: float,
    timestamp: float,
) -> None:
    """Broadcast the current stage status of a single chat stream."""
    payload = {
        "session_id": session_id,
        "session_name": session_name,
        "stage": stage,
        "detail": detail,
        "round_text": round_text,
        "agent_state": agent_state,
        "stage_started_at": stage_started_at,
        "updated_at": updated_at,
        "timestamp": timestamp,
    }
    await _broadcast("stage.status", payload)
async def emit_stage_removed(
    *,
    session_id: str,
    session_name: str = "",
) -> None:
    """Broadcast that a chat stream's stage entry was removed from the board."""
    payload = {
        "session_id": session_id,
        "session_name": session_name,
        "timestamp": time.time(),
    }
    await _broadcast("stage.removed", payload)
async def emit_message_ingested(
session_id: str,
speaker_name: str,
@@ -385,6 +426,26 @@ async def emit_message_ingested(
})
async def emit_message_sent(
    session_id: str,
    speaker_name: str,
    content: str,
    message_id: str,
    timestamp: float,
    source_kind: str = "",
) -> None:
    """Broadcast a message that MaiSaka itself has sent."""
    payload = {
        "session_id": session_id,
        "speaker_name": speaker_name,
        "content": content,
        "message_id": message_id,
        "source_kind": source_kind,
        "timestamp": timestamp,
    }
    await _broadcast("message.sent", payload)
async def emit_cycle_start(
session_id: str,
cycle_id: int,
@@ -404,6 +465,27 @@ async def emit_cycle_start(
})
async def emit_cycle_end(
    session_id: str,
    cycle_id: int,
    time_records: Dict[str, float],
    agent_state: str,
    end_reason: str,
    end_detail: str,
) -> None:
    """Broadcast the end of a single reasoning cycle."""
    payload = {
        "session_id": session_id,
        "cycle_id": cycle_id,
        "time_records": _normalize_payload_value(time_records),
        "agent_state": agent_state,
        "end_reason": end_reason,
        "end_detail": end_detail,
        "timestamp": time.time(),
    }
    await _broadcast("cycle.end", payload)
async def emit_timing_gate_result(
session_id: str,
cycle_id: int,
@@ -455,10 +537,13 @@ async def emit_planner_finalized(
planner_completion_tokens: Optional[int],
planner_total_tokens: Optional[int],
planner_duration_ms: Optional[float],
planner_prompt_html_uri: Optional[str],
tools: Optional[List[Dict[str, Any]]],
time_records: Dict[str, float],
agent_state: str,
planner_prompt_html_uri: Optional[str] = None,
tools: Optional[List[Dict[str, Any]]] = None,
time_records: Optional[Dict[str, float]] = None,
agent_state: str = "",
planner_interrupted: bool = False,
end_reason: str = "",
end_detail: str = "",
) -> None:
"""广播一轮 planner 结束后的最终聚合事件。"""
@@ -494,8 +579,11 @@ async def emit_planner_finalized(
planner_prompt_html_uri,
),
"tools": _serialize_tool_results(list(tools or [])),
"interrupted": planner_interrupted,
"final_state": {
"time_records": _normalize_payload_value(time_records),
"time_records": _normalize_payload_value(time_records or {}),
"agent_state": agent_state,
"end_reason": end_reason,
"end_detail": end_detail,
},
})

View File

@@ -38,6 +38,7 @@ from .context_messages import (
from .history_post_processor import process_chat_history_after_cycle
from .history_utils import build_prefixed_message_sequence, build_session_message_visible_text
from .monitor_events import (
emit_cycle_end,
emit_cycle_start,
emit_message_ingested,
emit_planner_finalized,
@@ -411,21 +412,14 @@ class MaisakaReasoningEngine:
max_internal_rounds: int,
has_pending_messages: bool,
) -> bool:
return has_pending_messages and round_index + 1 < max_internal_rounds
return has_pending_messages and round_index < max_internal_rounds
async def run_loop(self) -> None:
"""独立消费消息批次,并执行对应的内部思考轮次。"""
try:
while self._runtime._running:
queued_trigger = await self._runtime._internal_turn_queue.get()
message_triggered, timeout_triggered = self._drain_ready_turn_triggers(queued_trigger)
if self._runtime._agent_state == self._runtime._STATE_WAIT and not timeout_triggered:
self._runtime._message_turn_scheduled = False
logger.debug(
f"{self._runtime.log_prefix} 当前仍处于 wait 状态,忽略消息触发并继续等待超时"
)
continue
message_triggered = self._drain_ready_turn_triggers(queued_trigger)
if message_triggered:
await self._runtime._wait_for_message_quiet_period()
@@ -436,37 +430,21 @@ class MaisakaReasoningEngine:
if self._runtime._has_pending_messages()
else []
)
if not timeout_triggered and not cached_messages:
if not cached_messages:
continue
self._runtime._agent_state = self._runtime._STATE_RUNNING
self._runtime._update_stage_status(
"消息整理",
f"待处理消息 {len(cached_messages)}" if cached_messages else "准备复用超时锚点",
f"待处理消息 {len(cached_messages)}",
)
if cached_messages:
asyncio.create_task(self._runtime._trigger_batch_learning(cached_messages))
if timeout_triggered:
self._runtime._chat_history.append(
self._build_wait_completed_message(has_new_messages=True)
)
await self._ingest_messages(cached_messages)
anchor_message = cached_messages[-1]
else:
anchor_message = self._get_timeout_anchor_message()
if anchor_message is None:
logger.warning(
f"{self._runtime.log_prefix} 等待超时后缺少可复用的锚点消息,跳过本轮继续思考"
)
continue
logger.info(f"{self._runtime.log_prefix} 等待超时后开始新一轮思考")
if self._runtime._pending_wait_tool_call_id:
self._runtime._chat_history.append(
self._build_wait_completed_message(has_new_messages=False)
)
asyncio.create_task(self._runtime._trigger_batch_learning(cached_messages))
await self._ingest_messages(cached_messages)
anchor_message = cached_messages[-1]
try:
timing_gate_required = True
for round_index in range(self._runtime._max_internal_rounds):
round_index = 0
while round_index < self._runtime._max_internal_rounds:
cycle_detail = self._start_cycle()
round_text = f"{round_index + 1}/{self._runtime._max_internal_rounds}"
self._runtime._log_cycle_started(cycle_detail, round_index)
@@ -489,6 +467,9 @@ class MaisakaReasoningEngine:
response: Optional[ChatResponse] = None
action_tool_definitions: list[dict[str, Any]] = []
planner_extra_lines: list[str] = []
planner_interrupted = False
cycle_end_reason = "continue"
cycle_end_detail = "本轮思考完成,继续后续内部轮次。"
tool_result_summaries: list[str] = []
tool_monitor_results: list[dict[str, Any]] = []
try:
@@ -530,6 +511,8 @@ class MaisakaReasoningEngine:
)
timing_gate_required = self._mark_timing_gate_completed(timing_action)
if timing_action != "continue":
cycle_end_reason = "timing_no_reply"
cycle_end_detail = "Timing Gate 选择 no_reply本轮不会进入 Planner。"
logger.debug(
f"{self._runtime.log_prefix} Timing Gate 结束当前回合: "
f"回合={round_index + 1} 动作={timing_action}"
@@ -574,19 +557,40 @@ class MaisakaReasoningEngine:
if response.tool_calls:
tool_started_at = time.time()
should_pause, tool_result_summaries, tool_monitor_results = await self._handle_tool_calls(
(
should_pause,
pause_tool_name,
tool_result_summaries,
tool_monitor_results,
) = await self._handle_tool_calls(
response.tool_calls,
response.content or "",
anchor_message,
)
cycle_detail.time_records["tool_calls"] = time.time() - tool_started_at
if should_pause:
if pause_tool_name == "finish":
cycle_end_reason = "finish"
cycle_end_detail = "Planner 调用 finish结束本轮思考并等待新消息。"
elif pause_tool_name:
cycle_end_reason = f"tool_pause:{pause_tool_name}"
cycle_end_detail = f"工具 {pause_tool_name} 要求暂停当前思考循环。"
else:
cycle_end_reason = "tool_pause"
cycle_end_detail = "工具要求暂停当前思考循环。"
break
cycle_end_reason = "tool_continue"
cycle_end_detail = "Planner 工具执行完成,继续下一轮内部思考。"
continue
if not response.content:
cycle_end_reason = "empty_planner_response"
cycle_end_detail = "Planner 没有返回文本或工具调用,本轮思考结束。"
break
except ReqAbortException as exc:
planner_interrupted = True
cycle_end_reason = "planner_interrupted"
cycle_end_detail = "Planner 被新消息打断,当前轮结束。"
self._runtime._update_stage_status(
"Planner 已打断",
str(exc) or "收到外部中断信号",
@@ -650,6 +654,15 @@ class MaisakaReasoningEngine:
continue
finally:
completed_cycle = self._end_cycle(cycle_detail)
if (
round_index + 1 >= self._runtime._max_internal_rounds
and cycle_end_reason in {"continue", "tool_continue"}
):
cycle_end_reason = "max_rounds"
cycle_end_detail = (
f"已达到内部思考轮次上限 {self._runtime._max_internal_rounds}"
"本轮处理结束。"
)
self._runtime._render_context_usage_panel(
cycle_id=cycle_detail.cycle_id,
time_records=dict(completed_cycle.time_records),
@@ -715,7 +728,20 @@ class MaisakaReasoningEngine:
tools=tool_monitor_results,
time_records=dict(completed_cycle.time_records),
agent_state=self._runtime._agent_state,
planner_interrupted=planner_interrupted,
end_reason=cycle_end_reason,
end_detail=cycle_end_detail,
)
await emit_cycle_end(
session_id=self._runtime.session_id,
cycle_id=cycle_detail.cycle_id,
time_records=dict(completed_cycle.time_records),
agent_state=self._runtime._agent_state,
end_reason=cycle_end_reason,
end_detail=cycle_end_detail,
)
if not planner_interrupted:
round_index += 1
finally:
if self._runtime._agent_state == self._runtime._STATE_RUNNING:
self._runtime._agent_state = self._runtime._STATE_STOP
@@ -731,12 +757,11 @@ class MaisakaReasoningEngine:
def _drain_ready_turn_triggers(
self,
queued_trigger: Literal["message", "timeout"],
) -> tuple[bool, bool]:
"""合并当前已就绪的 turn 触发信号。"""
queued_trigger: Literal["message"],
) -> bool:
"""合并当前已就绪的消息触发信号。"""
message_triggered = queued_trigger == "message"
timeout_triggered = queued_trigger == "timeout"
while True:
try:
@@ -747,33 +772,8 @@ class MaisakaReasoningEngine:
if next_trigger == "message":
message_triggered = True
continue
if next_trigger == "timeout":
timeout_triggered = True
continue
return message_triggered, timeout_triggered
def _get_timeout_anchor_message(self) -> Optional[SessionMessage]:
"""在 wait 超时后复用最近一条真实用户消息作为锚点。"""
if self._runtime.message_cache:
return self._runtime.message_cache[-1]
return None
def _build_wait_completed_message(self, *, has_new_messages: bool) -> ToolResultMessage:
"""构造 wait 完成后的工具结果消息。"""
tool_call_id = self._runtime._pending_wait_tool_call_id or "wait_timeout"
self._runtime._pending_wait_tool_call_id = None
content = (
"等待已结束,期间收到了新的用户输入。请结合这些新消息继续下一轮思考。"
if has_new_messages
else "等待已超时,期间没有收到新的用户输入。请基于现有上下文继续下一轮思考。"
)
return ToolResultMessage(
content=content,
timestamp=datetime.now(),
tool_call_id=tool_call_id,
tool_name="wait",
)
return message_triggered
async def _ingest_messages(self, messages: list[SessionMessage]) -> None:
"""处理传入消息列表,将其转换为历史消息并加入聊天历史缓存。"""
@@ -1414,7 +1414,7 @@ class MaisakaReasoningEngine:
tool_calls: list[ToolCall],
latest_thought: str,
anchor_message: SessionMessage,
) -> tuple[bool, list[str], list[dict[str, Any]]]:
) -> tuple[bool, str, list[str], list[dict[str, Any]]]:
"""执行一批统一工具调用。
Args:
@@ -1423,8 +1423,8 @@ class MaisakaReasoningEngine:
anchor_message: 当前轮的锚点消息。
Returns:
tuple[bool, list[str], list[dict[str, Any]]]: 是否需要暂停当前思考循环、
工具结果摘要列表,以及最终监控事件使用的工具详情列表。
tuple[bool, str, list[str], list[dict[str, Any]]]: 是否需要暂停当前思考循环、
触发暂停的工具名、工具结果摘要列表,以及最终监控事件使用的工具详情列表。
"""
tool_result_summaries: list[str] = []
@@ -1444,7 +1444,7 @@ class MaisakaReasoningEngine:
tool_monitor_results.append(
self._build_tool_monitor_result(tool_call, invocation, result, duration_ms=0.0, tool_spec=None)
)
return False, tool_result_summaries, tool_monitor_results
return False, "", tool_result_summaries, tool_monitor_results
execution_context = self._build_tool_execution_context(latest_thought, anchor_message)
availability_context = self._build_tool_availability_context()
@@ -1493,6 +1493,6 @@ class MaisakaReasoningEngine:
logger.warning(f"{self._runtime.log_prefix} 回复工具未生成可见消息,将继续下一轮循环")
if bool(result.metadata.get("pause_execution", False)):
return True, tool_result_summaries, tool_monitor_results
return True, invocation.tool_name, tool_result_summaries, tool_monitor_results
return False, tool_result_summaries, tool_monitor_results
return False, "", tool_result_summaries, tool_monitor_results

View File

@@ -46,7 +46,7 @@ from .display.display_utils import build_tool_call_summary_lines, format_token_c
from .display.prompt_cli_renderer import PromptCLIVisualizer
from .display.stage_status_board import remove_stage_status, update_stage_status
from .history_utils import drop_leading_orphan_tool_results
from .monitor_events import emit_session_start
from .monitor_events import emit_message_sent, emit_session_start
from .reasoning_engine import MaisakaReasoningEngine
from .reply_effect import ReplyEffectTracker
from .reply_effect.image_utils import extract_visual_attachments_from_sequence
@@ -55,14 +55,13 @@ from .tool_provider import MaisakaBuiltinToolProvider
logger = get_logger("maisaka_runtime")
MAX_INTERNAL_ROUNDS = 6
MAX_INTERNAL_ROUNDS = 10
class MaisakaHeartFlowChatting:
"""会话级别的 Maisaka 运行时。"""
_STATE_RUNNING: Literal["running"] = "running"
_STATE_WAIT: Literal["wait"] = "wait"
_STATE_STOP: Literal["stop"] = "stop"
def __init__(self, session_id: str):
@@ -85,7 +84,7 @@ class MaisakaHeartFlowChatting:
# Keep all original messages for batching and later learning.
self.message_cache: list[SessionMessage] = []
self._last_processed_index = 0
self._internal_turn_queue: asyncio.Queue[Literal["message", "timeout"]] = asyncio.Queue()
self._internal_turn_queue: asyncio.Queue[Literal["message"]] = asyncio.Queue()
self._mcp_manager: Optional[MCPManager] = None
self._mcp_host_bridge: Optional[MCPHostLLMBridge] = None
@@ -103,7 +102,6 @@ class MaisakaHeartFlowChatting:
self._talk_frequency_adjust = 1.0
self._reply_latency_measurement_started_at: Optional[float] = None
self._recent_reply_latencies: deque[tuple[float, float]] = deque()
self._wait_timeout_task: Optional[asyncio.Task[None]] = None
self._max_internal_rounds = MAX_INTERNAL_ROUNDS
configured_context_size = (
global_config.chat.max_context_size
@@ -111,8 +109,7 @@ class MaisakaHeartFlowChatting:
else global_config.chat.max_private_context_size
)
self._max_context_size = max(1, int(configured_context_size))
self._agent_state: Literal["running", "wait", "stop"] = self._STATE_STOP
self._pending_wait_tool_call_id: Optional[str] = None
self._agent_state: Literal["running", "stop"] = self._STATE_STOP
self._force_next_timing_continue = False
self._force_next_timing_message_id = ""
self._force_next_timing_reason = ""
@@ -211,7 +208,6 @@ class MaisakaHeartFlowChatting:
self._message_turn_scheduled = False
self._message_debounce_required = False
self._cancel_deferred_message_turn_task()
self._cancel_wait_timeout_task()
while not self._internal_turn_queue.empty():
_ = self._internal_turn_queue.get_nowait()
@@ -270,6 +266,11 @@ class MaisakaHeartFlowChatting:
source_kind=source_kind,
)
self._chat_history.append(history_message)
self._emit_monitor_message_sent(
message=message,
speaker_name=speaker_name,
source_kind=source_kind,
)
return True
except Exception as exc:
logger.warning(
@@ -278,6 +279,29 @@ class MaisakaHeartFlowChatting:
)
return False
def _emit_monitor_message_sent(
self,
*,
message: SessionMessage,
speaker_name: str,
source_kind: str,
) -> None:
"""异步广播 MaiSaka 自己发出的消息,供 WebUI 实时展示。"""
try:
asyncio.create_task(
emit_message_sent(
session_id=self.session_id,
speaker_name=speaker_name,
content=(message.processed_plain_text or "").strip(),
message_id=message.message_id,
timestamp=message.timestamp.timestamp(),
source_kind=source_kind,
)
)
except RuntimeError as exc:
logger.debug(f"{self.log_prefix} 广播已发送消息到监控面板失败: {exc}")
async def register_message(self, message: SessionMessage) -> None:
"""缓存一条新消息并唤醒主循环。"""
if self._running:
@@ -914,9 +938,6 @@ class MaisakaHeartFlowChatting:
def _schedule_message_turn(self) -> None:
"""为当前待处理消息安排一次内部 turn。"""
if self._agent_state == self._STATE_WAIT:
return
if not self._has_pending_messages() or self._message_turn_scheduled:
return
@@ -999,51 +1020,9 @@ class MaisakaHeartFlowChatting:
self._message_debounce_required = False
def _enter_wait_state(self, seconds: Optional[float] = None, tool_call_id: Optional[str] = None) -> None:
"""切换到等待状态。"""
self._agent_state = self._STATE_WAIT
self._pending_wait_tool_call_id = tool_call_id
self._message_turn_scheduled = False
self._cancel_deferred_message_turn_task()
self._cancel_wait_timeout_task()
if seconds is not None:
self._wait_timeout_task = asyncio.create_task(
self._schedule_wait_timeout(seconds=seconds, tool_call_id=tool_call_id)
)
def _enter_stop_state(self) -> None:
"""切换到停止状态。"""
self._agent_state = self._STATE_STOP
self._pending_wait_tool_call_id = None
self._cancel_wait_timeout_task()
def _cancel_wait_timeout_task(self) -> None:
"""取消当前 wait 对应的超时任务。"""
if self._wait_timeout_task is None:
return
self._wait_timeout_task.cancel()
self._wait_timeout_task = None
async def _schedule_wait_timeout(self, seconds: float, tool_call_id: Optional[str]) -> None:
"""在 wait 到期后向内部循环投递 timeout 触发。"""
try:
if seconds > 0:
await asyncio.sleep(seconds)
if not self._running:
return
if self._agent_state != self._STATE_WAIT:
return
if self._pending_wait_tool_call_id != tool_call_id:
return
logger.debug(f"{self.log_prefix} Maisaka 等待已超时")
self._agent_state = self._STATE_RUNNING
await self._internal_turn_queue.put("timeout")
except asyncio.CancelledError:
return
finally:
if self._wait_timeout_task is not None and self._pending_wait_tool_call_id == tool_call_id:
self._wait_timeout_task = None
async def _trigger_batch_learning(self, messages: list[SessionMessage]) -> None:
"""按同一批消息触发表达方式和黑话学习。"""

View File

@@ -1,5 +1,5 @@
from datetime import datetime
from typing import Optional, Union
from typing import List, Optional, Union
import hashlib
import json
@@ -506,7 +506,14 @@ class Person:
logger.error(f"同步用户 {self.person_id} 信息到数据库时出错: {e}")
async def store_person_memory_from_answer(person_name: str, memory_content: str, chat_id: str) -> None:
async def store_person_memory_from_answer(
person_name: str,
memory_content: str,
chat_id: str,
*,
evidence_source: str = "user_supported",
evidence_message_ids: Optional[List[str]] = None,
) -> None:
"""将人物事实写入长期记忆系统。
Args:
@@ -569,6 +576,8 @@ async def store_person_memory_from_answer(person_name: str, memory_content: str,
"person_id": person_id,
"person_name": participant_name,
"writeback_source": "memory_flow_service",
"evidence_source": str(evidence_source or "user_supported"),
"evidence_message_ids": evidence_message_ids or [],
},
respect_filter=True,
user_id=session_user_id,

View File

@@ -517,6 +517,8 @@ class PluginRunner:
)
if resp.error:
raise RuntimeError(resp.error.get("message", "能力调用失败"))
if normalized_method == "cap.call" and isinstance(resp.payload, dict) and "result" in resp.payload:
return resp.payload.get("result")
return resp.payload
ctx = PluginContext(plugin_id=plugin_id, rpc_call=_rpc_call)

View File

@@ -84,7 +84,12 @@ class PersonFactWritebackService:
if target_person is None or not target_person.is_known:
return
facts = await self._extract_facts(target_person, reply_text)
user_evidence_messages = self._collect_user_evidence_messages(message, target_person)
if not user_evidence_messages:
return
user_evidence_text = self._format_user_evidence(user_evidence_messages)
facts = await self._extract_facts(target_person, reply_text, user_evidence_text)
if not facts:
return
@@ -104,8 +109,19 @@ class PersonFactWritebackService:
if not person_name:
return
evidence_message_ids = [
str(getattr(item, "message_id", "") or "").strip()
for item in user_evidence_messages
if str(getattr(item, "message_id", "") or "").strip()
]
for fact in facts:
await store_person_memory_from_answer(person_name, fact, session_id)
await store_person_memory_from_answer(
person_name,
fact,
session_id,
evidence_source="user_supported",
evidence_message_ids=evidence_message_ids,
)
def _resolve_target_person(self, message: Any) -> Optional[Person]:
session = getattr(message, "session", None)
@@ -140,22 +156,110 @@ class PersonFactWritebackService:
person = Person(person_id=person_id)
return person if person.is_known else None
async def _extract_facts(self, person: Person, reply_text: str) -> List[str]:
def _collect_user_evidence_messages(self, message: Any, person: Person) -> List[Any]:
    """Collect up to three user messages usable as evidence for fact extraction.

    Prefers the message referenced by ``reply_to``; otherwise falls back to
    the most recent non-bot messages in the same session before ``message``.
    Returns [] when no session id can be resolved or both lookups fail.
    """
    session = getattr(message, "session", None)
    session_id = str(
        getattr(message, "session_id", "")
        or getattr(session, "session_id", "")
        or ""
    ).strip()
    if not session_id:
        return []
    evidence: List[Any] = []
    seen_ids = set()
    reply_to = str(getattr(message, "reply_to", "") or "").strip()
    if reply_to:
        try:
            replies = find_messages(message_id=reply_to, limit=1)
        except Exception as exc:
            # Lookup failures are non-fatal; fall through to the recent-message path.
            logger.debug("查询人物事实 reply_to 证据失败: %s", exc)
            replies = []
        evidence.extend(self._filter_target_user_messages(replies, person, seen_ids))
    if evidence:
        return evidence[:3]
    timestamp = self._extract_message_timestamp(message)
    try:
        # Fetch a few more (6) than needed (3) since bot/off-target rows
        # are filtered out afterwards.
        candidates = find_messages(
            session_id=session_id,
            before_time=timestamp,
            limit=6,
            limit_mode="latest",
            filter_bot=True,
        )
    except Exception as exc:
        logger.debug("查询人物事实近期用户证据失败: %s", exc)
        return []
    return self._filter_target_user_messages(candidates, person, seen_ids)[:3]
@staticmethod
def _extract_message_timestamp(message: Any) -> float | None:
raw_timestamp = getattr(message, "timestamp", None)
if hasattr(raw_timestamp, "timestamp") and callable(raw_timestamp.timestamp):
try:
return float(raw_timestamp.timestamp())
except Exception:
return None
if isinstance(raw_timestamp, (int, float)):
return float(raw_timestamp)
return None
@staticmethod
def _filter_target_user_messages(messages: List[Any], person: Person, seen_ids: set) -> List[Any]:
    """Keep only messages authored by the target person, deduplicated.

    Args:
        messages: candidate message objects (duck-typed attribute access).
        person: person whose messages count as evidence.
        seen_ids: mutated in place; tracks dedup keys across calls.

    Returns:
        Messages with non-empty text authored by ``person``, excluding the
        bot's own messages and previously seen entries.
    """
    filtered: List[Any] = []
    target_person_id = str(getattr(person, "person_id", "") or "").strip()
    for item in messages:
        platform = str(getattr(item, "platform", "") or "").strip()
        user_info = getattr(getattr(item, "message_info", None), "user_info", None)
        user_id = str(getattr(user_info, "user_id", "") or getattr(item, "user_id", "") or "").strip()
        # Skip messages without identity info and the bot's own output.
        if not platform or not user_id or is_bot_self(platform, user_id):
            continue
        # Only keep messages that resolve to the target person.
        if target_person_id and get_person_id(platform, user_id) != target_person_id:
            continue
        text = str(getattr(item, "processed_plain_text", "") or "").strip()
        if not text:
            continue
        # Fall back to a composite key when the message carries no id.
        message_id = str(getattr(item, "message_id", "") or "").strip()
        dedup_key = message_id or f"{platform}:{user_id}:{text}"
        if dedup_key in seen_ids:
            continue
        seen_ids.add(dedup_key)
        filtered.append(item)
    return filtered
@staticmethod
def _format_user_evidence(messages: List[Any]) -> str:
lines: List[str] = []
for item in messages[:3]:
text = str(getattr(item, "processed_plain_text", "") or "").strip()
if text:
lines.append(f"- {text}")
return "\n".join(lines)
async def _extract_facts(self, person: Person, reply_text: str, user_evidence_text: str) -> List[str]:
person_name = str(getattr(person, "person_name", "") or getattr(person, "nickname", "") or person.person_id)
prompt = f"""你要从一条机器人刚刚发送的回复中,提取“关于{person_name}的稳定事实”。
prompt = f"""你要从用户原始发言中提取“关于{person_name}的稳定事实”。
目标人物:{person_name}
用户原始发言证据:
{user_evidence_text}
机器人回复:
{reply_text}
请只提取满足以下条件的事实:
1. 明确是关于目标人物本人的信息
2. 具有相对稳定性,可以作为长期记忆保存
3. 用简洁中文陈述句表达
4. 如果回复是在直接对目标人物说话,出现“你/你的/你自己”时,默认都指目标人物,请先改写成关于目标人物的第三人称事实再输出
1. 必须能被“用户原始发言证据”直接支持,不能只来自机器人回复
2. 明确是关于目标人物本人的信息
3. 具有相对稳定性,可以作为长期记忆保存
4. 用简洁中文陈述句表达
5. 如果用户原始发言中出现“我/我的/自己”,默认指目标人物,请先改写成关于目标人物的第三人称事实再输出。
不要提取:
- 机器人的情绪、计划、临时动作、客套话
- 仅由机器人提出的建议、猜测、玩笑、回忆或承诺
- 只适用于当前时刻的短期安排
- 不确定、猜测、反问
- 与目标人物无关的信息

View File

@@ -37,6 +37,7 @@ class ReasoningPromptListResponse(BaseModel):
page_size: int
stages: list[str] = Field(default_factory=list)
sessions: list[str] = Field(default_factory=list)
selected_session: str = ""
class ReasoningPromptContentResponse(BaseModel):
@@ -76,15 +77,54 @@ def _relative_posix_path(path: Path) -> str:
return path.relative_to(PROMPT_LOG_ROOT).as_posix()
def _collect_prompt_files() -> tuple[list[ReasoningPromptFile], list[str], list[str]]:
def _is_safe_name(name: str) -> bool:
path = Path(name)
return bool(name) and not path.is_absolute() and ".." not in path.parts and len(path.parts) == 1
def _list_stage_names() -> list[str]:
if not PROMPT_LOG_ROOT.is_dir():
return [], [], []
return []
return sorted(path.name for path in PROMPT_LOG_ROOT.iterdir() if path.is_dir() and _is_safe_name(path.name))
def _resolve_stage_name(stage: str) -> str:
    """Map the query value to a concrete stage directory name.

    Empty or "all" falls back to "planner"; unsafe names raise HTTP 400.
    """
    normalized = str(stage or "").strip()
    if not normalized or normalized == "all":
        return "planner"
    if not _is_safe_name(normalized):
        raise HTTPException(status_code=400, detail="阶段名称不合法")
    return normalized
def _list_session_names(stage: str) -> list[str]:
    """List a stage's session directories, most recently modified first."""
    stage_dir = PROMPT_LOG_ROOT / stage
    if not stage_dir.is_dir():
        return []
    candidates = [entry for entry in stage_dir.iterdir() if entry.is_dir() and _is_safe_name(entry.name)]
    candidates.sort(key=lambda entry: entry.stat().st_mtime, reverse=True)
    return [entry.name for entry in candidates]
def _resolve_session_name(session: str, sessions: list[str]) -> str:
    """Pick the effective session name.

    Empty / "all" / "auto" selects the newest session; an unknown but safe
    name yields ""; unsafe names raise HTTP 400.
    """
    normalized = str(session or "").strip()
    if not normalized or normalized in {"all", "auto"}:
        return sessions[0] if sessions else ""
    if not _is_safe_name(normalized):
        raise HTTPException(status_code=400, detail="会话名称不合法")
    return normalized if normalized in sessions else ""
def _collect_prompt_files(stage: str, session: str) -> list[ReasoningPromptFile]:
session_dir = PROMPT_LOG_ROOT / stage / session
if not session or not session_dir.is_dir():
return []
records: dict[tuple[str, str, str], dict[str, object]] = {}
stages: set[str] = set()
sessions: set[str] = set()
for file_path in PROMPT_LOG_ROOT.rglob("*"):
for file_path in session_dir.iterdir():
if not file_path.is_file() or file_path.suffix.lower() not in ALLOWED_SUFFIXES:
continue
@@ -97,17 +137,15 @@ def _collect_prompt_files() -> tuple[list[ReasoningPromptFile], list[str], list[
if len(parts) < 3:
continue
stage, session_id = parts[0], parts[1]
stage_name, session_id = parts[0], parts[1]
stem = file_path.stem
key = (stage, session_id, stem)
key = (stage_name, session_id, stem)
stat = file_path.stat()
stages.add(stage)
sessions.add(session_id)
record = records.setdefault(
key,
{
"stage": stage,
"stage": stage_name,
"session_id": session_id,
"stem": stem,
"timestamp": int(stem) if stem.isdigit() else None,
@@ -127,26 +165,26 @@ def _collect_prompt_files() -> tuple[list[ReasoningPromptFile], list[str], list[
items = [ReasoningPromptFile(**record) for record in records.values()]
items.sort(key=lambda item: (item.modified_at, item.timestamp or 0), reverse=True)
return items, sorted(stages), sorted(sessions)
return items
@router.get("/files", response_model=ReasoningPromptListResponse)
async def list_reasoning_prompt_files(
stage: str = Query("all"),
session: str = Query("all"),
stage: str = Query("planner"),
session: str = Query("auto"),
search: str = Query(""),
page: int = Query(1, ge=1),
page_size: int = Query(50, ge=10, le=200),
):
"""列出 logs/maisaka_prompt 下的推理过程日志。"""
items, stages, sessions = _collect_prompt_files()
stages = _list_stage_names()
selected_stage = _resolve_stage_name(stage)
sessions = _list_session_names(selected_stage)
selected_session = _resolve_session_name(session, sessions)
items = _collect_prompt_files(selected_stage, selected_session)
normalized_search = search.strip().lower()
if stage != "all":
items = [item for item in items if item.stage == stage]
if session != "all":
items = [item for item in items if item.session_id == session]
if normalized_search:
items = [
item
@@ -167,6 +205,7 @@ async def list_reasoning_prompt_files(
page_size=page_size,
stages=stages,
sessions=sessions,
selected_session=selected_session,
)

View File

@@ -5,14 +5,19 @@
"""
from datetime import datetime
from typing import Optional
from pathlib import Path
from typing import Literal, Optional
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from pydantic import BaseModel, Field
from sqlalchemy import func, inspect, text
from sqlmodel import col, select
import os
import time
from src.common.database.database import engine, get_db_session
from src.common.database.database_model import Images, ImageType
from src.common.logger import get_logger
from src.config.config import MMC_VERSION
from src.webui.dashboard_update import (
@@ -27,6 +32,14 @@ router = APIRouter(prefix="/system", tags=["system"], dependencies=[Depends(requ
logger = get_logger("webui_system")
_start_time = time.time()
_PROJECT_ROOT = Path(__file__).resolve().parents[3]
_DATA_DIR = _PROJECT_ROOT / "data"
_IMAGE_DIR = _DATA_DIR / "images"
_EMOJI_DIR = _DATA_DIR / "emoji"
_EMOJI_THUMBNAIL_DIR = _DATA_DIR / "emoji_thumbnails"
_LOG_DIR = _PROJECT_ROOT / "logs"
_DATABASE_FILE = _DATA_DIR / "MaiBot.db"
_DATABASE_AUXILIARY_SUFFIXES = ("-wal", "-shm")
class RestartResponse(BaseModel):
@@ -56,6 +69,211 @@ class DashboardVersionResponse(BaseModel):
pypi_url: str = PYPI_PROJECT_URL
class CacheDirectoryStats(BaseModel):
    """Statistics for one local cache directory."""

    key: str  # stable identifier used by the cleanup API
    label: str  # human-readable name shown in the WebUI
    path: str
    exists: bool
    file_count: int
    total_size: int  # bytes
    db_records: int = 0  # related image DB rows; 0 for non-image directories
class DatabaseFileStats(BaseModel):
    """Size info for one database file (main db or a -wal/-shm sidecar)."""

    path: str
    exists: bool
    size: int  # bytes; 0 when the file does not exist
class DatabaseTableStats(BaseModel):
    """Row count for a single database table."""

    name: str
    rows: int
class DatabaseStorageStats(BaseModel):
    """Aggregate database storage statistics."""

    files: list[DatabaseFileStats]
    tables: list[DatabaseTableStats]
    total_size: int  # total bytes across all database files
class LocalCacheStatsResponse(BaseModel):
    """Response payload for the local-cache statistics endpoint."""

    directories: list[CacheDirectoryStats]
    database: DatabaseStorageStats
class LocalCacheCleanupRequest(BaseModel):
    """Request payload for local cache cleanup."""

    # What to clean; "database_logs" additionally consults `tables`.
    target: Literal["images", "emoji", "log_files", "database_logs"]
    tables: list[Literal["llm_usage", "tool_records", "mai_messages"]] = Field(default_factory=list)
class LocalCacheCleanupResponse(BaseModel):
    """Result of a local cache cleanup operation."""

    success: bool
    message: str
    target: str
    removed_files: int = 0
    removed_bytes: int = 0  # bytes reclaimed
    removed_records: int = 0  # DB rows deleted
def _parse_version_parts(version: str | None) -> Optional[list[int]]:
"""将版本号转换为可比较的整数列表。"""
if not version:
return None
parts: list[int] = []
for raw_part in version.split("."):
if not raw_part.isdigit():
return None
parts.append(int(raw_part))
return parts
def _is_newer_version(latest: str | None, current: str | None) -> bool:
    """Return True when `latest` is strictly newer than `current`.

    Unparsable versions compare as "not newer".
    """
    latest_parts = _parse_version_parts(latest)
    current_parts = _parse_version_parts(current)
    if latest_parts is None or current_parts is None:
        return False
    width = max(len(latest_parts), len(current_parts))
    padded_latest = latest_parts + [0] * (width - len(latest_parts))
    padded_current = current_parts + [0] * (width - len(current_parts))
    return padded_latest > padded_current
def _iter_files(directory: Path) -> list[Path]:
if not directory.exists() or not directory.is_dir():
return []
return [path for path in directory.rglob("*") if path.is_file()]
def _get_directory_size(directory: Path) -> tuple[int, int]:
    """Return (file_count, total_bytes) for all regular files under `directory`.

    Files whose size cannot be read are still counted but contribute 0 bytes;
    each failure is logged as a warning.
    """
    files = _iter_files(directory)
    total = 0
    for file_path in files:
        try:
            total += file_path.stat().st_size
        except OSError:
            logger.warning(f"读取缓存文件大小失败: {file_path}")
    return len(files), total
def _get_image_record_count(image_type: ImageType) -> int:
    """Count rows in the Images table whose image_type equals `image_type`."""
    # The select() expression is lazy, so it can be built before the session opens.
    count_query = select(func.count()).select_from(Images).where(col(Images.image_type) == image_type)
    with get_db_session() as session:
        return int(session.exec(count_query).one())
def _build_directory_stats(key: str, label: str, path: Path, image_type: ImageType | None = None) -> CacheDirectoryStats:
    """Assemble CacheDirectoryStats for one cache directory.

    When `image_type` is given, the matching Images row count is included;
    otherwise db_records stays 0.
    """
    file_count, total_size = _get_directory_size(path)
    db_records = 0 if image_type is None else _get_image_record_count(image_type)
    return CacheDirectoryStats(
        key=key,
        label=label,
        path=str(path),
        exists=path.exists(),
        file_count=file_count,
        total_size=total_size,
        db_records=db_records,
    )
def _get_database_files() -> list[DatabaseFileStats]:
    """Stat the main database file and its -wal/-shm companion files.

    A file whose size cannot be read is reported with size 0 and a warning.
    """
    candidates = [_DATABASE_FILE]
    candidates.extend(Path(f"{_DATABASE_FILE}{suffix}") for suffix in _DATABASE_AUXILIARY_SUFFIXES)
    stats: list[DatabaseFileStats] = []
    for db_path in candidates:
        size = 0
        exists = db_path.exists()
        if exists:
            try:
                size = db_path.stat().st_size
            except OSError:
                logger.warning(f"读取数据库文件大小失败: {db_path}")
        stats.append(DatabaseFileStats(path=str(db_path), exists=exists, size=size))
    return stats
def _get_database_table_stats() -> list[DatabaseTableStats]:
    """Count rows in every table, returned sorted by table name.

    Table names come from the SQLAlchemy inspector (not user input); embedded
    double quotes are escaped before interpolation into the COUNT query.
    """
    inspector = inspect(engine)
    stats: list[DatabaseTableStats] = []
    with engine.connect() as connection:
        for table_name in inspector.get_table_names():
            quoted_table_name = table_name.replace('"', '""')
            count = connection.execute(text(f'SELECT COUNT(*) FROM "{quoted_table_name}"')).scalar_one()
            stats.append(DatabaseTableStats(name=table_name, rows=int(count)))
    stats.sort(key=lambda item: item.name)
    return stats
def _build_database_stats() -> DatabaseStorageStats:
    """Aggregate database file and table statistics into one payload."""
    file_stats = _get_database_files()
    total = sum(item.size for item in file_stats)
    return DatabaseStorageStats(
        files=file_stats,
        tables=_get_database_table_stats(),
        total_size=total,
    )
def _remove_directory_contents(directory: Path) -> tuple[int, int]:
    """Delete every file under `directory`, then prune now-empty subdirectories.

    Returns (files_removed, bytes_removed). The directory itself is kept.
    Files that cannot be stat'ed or deleted are logged and skipped.
    """
    if not directory.is_dir():
        return 0, 0
    removed_files = 0
    removed_bytes = 0
    for file_path in _iter_files(directory):
        try:
            size = file_path.stat().st_size
            file_path.unlink()
        except OSError as exc:
            logger.warning(f"删除缓存文件失败: {file_path}, error={exc}")
            continue
        removed_files += 1
        removed_bytes += size
    # Prune subdirectories deepest-first; non-empty ones fail rmdir and are kept.
    leftovers = sorted(directory.rglob("*"), key=lambda item: len(item.parts), reverse=True)
    for child in leftovers:
        if child.is_dir():
            try:
                child.rmdir()
            except OSError:
                pass
    return removed_files, removed_bytes
def _delete_image_records(image_type: ImageType) -> int:
    """Delete all Images rows of the given type; return the number removed."""
    deleted = 0
    with get_db_session() as session:
        rows = session.exec(select(Images).where(col(Images.image_type) == image_type)).all()
        for row in rows:
            session.delete(row)
            deleted += 1
    return deleted
def _delete_log_records(table_names: list[str]) -> int:
allowed_tables = {"llm_usage", "tool_records", "mai_messages"}
invalid_tables = set(table_names) - allowed_tables
if invalid_tables:
raise ValueError(f"不支持清理这些表: {', '.join(sorted(invalid_tables))}")
removed_records = 0
with engine.begin() as connection:
for table_name in table_names:
quoted_table_name = table_name.replace('"', '""')
result = connection.execute(text(f'DELETE FROM "{quoted_table_name}"'))
removed_records += int(result.rowcount or 0)
return removed_records
@router.post("/restart", response_model=RestartResponse)
async def restart_maibot():
"""
@@ -120,6 +338,80 @@ async def get_dashboard_version(current_version: Optional[str] = None):
)
@router.get("/local-cache", response_model=LocalCacheStatsResponse)
async def get_local_cache_stats():
    """Report local storage usage for images, emoji, logs and the database."""
    try:
        directory_stats = [
            _build_directory_stats("images", "图片缓存", _IMAGE_DIR, ImageType.IMAGE),
            _build_directory_stats("emoji", "表情包缓存", _EMOJI_DIR, ImageType.EMOJI),
            _build_directory_stats("emoji_thumbnails", "表情包缩略图缓存", _EMOJI_THUMBNAIL_DIR),
            _build_directory_stats("logs", "日志文件", _LOG_DIR),
        ]
        return LocalCacheStatsResponse(
            directories=directory_stats,
            database=_build_database_stats(),
        )
    except Exception as e:
        logger.exception(f"获取本地缓存统计失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取本地缓存统计失败: {str(e)}") from e
@router.post("/local-cache/cleanup", response_model=LocalCacheCleanupResponse)
async def cleanup_local_cache(request: LocalCacheCleanupRequest):
    """Clear one local cache area (images / emoji / log files / DB log tables).

    Unexpected failures become HTTP 500; requesting database_logs with no
    tables selected is rejected with HTTP 400.
    """
    try:
        target = request.target
        if target == "images":
            file_count, byte_count = _remove_directory_contents(_IMAGE_DIR)
            record_count = _delete_image_records(ImageType.IMAGE)
            return LocalCacheCleanupResponse(
                success=True,
                message="图片缓存已清理",
                target=target,
                removed_files=file_count,
                removed_bytes=byte_count,
                removed_records=record_count,
            )
        if target == "emoji":
            # Emoji cleanup covers both the originals and their thumbnails.
            emoji_count, emoji_size = _remove_directory_contents(_EMOJI_DIR)
            thumb_count, thumb_size = _remove_directory_contents(_EMOJI_THUMBNAIL_DIR)
            record_count = _delete_image_records(ImageType.EMOJI)
            return LocalCacheCleanupResponse(
                success=True,
                message="表情包缓存已清理",
                target=target,
                removed_files=emoji_count + thumb_count,
                removed_bytes=emoji_size + thumb_size,
                removed_records=record_count,
            )
        if target == "log_files":
            file_count, byte_count = _remove_directory_contents(_LOG_DIR)
            return LocalCacheCleanupResponse(
                success=True,
                message="日志文件已清理",
                target=target,
                removed_files=file_count,
                removed_bytes=byte_count,
            )
        # Remaining target is "database_logs": at least one table is required.
        if not request.tables:
            raise HTTPException(status_code=400, detail="请至少选择一个要清理的数据库表")
        record_count = _delete_log_records(list(request.tables))
        return LocalCacheCleanupResponse(
            success=True,
            message="数据库日志记录已清理",
            target=target,
            removed_records=record_count,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.exception(f"清理本地缓存失败: {e}")
        raise HTTPException(status_code=500, detail=f"清理本地缓存失败: {str(e)}") from e
# 可选:添加更多系统控制功能

View File

@@ -159,6 +159,15 @@ async def _handle_maisaka_monitor_subscribe(connection_id: str, request_id: Opti
ok=True,
data={"domain": "maisaka_monitor", "topic": "main"},
)
from src.maisaka.display.stage_status_board import get_stage_status_snapshot
await websocket_manager.send_event(
connection_id,
domain="maisaka_monitor",
event="stage.snapshot",
topic="main",
data={"entries": get_stage_status_snapshot(), "timestamp": time.time()},
)
async def _handle_subscribe(connection_id: str, message: Dict[str, Any]) -> None: