Version: 0.9.75.dev.260505

后端:
1.收口阶段 6 agent 结构迁移,将 newAgent 内核与 agentsvc 编排层迁入 services/agent
- 切换 Agent 启动装配与 HTTP handler 直连 agent sv,移除旧 service agent bridge
- 补齐 Agent 对 memory、task、task-class、schedule 的 RPC 适配与契约字段
- 扩展 schedule、task、task-class RPC/contract 支撑 Agent 查询、写入与 provider 切流
- 更新迁移文档、README 与相关注释,明确 agent 当前切流点和剩余 memory 迁移面
This commit is contained in:
Losita
2026-05-05 16:00:57 +08:00
parent e1819c5653
commit d7184b776b
174 changed files with 2189 additions and 1236 deletions

View File

@@ -0,0 +1,495 @@
package sv
import (
"context"
"encoding/json"
"errors"
"log"
"strconv"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/dao"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/pkg"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)
// AgentService is the orchestration layer for the agent chat pipeline. It owns
// LLM access, chat persistence, caching, active-schedule session handling, and
// a set of function-injected planning hooks that break import cycles with the
// service package.
type AgentService struct {
	llmService               *llmservice.Service
	repo                     *dao.AgentDAO
	taskRepo                 *dao.TaskDAO
	cacheDAO                 *dao.CacheDAO
	agentCache               *dao.AgentCache
	activeScheduleDAO        *dao.ActiveScheduleDAO
	activeScheduleSessionDAO *dao.ActiveScheduleSessionDAO
	eventPublisher           outboxinfra.EventPublisher

	// ── Schedule-planning dependencies (function injection avoids an import
	// cycle with the service package) ──

	// SmartPlanningMultiRawFunc is an optional injected capability:
	//  1. performs coarse planning across multiple task classes;
	//  2. the current main path relies on HybridScheduleWithPlanMultiFunc, so
	//     this one is not mandatory.
	SmartPlanningMultiRawFunc func(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, []model.TaskClassItem, error)

	// HybridScheduleWithPlanMultiFunc is the core scheduling-chain dependency:
	//  1. merges "multi task-class coarse planning + existing schedules" into
	//     HybridEntries;
	//  2. the daily/weekly ReAct passes keep optimizing on top of that result.
	HybridScheduleWithPlanMultiFunc func(ctx context.Context, userID int, taskClassIDs []int) ([]model.HybridScheduleEntry, []model.TaskClassItem, error)

	// ResolvePlanningWindowFunc resolves task_class_ids into the relative
	// week/day bounds of the "global planning window".
	//
	// Purpose:
	//  1. gives week-level Move a hard boundary so partial first/last weeks
	//     cannot be moved outside the valid date range;
	//  2. only resolves the window; it performs no coarse or hybrid planning.
	ResolvePlanningWindowFunc func(ctx context.Context, userID int, taskClassIDs []int) (startWeek, startDay, endWeek, endDay int, err error)

	// ── Task urgency-promotion dependency (function injection avoids an import
	// cycle with the service package) ──

	// GetTasksWithUrgencyPromotionFunc reads the user's tasks and applies
	// read-time urgency promotion plus an async persistence trigger.
	// When not injected, QueryTasksForTool falls back to the legacy behavior
	// (pure in-memory promotion, nothing persisted).
	GetTasksWithUrgencyPromotionFunc func(ctx context.Context, userID int) ([]model.Task, error)

	// ── agent dependencies (injected by cmd/start.go via Set* methods) ──
	toolRegistry     *agenttools.ToolRegistry
	scheduleProvider agentmodel.ScheduleStateProvider
	agentStateStore  agentmodel.AgentStateStore
	compactionStore  agentmodel.CompactionStore
	quickTaskDeps    agentmodel.QuickTaskDeps
	memoryReader     MemoryReader
	memoryCfg        memorymodel.Config
	memoryObserver   memoryobserve.Observer
	memoryMetrics    memoryobserve.MetricsRecorder
	activeRerunFunc  ActiveScheduleSessionRerunFunc
}
// NewAgentService wires an AgentService together.
// Dependencies (model access, repositories, caches, async persistence) are
// injected so tests can substitute implementations and the startup flow can
// switch configuration per environment.
func NewAgentService(
	llmService *llmservice.Service,
	repo *dao.AgentDAO,
	taskRepo *dao.TaskDAO,
	cacheDAO *dao.CacheDAO,
	agentRedis *dao.AgentCache,
	activeScheduleDAO *dao.ActiveScheduleDAO,
	activeSessionDAO *dao.ActiveScheduleSessionDAO,
	eventPublisher outboxinfra.EventPublisher,
) *AgentService {
	// Register the token-metering callback globally, exactly once:
	//  1. a single registration avoids duplicate processing;
	//  2. only request contexts carrying a RequestTokenMeter actually accumulate.
	ensureTokenMeterCallbackRegistered()
	svc := &AgentService{
		llmService:               llmService,
		repo:                     repo,
		taskRepo:                 taskRepo,
		cacheDAO:                 cacheDAO,
		agentCache:               agentRedis,
		activeScheduleDAO:        activeScheduleDAO,
		activeScheduleSessionDAO: activeSessionDAO,
		eventPublisher:           eventPublisher,
	}
	return svc
}
// normalizeConversationID canonicalizes a conversation ID:
//  1. surrounding whitespace is stripped;
//  2. an empty result is replaced with a fresh UUID, so downstream cache/DB
//     operations always operate on a valid chat_id.
func normalizeConversationID(chatID string) string {
	if trimmed := strings.TrimSpace(chatID); trimmed != "" {
		return trimmed
	}
	return uuid.NewString()
}
// thinkingModeToBool converts the frontend "thinking" mode string into the
// bool the legacy path expects. Only "true" (whitespace-trimmed,
// case-insensitive) yields true; "false"/"auto"/"" all yield false.
func thinkingModeToBool(mode string) bool {
	normalized := strings.ToLower(strings.TrimSpace(mode))
	return normalized == "true"
}
// pickChatModel selects the model for a request.
// Current contract:
//   - the legacy path has fully moved to the agent graph; this remains only as
//     the model-selection entry for the runNormalChatFlow fallback;
//   - always returns the Pro model; the old strategist parameter has no effect.
func (s *AgentService) pickChatModel(requestModel string) (*llmservice.Client, string) {
	const tier = "pro"
	if s == nil || s.llmService == nil {
		// Guard against a nil receiver / missing llm service during assembly.
		return nil, tier
	}
	return s.llmService.ProClient(), tier
}
// PersistChatHistory is the single message-persistence entry point of the
// agent chat pipeline.
//
// Responsibility boundary:
//  1. chooses between "async outbox" and "sync DB write" based on deployment mode;
//  2. hands the unified DTO (ChatHistoryPersistPayload) to the downstream infra;
//  3. does not write the Redis context (callers handle Redis earlier in the chain);
//  4. does not handle consumption callbacks (in async mode the outbox consumer
//     performs the final DB write).
func (s *AgentService) PersistChatHistory(ctx context.Context, payload model.ChatHistoryPersistPayload) error {
	// Async bus enabled: only publish a "persistence requested" event and never
	// block the request path on Kafka. A successful publish means the event is
	// safely enqueued; the consumer completes the DB write asynchronously.
	if s.eventPublisher != nil {
		return eventsvc.PublishChatHistoryPersistRequested(ctx, s.eventPublisher, payload)
	}
	// No event publisher injected (e.g. a minimal local environment): write the
	// DB synchronously so the feature works without Kafka.
	return s.repo.SaveChatHistory(
		ctx,
		payload.UserID,
		payload.ConversationID,
		payload.Role,
		payload.Message,
		payload.ReasoningContent,
		payload.ReasoningDurationSeconds,
		payload.TokensConsumed,
		"",
	)
}
// saveChatHistoryReliable is a legacy-compatibility alias for PersistChatHistory.
// Migration strategy: keep the old method name for now to avoid a large
// cross-file change in a single round; call sites can later be replaced with
// PersistChatHistory uniformly.
func (s *AgentService) saveChatHistoryReliable(ctx context.Context, payload model.ChatHistoryPersistPayload) error {
	return s.PersistChatHistory(ctx, payload)
}
// mergeAgentReasoningText joins the non-blank parts (after trimming) with a
// blank line between each. Blank or whitespace-only parts are skipped; an
// all-blank input yields "".
func mergeAgentReasoningText(parts ...string) string {
	var b strings.Builder
	for _, part := range parts {
		trimmed := strings.TrimSpace(part)
		if trimmed == "" {
			continue
		}
		if b.Len() > 0 {
			b.WriteString("\n\n")
		}
		b.WriteString(trimmed)
	}
	return b.String()
}
// readAgentExtraString extracts a whitespace-trimmed string from extra[key].
// Returns "" when the map is empty, the key is absent, or the value is not a
// string.
func readAgentExtraString(extra map[string]any, key string) string {
	if len(extra) == 0 {
		return ""
	}
	text, isString := extra[key].(string)
	if !isString {
		return ""
	}
	return strings.TrimSpace(text)
}
// readAgentExtraInt extracts a positive int from extra[key], tolerating the
// loose numeric encodings the frontend may send.
//
// Why the lenient parsing:
//  1. the frontend's history message id may be a string or a number in local state;
//  2. if parsing fails here, the parent message id degrades to 0 and later
//     lands as NULL in the DB, so all common encodings are normalized in one place
//     (JSON number, numeric string, and the usual integer types);
//  3. non-positive or unparseable values are reported as 0.
func readAgentExtraInt(extra map[string]any, key string) int {
	if len(extra) == 0 {
		return 0
	}
	raw, found := extra[key]
	if !found {
		return 0
	}
	parsed, ok := parseAgentLooseInt(raw)
	if !ok {
		return 0
	}
	if parsed <= 0 {
		return 0
	}
	return parsed
}
// readAgentExtraBool extracts a bool from extra[key].
// Accepted encodings: native bool, JSON number (non-zero => true), and the
// string "true" (whitespace-trimmed, case-insensitive). Anything else — a
// missing key, an empty map, or another type — is false.
func readAgentExtraBool(extra map[string]any, key string) bool {
	// A lookup on a nil/empty map yields nil, which falls through to the
	// default branch below, matching the explicit empty-map guard it replaces.
	switch v := extra[key].(type) {
	case bool:
		return v
	case float64:
		return v != 0
	case string:
		return strings.ToLower(strings.TrimSpace(v)) == "true"
	default:
		return false
	}
}
// readAgentExtraIntSlice extracts an []int from extra[key].
// It supports the JSON array shape ([]any with float64/int elements); each
// element goes through the same loose integer parsing, and non-positive or
// unparseable entries are dropped. Returns nil when the key is absent or the
// value is not an array.
func readAgentExtraIntSlice(extra map[string]any, key string) []int {
	if len(extra) == 0 {
		return nil
	}
	raw, found := extra[key]
	if !found || raw == nil {
		return nil
	}
	items, isArray := raw.([]any)
	if !isArray {
		return nil
	}
	ids := make([]int, 0, len(items))
	for _, item := range items {
		parsed, ok := parseAgentLooseInt(item)
		if !ok || parsed <= 0 {
			continue
		}
		ids = append(ids, parsed)
	}
	return ids
}
// parseAgentLooseInt normalizes a weakly-typed number from extra into an int.
//
// Responsibility boundary:
//  1. accepts the common numeric types produced by frontend JSON decoding, plus
//     numeric strings (floats are truncated toward zero);
//  2. performs no business validation — e.g. "must be positive" is the caller's
//     decision;
//  3. returns ok=false on failure so each call site can apply its own fallback.
func parseAgentLooseInt(raw any) (int, bool) {
	switch v := raw.(type) {
	case int:
		return v, true
	case int32:
		return int(v), true
	case int64:
		return int(v), true
	case float64:
		return int(v), true
	case json.Number:
		if i, err := v.Int64(); err == nil {
			return int(i), true
		}
		f, err := v.Float64()
		if err != nil {
			return 0, false
		}
		return int(f), true
	case string:
		s := strings.TrimSpace(v)
		if s == "" {
			return 0, false
		}
		n, err := strconv.Atoi(s)
		if err != nil {
			return 0, false
		}
		return n, true
	}
	return 0, false
}
// pushErrNonBlocking delivers an error to errChan on a best-effort basis.
// Rationale:
//  1. a goroutine must never block (and leak) because errChan is full;
//  2. the main business goroutine must not deadlock on congested error reporting.
// When the channel is full the error is logged and dropped.
func pushErrNonBlocking(errChan chan error, err error) {
	select {
	case errChan <- err:
		return
	default:
	}
	log.Printf("错误通道已满,丢弃错误: %v", err)
}
// runNormalChatFlow executes the plain streaming chat path (not quick-note).
// It is reused from two call sites:
//  1. the user input was never a quick-note;
//  2. with quick-note progress streaming enabled, the final verdict is
//     "not a quick-note" and the flow falls back to plain chat.
//
// Errors are reported through errChan (best-effort, non-blocking); stream
// fragments go to outChan. traceID and requestStart are not used inside this
// body — presumably kept for signature parity with the graph path.
func (s *AgentService) runNormalChatFlow(
	ctx context.Context,
	selectedModel *llmservice.Client,
	resolvedModelName string,
	userMessage string,
	userPersisted bool,
	assistantReasoningPrefix string,
	assistantReasoningStartedAt *time.Time,
	ifThinking bool,
	userID int,
	chatID string,
	traceID string,
	requestStart time.Time,
	outChan chan<- string,
	errChan chan error,
) {
	// 1. Try Redis history first; a hit goes straight to inference and keeps
	//    read pressure off the DB.
	chatHistory, err := s.agentCache.GetHistory(ctx, chatID)
	if err != nil {
		pushErrNonBlocking(errChan, err)
		return
	}
	cacheMiss := false
	if chatHistory == nil {
		// 2. On a cache miss, fall back to the DB and convert rows into the
		//    Eino message format.
		cacheMiss = true
		histories, hisErr := s.repo.GetUserChatHistories(ctx, userID, pkg.HistoryFetchLimitByModel(resolvedModelName), chatID)
		if hisErr != nil {
			pushErrNonBlocking(errChan, hisErr)
			return
		}
		chatHistory = conv.ToEinoMessages(histories)
	}
	// 3. Compute this request's available history token budget and trim the
	//    history. This keeps the model window stable as context grows, avoiding
	//    errors or high latency from an overlong context.
	historyBudget := pkg.HistoryTokenBudgetByModel(resolvedModelName, agentprompt.SystemPrompt, userMessage)
	trimmedHistory, totalHistoryTokens, keptHistoryTokens, droppedCount := pkg.TrimHistoryByTokenBudget(chatHistory, historyBudget)
	chatHistory = trimmedHistory
	// 4. Update the Redis session-window size from the trimmed history length
	//    and proactively enforce the window. Both failures are log-only.
	targetWindow := pkg.CalcSessionWindowSize(len(chatHistory))
	if err = s.agentCache.SetSessionWindowSize(ctx, chatID, targetWindow); err != nil {
		log.Printf("设置历史窗口失败 chat=%s: %v", chatID, err)
	}
	if err = s.agentCache.EnforceHistoryWindow(ctx, chatID); err != nil {
		log.Printf("执行历史窗口裁剪失败 chat=%s: %v", chatID, err)
	}
	if droppedCount > 0 {
		log.Printf("历史裁剪: chat=%s total_tokens=%d kept_tokens=%d dropped=%d budget=%d target_window=%d",
			chatID, totalHistoryTokens, keptHistoryTokens, droppedCount, historyBudget, targetWindow)
	}
	if cacheMiss {
		// 5. After reading from the DB, backfill Redis so the next request
		//    avoids the cold start.
		if err = s.agentCache.BackfillHistory(ctx, chatID, chatHistory); err != nil {
			pushErrNonBlocking(errChan, err)
			return
		}
	}
	// 6.0. Without a usable model, abort plain chat now so we never persist a
	//      dangling user message that gets no follow-up reply.
	if selectedModel == nil {
		pushErrNonBlocking(errChan, errors.New("llm client is not ready"))
		return
	}
	// 6. Run the actual streaming chat.
	//    fullText feeds the later Redis write / persistence; outChan pushes
	//    stream fragments to the frontend in real time.
	fullText, _, reasoningDurationSeconds, streamUsage, streamErr := s.streamChatFallback(ctx, selectedModel, resolvedModelName, userMessage, ifThinking, chatHistory, outChan, assistantReasoningStartedAt, userID, chatID)
	if streamErr != nil {
		pushErrNonBlocking(errChan, streamErr)
		return
	}
	// 6.1 Merge streaming usage into the request-level token meter:
	//  6.1.1 route/quicknote/taskquery Generate calls accumulate via callback;
	//  6.1.2 the main conversation's Stream usage is added manually here.
	addSchemaUsageIntoRequest(ctx, streamUsage)
	requestTokenSnapshot := snapshotRequestTokenMeter(ctx)
	requestTotalTokens := requestTokenSnapshot.TotalTokens
	if requestTotalTokens <= 0 && streamUsage != nil {
		// Fallback: if the callback/meter did not take effect, at least account
		// with the streaming usage.
		requestTotalTokens = normalizeUsageTotal(streamUsage.TotalTokens, streamUsage.PromptTokens, streamUsage.CompletionTokens)
	}
	// 7. Post persistence (user message):
	//  7.1 write Redis first so the latest conversation context is immediately
	//      usable for the next turn;
	//  7.2 then go through the reliable persistence entry (outbox or sync DB).
	if !userPersisted {
		userMsg := &schema.Message{Role: schema.User, Content: userMessage}
		if err = s.agentCache.PushMessage(ctx, chatID, userMsg); err != nil {
			log.Printf("写入用户消息到 Redis 失败: %v", err)
		}
		if err = s.PersistChatHistory(ctx, model.ChatHistoryPersistPayload{
			UserID:                   userID,
			ConversationID:           chatID,
			Role:                     "user",
			Message:                  userMessage,
			ReasoningContent:         "",
			ReasoningDurationSeconds: 0,
			// Accounting scheme B: user messages always record 0; the turn's
			// total tokens are recorded on the assistant message instead.
			TokensConsumed: 0,
		}); err != nil {
			pushErrNonBlocking(errChan, err)
			return
		}
		if _, timelineErr := s.appendConversationTimelineEvent(
			ctx,
			userID,
			chatID,
			model.AgentTimelineKindUserText,
			"user",
			userMessage,
			nil,
			0,
		); timelineErr != nil {
			pushErrNonBlocking(errChan, timelineErr)
			return
		}
	}
	// The plain chat path must also write the assistant reply into Redis,
	// otherwise the DB would hold the assistant message while the latest Redis
	// conversation only holds the user message — an inconsistency.
	// 8. Post persistence (assistant message):
	//  8.1 write Redis first so the next turn's context can see it;
	//  8.2 then reliably persist; failures are reported through errChan.
	//  (context.Background() is used so persistence survives request cancellation.)
	assistantMsg := &schema.Message{Role: schema.Assistant, Content: fullText}
	if reasoningDurationSeconds > 0 {
		assistantMsg.Extra = map[string]any{"reasoning_duration_seconds": reasoningDurationSeconds}
	}
	if err = s.agentCache.PushMessage(context.Background(), chatID, assistantMsg); err != nil {
		log.Printf("写入助手消息到 Redis 失败: %v", err)
	}
	if saveErr := s.PersistChatHistory(context.Background(), model.ChatHistoryPersistPayload{
		UserID:                   userID,
		ConversationID:           chatID,
		Role:                     "assistant",
		Message:                  fullText,
		ReasoningContent:         "",
		ReasoningDurationSeconds: reasoningDurationSeconds,
		// Accounting scheme B: the assistant message records the whole
		// request's total token count.
		TokensConsumed: requestTotalTokens,
	}); saveErr != nil {
		pushErrNonBlocking(errChan, saveErr)
	} else {
		assistantTimelinePayload := map[string]any{}
		if reasoningDurationSeconds > 0 {
			assistantTimelinePayload["reasoning_duration_seconds"] = reasoningDurationSeconds
		}
		if _, timelineErr := s.appendConversationTimelineEvent(
			context.Background(),
			userID,
			chatID,
			model.AgentTimelineKindAssistantText,
			"assistant",
			fullText,
			assistantTimelinePayload,
			requestTotalTokens,
		); timelineErr != nil {
			pushErrNonBlocking(errChan, timelineErr)
		}
	}
	// 9. After the main reply completes, asynchronously try to generate a
	//    conversation title (first time only, and only when the title is empty).
	//    This affects neither request latency nor the chat path's success.
	s.ensureConversationTitleAsync(userID, chatID)
}
// AgentChat is the streaming chat entry point. It starts the agent graph in a
// goroutine and immediately returns the stream and error channels; outChan is
// closed when the graph run finishes, errChan is left open for the caller.
func (s *AgentService) AgentChat(ctx context.Context, userMessage string, thinkingMode string, modelName string, userID int, chatID string, extra map[string]any) (<-chan string, <-chan error) {
	var (
		requestStart = time.Now()
		traceID      = uuid.NewString()
		outChan      = make(chan string, 256)
		errChan      = make(chan error, 1)
	)
	go func() {
		defer close(outChan)
		s.runAgentGraph(ctx, userMessage, thinkingMode, modelName, userID, chatID, extra, traceID, requestStart, outChan, errChan)
	}()
	return outChan, errChan
}

View File

@@ -0,0 +1,388 @@
package sv
import (
"context"
"errors"
"log"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
"github.com/cloudwego/eino/schema"
)
// ActiveScheduleSessionRerunFunc describes how a chat entry that has taken
// over an active-schedule session synchronously advances the rerun.
//
// Responsibility boundary:
//  1. only advances "current session + user reply" into a new active-schedule
//     result;
//  2. decides neither when sessions are created nor how notifications are
//     delivered;
//  3. its result only feeds the chat entry's visible message and the session
//     state write-back.
type ActiveScheduleSessionRerunFunc func(
	ctx context.Context,
	session *model.ActiveScheduleSessionSnapshot,
	userMessage string,
	traceID string,
	requestStart time.Time,
) (*ActiveScheduleSessionRerunResult, error)
// ActiveScheduleSessionRerunResult is the minimal result of an active-schedule
// rerun.
//
// Responsibility boundary:
//  1. carries only the visible message, business card, and session state the
//     chat entry needs to write back;
//  2. exposes no DAO rows and carries no worker / notification side effects;
//  3. when AssistantText is empty, the caller may fall back to the card summary.
type ActiveScheduleSessionRerunResult struct {
	AssistantText string
	BusinessCard  *agentstream.StreamBusinessCardExtra
	SessionState  model.ActiveScheduleSessionState
	SessionStatus string
	PreviewID     string
}
// SetActiveScheduleSessionRerunFunc injects the active-schedule rerun entry
// point (wired by the startup assembly; see the activeRerunFunc field).
func (s *AgentService) SetActiveScheduleSessionRerunFunc(fn ActiveScheduleSessionRerunFunc) {
	s.activeRerunFunc = fn
}
// loadActiveScheduleSessionByConversation resolves the conversation's
// active-schedule session, preferring the cache and falling back to the DB.
//
// Steps:
//  1. read the Redis hot cache first; a hit returns immediately;
//  2. on a miss, query the database so session-state logic is never bound to
//     the cache;
//  3. after a successful DB read, best-effort backfill the cache to reduce DB
//     pressure on the next chat entry.
//
// Returns (nil, nil) when the DAO is missing, the inputs are invalid, or no
// session exists.
func (s *AgentService) loadActiveScheduleSessionByConversation(ctx context.Context, userID int, chatID string) (*model.ActiveScheduleSessionSnapshot, error) {
	if s == nil || s.activeScheduleSessionDAO == nil {
		return nil, nil
	}
	convID := strings.TrimSpace(chatID)
	if userID <= 0 || convID == "" {
		return nil, nil
	}
	if s.cacheDAO != nil {
		hit, cacheErr := s.cacheDAO.GetActiveScheduleSessionFromConversationCache(ctx, userID, convID)
		switch {
		case cacheErr != nil:
			log.Printf("读取主动调度 session 缓存失败 user=%d chat=%s err=%v", userID, convID, cacheErr)
		case hit != nil:
			return hit, nil
		}
	}
	snapshot, dbErr := s.activeScheduleSessionDAO.GetActiveScheduleSessionByConversationID(ctx, userID, convID)
	if dbErr != nil || snapshot == nil {
		return nil, dbErr
	}
	if s.cacheDAO != nil {
		if fillErr := s.cacheDAO.SetActiveScheduleSessionToCache(ctx, snapshot); fillErr != nil {
			log.Printf("回填主动调度 session 缓存失败 user=%d chat=%s err=%v", userID, convID, fillErr)
		}
	}
	return snapshot, nil
}
// persistActiveScheduleSessionBestEffort syncs the latest active-schedule
// session state to MySQL and then Redis.
//
// Responsibility boundary:
//  1. MySQL is the source of truth — write the table first, then backfill the cache;
//  2. cache failures are log-only and never affect the main flow;
//  3. callers must put the snapshot into its final state before handing it here.
func (s *AgentService) persistActiveScheduleSessionBestEffort(ctx context.Context, snapshot *model.ActiveScheduleSessionSnapshot) error {
	if s == nil || s.activeScheduleSessionDAO == nil || snapshot == nil {
		return nil
	}
	if strings.TrimSpace(snapshot.SessionID) == "" {
		return errors.New("active schedule session_id 不能为空")
	}
	if err := s.activeScheduleSessionDAO.UpsertActiveScheduleSession(ctx, snapshot); err != nil {
		return err
	}
	// Re-read the row to obtain the canonical snapshot as finally stored,
	// minimizing drift between cache and DB. A failed re-read does not affect
	// the main path — the primary table write already succeeded.
	if canonical, readErr := s.activeScheduleSessionDAO.GetActiveScheduleSessionBySessionID(ctx, snapshot.SessionID); readErr == nil && canonical != nil {
		snapshot = canonical
	}
	if s.cacheDAO != nil {
		if fillErr := s.cacheDAO.SetActiveScheduleSessionToCache(ctx, snapshot); fillErr != nil {
			log.Printf("回填主动调度 session 缓存失败 session=%s err=%v", snapshot.SessionID, fillErr)
		}
	}
	return nil
}
// persistActiveScheduleTriggerPreviewBestEffort writes the preview_id produced
// by a rerun back onto the trigger.
//
// Responsibility boundary:
//  1. only maintains the trigger -> preview audit pointer; it neither modifies
//     preview content nor advances confirm/apply state;
//  2. an empty trigger_id or preview_id is skipped outright so incomplete rerun
//     results never land on the trigger record;
//  3. a missing DAO keeps migration-period compatibility — callers still treat
//     the session write-back as the main flow.
func (s *AgentService) persistActiveScheduleTriggerPreviewBestEffort(ctx context.Context, triggerID string, previewID string) error {
	if s == nil || s.activeScheduleDAO == nil {
		return nil
	}
	trigID := strings.TrimSpace(triggerID)
	prevID := strings.TrimSpace(previewID)
	if trigID == "" || prevID == "" {
		return nil
	}
	fields := map[string]any{
		"preview_id": &prevID,
		"updated_at": time.Now(),
	}
	return s.activeScheduleDAO.UpdateTriggerFields(ctx, trigID, fields)
}
// handleActiveScheduleSessionChat handles a chat entry while the conversation
// is occupied by an active-schedule session.
//
// Steps:
//  1. load the session and check whether this conversation is still in the
//     waiting_user_reply / rerunning occupied phase;
//  2. while occupied, persist the user message into history and the timeline
//     first so conversation content is never lost;
//  3. waiting_user_reply transitions to rerunning and runs the rerun synchronously;
//  4. rerunning only emits a "rerun in progress" hint so the same conversation
//     is never advanced concurrently;
//  5. terminal or non-occupied states fall through to the normal agent.
//
// Returns (handled, err); handled=false tells the caller to continue with the
// normal agent flow.
func (s *AgentService) handleActiveScheduleSessionChat(
	ctx context.Context,
	userMessage string,
	traceID string,
	requestStart time.Time,
	userID int,
	chatID string,
	resolvedModelName string,
	outChan chan<- string,
	errChan chan error,
) (bool, error) {
	session, err := s.loadActiveScheduleSessionByConversation(ctx, userID, chatID)
	if err != nil {
		return false, err
	}
	if session == nil || !isActiveScheduleSessionBlockingStatus(session.Status) {
		return false, nil
	}
	trimmedMessage := strings.TrimSpace(userMessage)
	if trimmedMessage != "" {
		// 1. While the session occupies the conversation, every user reply must
		//    still enter the normal conversation history.
		// 2. That keeps visible messages, the timeline, and session state from
		//    drifting apart when the chat page is refreshed later.
		if err := s.persistNewAgentConversationMessage(ctx, userID, chatID, schema.UserMessage(trimmedMessage), 0); err != nil {
			return true, err
		}
	}
	switch session.Status {
	case model.ActiveScheduleSessionStatusWaitingUserReply:
		if trimmedMessage == "" {
			// An empty reply: re-ask the pending question (or a generic prompt)
			// instead of advancing the session.
			assistantText := strings.TrimSpace(session.State.PendingQuestion)
			if assistantText == "" {
				assistantText = "请先补充主动调度需要的关键信息。"
			}
			if err := s.persistNewAgentConversationMessage(ctx, userID, chatID, schema.AssistantMessage(assistantText, nil), 0); err != nil {
				return true, err
			}
			emitActiveScheduleAssistantChunk(outChan, traceID, resolvedModelName, requestStart, assistantText, nil)
			return true, nil
		}
		// 1. Once the user supplies the missing info, flip the session to
		//    rerunning first so concurrent requests stop following the old state
		//    into plain chat.
		// 2. This step is only a status switch; it does not mean the graph has
		//    completed.
		// 3. A DB CAS must claim the rerun here — otherwise two supplementary
		//    messages that both read waiting_user_reply would generate duplicate
		//    previews.
		switched, err := s.activeScheduleSessionDAO.TryTransitionActiveScheduleSessionStatusBySessionID(
			ctx,
			session.SessionID,
			model.ActiveScheduleSessionStatusWaitingUserReply,
			model.ActiveScheduleSessionStatusRerunning,
		)
		if err != nil {
			return true, err
		}
		if !switched {
			// CAS lost: someone else owns the rerun; just emit the waiting hint.
			if err := s.respondActiveScheduleRerunning(ctx, userID, chatID, traceID, resolvedModelName, requestStart, outChan); err != nil {
				return true, err
			}
			return true, nil
		}
		session.Status = model.ActiveScheduleSessionStatusRerunning
		if s.cacheDAO != nil {
			// Best-effort cache refresh; failure is log-only.
			if cacheErr := s.cacheDAO.SetActiveScheduleSessionToCache(ctx, session); cacheErr != nil {
				log.Printf("回填主动调度 rerunning session 缓存失败 session=%s err=%v", session.SessionID, cacheErr)
			}
		}
		return true, s.runActiveScheduleSessionRerun(ctx, session, trimmedMessage, traceID, requestStart, resolvedModelName, outChan, errChan)
	case model.ActiveScheduleSessionStatusRerunning:
		// 1. rerunning is the transitional occupied state: the rerun is already
		//    in flight or has just started.
		// 2. Do not trigger a second rerun here; only give the user a visible
		//    "please wait" hint.
		if trimmedMessage != "" {
			if err := s.respondActiveScheduleRerunning(ctx, userID, chatID, traceID, resolvedModelName, requestStart, outChan); err != nil {
				return true, err
			}
		}
		return true, nil
	default:
		return false, nil
	}
}
// respondActiveScheduleRerunning writes the visible hint shown when a
// duplicate supplement hits the concurrency guard.
//
// Responsibility boundary:
//  1. only writes chat history and SSE text; it advances no session, trigger,
//     or preview state;
//  2. serves as the fallback hint for the rerunning state or a lost CAS,
//     avoiding a second graph trigger;
//  3. returns an error on persistence failure so the caller can route it
//     through the chat entry's error channel.
func (s *AgentService) respondActiveScheduleRerunning(
	ctx context.Context,
	userID int,
	chatID string,
	traceID string,
	resolvedModelName string,
	requestStart time.Time,
	outChan chan<- string,
) error {
	const assistantText = "主动调度正在重新生成建议,请稍后再试。"
	persistErr := s.persistNewAgentConversationMessage(ctx, userID, chatID, schema.AssistantMessage(assistantText, nil), 0)
	if persistErr != nil {
		return persistErr
	}
	emitActiveScheduleAssistantChunk(outChan, traceID, resolvedModelName, requestStart, assistantText, nil)
	return nil
}
// runActiveScheduleSessionRerun synchronously advances a waiting_user_reply
// session, using the user's supplementary message, into a new active-schedule
// result.
//
// Responsibility boundary:
//  1. performs only the minimal orchestration for the chat entry; it does not
//     replicate the worker / notification pipeline;
//  2. on success, writes the new preview / ask_user / close result back to the
//     session and the timeline;
//  3. on failure, marks the session failed to ease later troubleshooting.
func (s *AgentService) runActiveScheduleSessionRerun(
	ctx context.Context,
	session *model.ActiveScheduleSessionSnapshot,
	userMessage string,
	traceID string,
	requestStart time.Time,
	resolvedModelName string,
	outChan chan<- string,
	errChan chan error,
) error {
	if s == nil || s.activeRerunFunc == nil {
		return errors.New("主动调度 rerun 未接入")
	}
	if session == nil {
		return errors.New("active schedule session 不能为空")
	}
	result, err := s.activeRerunFunc(ctx, session, userMessage, traceID, requestStart)
	if err != nil {
		// Mark the session failed and best-effort persist before surfacing the
		// error to the caller.
		session.Status = model.ActiveScheduleSessionStatusFailed
		session.State.FailedReason = strings.TrimSpace(err.Error())
		_ = s.persistActiveScheduleSessionBestEffort(ctx, session)
		return err
	}
	if result == nil {
		result = &ActiveScheduleSessionRerunResult{}
	}
	// Derive the final status: an explicit SessionStatus wins; otherwise a
	// business card implies ready_preview, else remain waiting_user_reply.
	finalStatus := strings.TrimSpace(result.SessionStatus)
	if finalStatus == "" {
		if result.BusinessCard != nil {
			finalStatus = model.ActiveScheduleSessionStatusReadyPreview
		} else {
			finalStatus = model.ActiveScheduleSessionStatusWaitingUserReply
		}
	}
	session.Status = finalStatus
	session.State = result.SessionState
	previewID := strings.TrimSpace(result.PreviewID)
	if previewID != "" {
		session.CurrentPreviewID = previewID
	}
	if session.Status == model.ActiveScheduleSessionStatusReadyPreview {
		// A ready preview clears any pending-question / failure leftovers.
		session.State.PendingQuestion = ""
		session.State.MissingInfo = nil
		session.State.FailedReason = ""
	}
	if previewID != "" {
		// Write the new preview_id back onto the trigger before persisting the
		// session itself.
		if err := s.persistActiveScheduleTriggerPreviewBestEffort(ctx, session.TriggerID, previewID); err != nil {
			return err
		}
	}
	if err := s.persistActiveScheduleSessionBestEffort(ctx, session); err != nil {
		return err
	}
	// Pick the visible assistant text: explicit text first, then the card
	// summary, then a generic fallback.
	assistantText := strings.TrimSpace(result.AssistantText)
	if assistantText == "" && result.BusinessCard != nil {
		assistantText = strings.TrimSpace(result.BusinessCard.Summary)
	}
	if assistantText == "" {
		assistantText = "主动调度建议已更新。"
	}
	// 1. Write the new result into conversation history so the rerun body stays
	//    visible after a refresh.
	// 2. Then append the business-card timeline event so the frontend can
	//    rebuild the active-schedule card from the timeline.
	if err := s.persistNewAgentConversationMessage(ctx, session.UserID, session.ConversationID, schema.AssistantMessage(assistantText, nil), 0); err != nil {
		return err
	}
	if result.BusinessCard != nil {
		if _, err := s.appendConversationTimelineEvent(
			ctx,
			session.UserID,
			session.ConversationID,
			model.AgentTimelineKindBusinessCard,
			"assistant",
			assistantText,
			map[string]any{"business_card": result.BusinessCard},
			0,
		); err != nil {
			return err
		}
	}
	emitActiveScheduleAssistantChunk(outChan, traceID, resolvedModelName, requestStart, assistantText, nil)
	if result.BusinessCard != nil {
		emitActiveScheduleBusinessCardChunk(outChan, session.SessionID, traceID, resolvedModelName, requestStart, result.BusinessCard)
	}
	return nil
}
// isActiveScheduleSessionBlockingStatus reports whether the session status
// still occupies the conversation (waiting_user_reply or rerunning). The
// comparison is whitespace-trimmed and case-insensitive.
func isActiveScheduleSessionBlockingStatus(status string) bool {
	normalized := strings.ToLower(strings.TrimSpace(status))
	return normalized == model.ActiveScheduleSessionStatusWaitingUserReply ||
		normalized == model.ActiveScheduleSessionStatusRerunning
}
// emitActiveScheduleAssistantChunk renders a visible assistant text chunk for
// the active-schedule flow and pushes it to the SSE channel without blocking.
// Build failures are logged and swallowed — emission is best-effort.
func emitActiveScheduleAssistantChunk(outChan chan<- string, traceID string, modelName string, requestStart time.Time, text string, extra *agentstream.OpenAIChunkExtra) {
	trimmed := strings.TrimSpace(text)
	payload, buildErr := agentstream.ToOpenAIAssistantChunkWithExtra(traceID, modelName, requestStart.Unix(), trimmed, true, extra)
	if buildErr != nil {
		log.Printf("构造主动调度 assistant chunk 失败 trace=%s err=%v", traceID, buildErr)
		return
	}
	pushChunkNonBlocking(outChan, payload)
}
// emitActiveScheduleBusinessCardChunk renders a business-card chunk for the
// active-schedule flow and pushes it to the SSE channel without blocking.
// A nil card is a no-op; build failures are logged and swallowed.
func emitActiveScheduleBusinessCardChunk(outChan chan<- string, blockID string, traceID string, modelName string, requestStart time.Time, card *agentstream.StreamBusinessCardExtra) {
	if card == nil {
		return
	}
	cardExtra := agentstream.NewBusinessCardExtra(blockID, "active_schedule_session", card)
	payload, buildErr := agentstream.ToOpenAIStreamWithExtra(nil, traceID, modelName, requestStart.Unix(), true, cardExtra)
	if buildErr != nil {
		log.Printf("构造主动调度 business card chunk 失败 trace=%s err=%v", traceID, buildErr)
		return
	}
	pushChunkNonBlocking(outChan, payload)
}
// pushChunkNonBlocking delivers an SSE payload on a best-effort basis.
// A nil channel or a blank payload is silently ignored; a full channel drops
// the payload with a log line instead of blocking the sender.
func pushChunkNonBlocking(outChan chan<- string, payload string) {
	if outChan == nil || strings.TrimSpace(payload) == "" {
		return
	}
	select {
	case outChan <- payload:
		return
	default:
	}
	log.Printf("主动调度 SSE 通道已满,丢弃 payload")
}

View File

@@ -0,0 +1,727 @@
package sv
import (
"context"
"errors"
"fmt"
"log"
"strings"
"time"
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
agentgraph "github.com/LoveLosita/smartflow/backend/services/agent/graph"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
"github.com/cloudwego/eino/schema"
"github.com/spf13/viper"
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/respond"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
)
// History-kind markers for agent-persisted chat history entries.
// NOTE(review): the usage sites are outside this chunk — presumably
// agentHistoryKindKey is the extra-metadata key and agentHistoryKindLoopClosed
// is the value written when the execute loop closes; confirm against the
// persistence call sites.
const (
	agentHistoryKindKey        = "newagent_history_kind"
	agentHistoryKindLoopClosed = "execute_loop_closed"
)
// runAgentGraph 运行 agent 通用 graph直接替换旧 agent 路由逻辑。
//
// 职责边界:
// 1. 负责构造 AgentGraphRunInputRuntimeState、ConversationContext、Request、Deps
// 2. 负责将 outChan 适配为 ChunkEmitter
// 3. 负责调用 graph.RunAgentGraph
// 4. 负责持久化聊天历史(复用现有逻辑)。
//
// 设计原则:
// 1. 直接走 agent graph不再经过旧的 agentrouter 路由决策;
// 2. 所有任务类型chat、task、quick_note都由 graph 内部 LLM 决策;
// 3. 状态恢复、工具执行、确认流程全部由 graph 节点处理。
func (s *AgentService) runAgentGraph(
ctx context.Context,
userMessage string,
thinkingMode string,
modelName string,
userID int,
chatID string,
extra map[string]any,
traceID string,
requestStart time.Time,
outChan chan<- string,
errChan chan error,
) {
requestCtx, _ := withRequestTokenMeter(ctx)
if s == nil || s.llmService == nil {
// 0. agent 主链强依赖 llm-service装配漏传时直接返回错误避免 nil receiver panic。
pushErrNonBlocking(errChan, errors.New("agent llm service is not initialized"))
return
}
// 1. 规范会话 ID 和模型选择。
chatID = normalizeConversationID(chatID)
_, resolvedModelName := s.pickChatModel(modelName)
// 2. 确保会话存在(优先缓存,必要时回源 DB
result, err := s.agentCache.GetConversationStatus(requestCtx, chatID)
if err != nil {
pushErrNonBlocking(errChan, err)
return
}
if !result {
innerResult, ifErr := s.repo.IfChatExists(requestCtx, userID, chatID)
if ifErr != nil {
pushErrNonBlocking(errChan, ifErr)
return
}
if !innerResult {
if _, err = s.repo.CreateNewChat(userID, chatID); err != nil {
pushErrNonBlocking(errChan, err)
return
}
}
if err = s.agentCache.SetConversationStatus(requestCtx, chatID); err != nil {
log.Printf("设置会话状态缓存失败 chat=%s: %v", chatID, err)
}
}
// 3. retry 机制已下线,不再构建重试元数据。
// 4. 如果当前 conversation 被主动调度 session 占管,先走 session 分支,不进入普通 agent。
// 这样 waiting_user_reply / rerunning 期间,用户消息会先推动主动调度闭环,而不是误进自由聊天。
if handled, sessionErr := s.handleActiveScheduleSessionChat(requestCtx, userMessage, traceID, requestStart, userID, chatID, resolvedModelName, outChan, errChan); sessionErr != nil {
pushErrNonBlocking(errChan, sessionErr)
return
} else if handled {
return
}
// 5. 从 StateStore 加载或创建 RuntimeState。
// 恢复场景confirm/ask_user同时拿到快照中保存的 ConversationContext
// 其中包含工具调用/结果等中间消息,保证后续 LLM 调用的消息链完整。
runtimeState, savedConversationContext, savedScheduleState, savedOriginalScheduleState := s.loadOrCreateRuntimeState(requestCtx, chatID, userID)
// 6. 构造 ConversationContext。
// 优先使用快照中恢复的 ConversationContext含工具调用/结果),
// 无快照时从 Redis LLM 历史缓存加载。
var conversationContext *agentmodel.ConversationContext
if savedConversationContext != nil {
conversationContext = savedConversationContext
// 把用户本轮输入追加到恢复的上下文中(与 loadConversationContext 行为一致)。
if strings.TrimSpace(userMessage) != "" {
conversationContext.AppendHistory(schema.UserMessage(userMessage))
}
} else {
conversationContext = s.loadConversationContext(requestCtx, chatID, userMessage)
}
// 6.1. 在 graph 执行前统一补充与当前输入相关的记忆上下文(预取管线模式)。
// 6.1.1 先读 Redis 预取缓存注入到 ConversationContext再启动后台 goroutine 做完整检索;
// 6.1.2 返回的 channel 传入 Deps供 Execute/Plan 节点在启动前消费最新记忆;
// 6.1.3 检索失败只降级为"本轮不注入记忆",不阻断主链路。
memoryFuture := s.injectMemoryContext(requestCtx, conversationContext, userID, chatID, userMessage)
// 6.5 将前端传入的 thinkingMode 写入 CommonState供 ChatNode 及下游节点读取。
cs := runtimeState.EnsureCommonState()
cs.ThinkingMode = thinkingMode
// 6.6 若 extra 携带 task_class_ids校验后写入 CommonState仅首轮/尚未设置时生效,跨轮持久化)。
if taskClassIDs := readAgentExtraIntSlice(extra, "task_class_ids"); len(taskClassIDs) > 0 {
cs := runtimeState.EnsureCommonState()
if len(cs.TaskClassIDs) == 0 {
if s.scheduleProvider == nil {
pushErrNonBlocking(errChan, respond.WrongTaskClassID)
return
}
metas, metaErr := s.scheduleProvider.LoadTaskClassMetas(requestCtx, userID, taskClassIDs)
if metaErr != nil {
pushErrNonBlocking(errChan, respond.WrongTaskClassID)
return
}
cs.TaskClassIDs = taskClassIDs
cs.TaskClasses = metas
}
}
cs = runtimeState.EnsureCommonState()
// 6.7 先把本轮用户输入落库,确保后续可见 assistant 消息按真实时间线追加。
userMsg := schema.UserMessage(userMessage)
if err := s.persistNewAgentConversationMessage(requestCtx, userID, chatID, userMsg, 0); err != nil {
pushErrNonBlocking(errChan, err)
return
}
persistVisibleMessage := func(persistCtx context.Context, state *agentmodel.CommonState, msg *schema.Message) error {
targetState := state
if targetState == nil {
targetState = runtimeState.EnsureCommonState()
}
if targetState != nil {
if targetState.UserID <= 0 {
targetState.UserID = userID
}
if strings.TrimSpace(targetState.ConversationID) == "" {
targetState.ConversationID = chatID
}
}
return s.persistNewAgentConversationMessage(persistCtx, userID, chatID, msg, 0)
}
// 7. 构造 AgentGraphRequest。
var (
confirmAction string
resumeInteractionID string
)
if len(extra) > 0 {
confirmAction = readAgentExtraString(extra, "confirm_action")
resumeInteractionID = readAgentExtraString(extra, "resume_interaction_id")
}
graphRequest := agentmodel.AgentGraphRequest{
UserInput: userMessage,
ConfirmAction: confirmAction,
ResumeInteractionID: resumeInteractionID,
AlwaysExecute: readAgentExtraBool(extra, "always_execute"),
}
graphRequest.Normalize()
// 8. 适配 LLM clients统一从 llm-service 取出 agent 图所需模型,不再直接碰 AIHub
// 8.1 Chat/Deliver 使用 Pro 模型:路由分流、闲聊、交付总结属于标准复杂度。
// 8.2 Plan/Execute 使用 Max 模型:规划和 ReAct 循环需要深度推理能力。
llmClients := s.llmService.NewAgentModelClients()
chatClient := llmClients.Chat
planClient := llmClients.Plan
executeClient := llmClients.Execute
deliverClient := llmClients.Deliver
summaryClient := llmClients.Summary
// 9. 适配 SSE emitter。
sseEmitter := agentstream.NewSSEPayloadEmitter(outChan)
chunkEmitter := agentstream.NewChunkEmitter(sseEmitter, traceID, resolvedModelName, requestStart.Unix())
chunkEmitter.SetReasoningSummaryFunc(s.makeReasoningSummaryFunc(summaryClient))
// 关键卡片事件走统一时间线持久化,保证刷新后可重建。
chunkEmitter.SetExtraEventHook(func(extra *agentstream.OpenAIChunkExtra) {
s.persistAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
})
// 10. 构造 AgentGraphDeps由 cmd/start.go 注入的依赖)。
deps := agentmodel.AgentGraphDeps{
ChatClient: chatClient,
PlanClient: planClient,
ExecuteClient: executeClient,
DeliverClient: deliverClient,
ChunkEmitter: chunkEmitter,
StateStore: s.agentStateStore,
ToolRegistry: s.toolRegistry,
ScheduleProvider: s.scheduleProvider,
CompactionStore: s.compactionStore,
RoughBuildFunc: s.makeRoughBuildFunc(),
WriteSchedulePreview: s.makeWriteSchedulePreviewFunc(),
MemoryFuture: memoryFuture,
ThinkingPlan: viper.GetBool("agent.thinking.plan"),
ThinkingExecute: viper.GetBool("agent.thinking.execute"),
ThinkingDeliver: viper.GetBool("agent.thinking.deliver"),
PersistVisibleMessage: persistVisibleMessage,
QuickTaskDeps: s.quickTaskDeps,
}
// 11. 构造 AgentGraphRunInput 并运行 graph。
runInput := agentmodel.AgentGraphRunInput{
RuntimeState: runtimeState,
ConversationContext: conversationContext,
ScheduleState: savedScheduleState,
OriginalScheduleState: savedOriginalScheduleState,
Request: graphRequest,
Deps: deps,
}
finalState, graphErr := agentgraph.RunAgentGraph(requestCtx, runInput)
if graphErr != nil {
// 1. 客户端断连导致的 context 取消,属于正常场景,不推错误通道也不跑 fallback。
// 否则会刷 "错误通道已满" 日志噪音,且 fallback 在 ctx 已取消时也会失败。
if errors.Is(graphErr, context.Canceled) || requestCtx.Err() != nil {
log.Printf("[WARN] agent graph 因客户端断连中止 trace=%s chat=%s", traceID, chatID)
return
}
log.Printf("[ERROR] agent graph 执行失败 trace=%s chat=%s: %v", traceID, chatID, graphErr)
pushErrNonBlocking(errChan, fmt.Errorf("graph 执行失败: %w", graphErr))
// Graph 出错时回退普通聊天,保证可用性。回退使用 llm-service 的 Pro 模型。
s.runNormalChatFlow(requestCtx, chatClient, resolvedModelName, userMessage, true, "", nil, thinkingModeToBool(thinkingMode), userID, chatID, traceID, requestStart, outChan, errChan)
return
}
// 12. 持久化聊天历史(用户消息 + 助手回复)。
requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
s.adjustAgentRequestTokenUsage(requestCtx, userID, chatID, requestTotalTokens)
// 12.5. 将最终状态快照异步写入 MySQL通过 outbox
// Deliver 节点已将快照保存到 Redis2h TTL此处通过 outbox 异步写入 MySQL 做永久存储。
if finalState != nil {
snapshot := &agentmodel.AgentStateSnapshot{
RuntimeState: finalState.EnsureRuntimeState(),
ConversationContext: finalState.EnsureConversationContext(),
}
eventsvc.PublishAgentStateSnapshot(requestCtx, s.eventPublisher, snapshot, chatID, userID)
}
// 12.6. graph 完成后条件触发记忆抽取。
// 说明:
// 1. 只有本轮未走快捷随口记任务路径时才触发记忆抽取;
// 2. 避免随口记创建的 Task 与记忆系统产生语义冲突。
if finalState != nil {
cs := finalState.EnsureRuntimeState().EnsureCommonState()
if cs == nil || !cs.UsedQuickNote {
if memErr := eventsvc.PublishMemoryExtractFromGraph(requestCtx, s.eventPublisher, userID, chatID, userMessage); memErr != nil {
log.Printf("[WARN] graph 完成后发布记忆抽取事件失败 trace=%s chat=%s err=%v", traceID, chatID, memErr)
}
}
}
// 排程预览缓存由 Deliver 节点负责写入(通过注入的 WriteSchedulePreview func
// 保证只有任务真正完成时才写,中断路径不写中间态。
// 13. 发送 OpenAI 兼容的流式结束标记,告知客户端 stream 已完成。
_ = chunkEmitter.EmitDone()
// 14. 异步生成会话标题。
s.ensureConversationTitleAsync(userID, chatID)
}
// loadOrCreateRuntimeState loads a saved agent snapshot from the StateStore,
// or builds a fresh runtime state for a brand-new conversation.
//
// Returns:
//   - RuntimeState: persistable flow state,
//   - ConversationContext: the full dialog context saved in the snapshot
//     (including tool calls/results); non-nil only when restoring an existing
//     snapshot, nil for a new conversation,
//   - ScheduleState and its original-copy counterpart from the snapshot.
//
// Design notes:
//  1. The snapshot's ConversationContext carries the intermediate messages
//     produced while the graph ran (tool calls, tool results, ...) that never
//     appear in the Redis LLM history cache.
//  2. Resume scenarios (confirm/ask_user) MUST use the snapshot's
//     ConversationContext; otherwise tool results are lost and later LLM calls
//     receive illegal bare Tool messages (the API rejects them and the
//     connection drops).
func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID string, userID int) (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
	// newRT builds a fresh runtime state with the request identity stamped in.
	newRT := func() (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
		rt := agentmodel.NewAgentRuntimeState(nil)
		cs := rt.EnsureCommonState()
		cs.UserID = userID
		cs.ConversationID = chatID // saveAgentState relies on this field to decide whether to persist
		return rt, nil, nil, nil
	}
	if s.agentStateStore == nil {
		return newRT()
	}
	snapshot, ok, err := s.agentStateStore.Load(ctx, chatID)
	log.Printf("[DEBUG] loadOrCreateRuntimeState chatID=%s ok=%v err=%v hasRuntime=%v hasPending=%v hasCtx=%v hasSchedule=%v hasOriginal=%v",
		chatID, ok, err,
		snapshot != nil && snapshot.RuntimeState != nil,
		snapshot != nil && snapshot.RuntimeState != nil && snapshot.RuntimeState.HasPendingInteraction(),
		snapshot != nil && snapshot.ConversationContext != nil,
		snapshot != nil && snapshot.ScheduleState != nil,
		snapshot != nil && snapshot.OriginalScheduleState != nil,
	)
	if err != nil {
		log.Printf("加载 agent 状态失败 chat=%s: %v", chatID, err)
		return newRT()
	}
	if ok && snapshot != nil && snapshot.RuntimeState != nil {
		// Restore the runtime and force its identity to match this request.
		cs := snapshot.RuntimeState.EnsureCommonState()
		cs.UserID = userID
		cs.ConversationID = chatID
		// 1. Cold-load fallback: if the previous round already closed and no
		//    interaction is pending, this request starts a new round.
		// 2. Reset transient execution fields first, so stale round/terminal
		//    state cannot pollute chat routing or the next execute phase.
		// 3. The chat node resets under the same condition; this fallback stays
		//    to cover reconnects and entry-bypass paths.
		if !snapshot.RuntimeState.HasPendingInteraction() && cs.Phase == agentmodel.PhaseDone {
			terminalBefore := cs.TerminalStatus()
			roundBefore := cs.RoundUsed
			// 1. Only a "completed" round writes the loop-closed marker:
			// 1.1 on the next run the prompt archives the previous loop from msg2 into msg1;
			// 1.2 aborted/exhausted rounds write no marker and keep msg2, so the loop can resume later.
			if terminalBefore == agentmodel.FlowTerminalStatusCompleted {
				appendExecuteLoopClosedMarker(snapshot.ConversationContext)
			}
			cs.ResetForNextRun()
			log.Printf(
				"[DEBUG] loadOrCreateRuntimeState reset runtime for next run chat=%s round_before=%d terminal_before=%s",
				chatID,
				roundBefore,
				terminalBefore,
			)
		}
		// Normally the Chat node overrides Phase via routing; the block above only
		// pre-cleans the "previous round done" case. Other cross-round state
		// (task-class scope, history, in-memory schedule) is kept, enabling
		// continuous schedule adjustment across turns.
		originalScheduleState := snapshot.OriginalScheduleState
		if snapshot.ScheduleState != nil && originalScheduleState == nil {
			// 1. Back-compat: old snapshots may carry only ScheduleState without the original copy.
			// 2. Clone one here so downstream nodes always receive the "restored + original" pair.
			// 3. Even though nothing is persisted at this stage, keeping the invariant
			//    avoids semantic drift on the next round.
			originalScheduleState = snapshot.ScheduleState.Clone()
		}
		return snapshot.RuntimeState, snapshot.ConversationContext, snapshot.ScheduleState, originalScheduleState
	}
	return newRT()
}
// appendExecuteLoopClosedMarker appends a lightweight "previous execute loop
// closed normally" marker to the conversation history.
//
// Responsibilities:
//  1. Only append the marker used for prompt layering; no summarizing or trimming.
//  2. Idempotent: skip when the last history entry already is such a marker.
//  3. No-op on a nil context so cold starts stay safe.
func appendExecuteLoopClosedMarker(conversationContext *agentmodel.ConversationContext) {
	if conversationContext == nil {
		return
	}
	if msgs := conversationContext.HistorySnapshot(); len(msgs) > 0 {
		if tail := msgs[len(msgs)-1]; tail != nil && tail.Extra != nil {
			kind, isStr := tail.Extra[agentHistoryKindKey].(string)
			if isStr && strings.TrimSpace(kind) == agentHistoryKindLoopClosed {
				return
			}
		}
	}
	marker := &schema.Message{
		Role:    schema.Assistant,
		Content: "",
		Extra:   map[string]any{agentHistoryKindKey: agentHistoryKindLoopClosed},
	}
	conversationContext.AppendHistory(marker)
}
// loadConversationContext builds a ConversationContext from persisted history,
// preferring the Redis cache and falling back to the database on a miss.
func (s *AgentService) loadConversationContext(ctx context.Context, chatID, userMessage string) *agentmodel.ConversationContext {
	// 1. Try the Redis history cache first.
	cached, err := s.agentCache.GetHistory(ctx, chatID)
	if err != nil {
		log.Printf("加载历史失败 chat=%s: %v", chatID, err)
		cached = nil
	}
	// 2. On a cache miss, fall back to the DB and backfill Redis.
	if cached == nil {
		rows, dbErr := s.repo.GetUserChatHistories(ctx, 0, pkg.HistoryFetchLimitByModel("worker"), chatID)
		if dbErr != nil {
			log.Printf("从 DB 加载历史失败 chat=%s: %v", chatID, dbErr)
		} else {
			cached = conv.ToEinoMessages(rows)
			if backfillErr := s.agentCache.BackfillHistory(ctx, chatID, cached); backfillErr != nil {
				log.Printf("回填历史到 Redis 失败 chat=%s: %v", chatID, backfillErr)
			}
		}
	}
	// 3. Assemble the context and append the current user input for the graph.
	conversationContext := agentmodel.NewConversationContext(agentprompt.SystemPrompt)
	if cached != nil {
		conversationContext.ReplaceHistory(cached)
	}
	if strings.TrimSpace(userMessage) != "" {
		conversationContext.AppendHistory(schema.UserMessage(userMessage))
	}
	return conversationContext
}
// persistNewAgentConversationMessage persists a single user-visible agent
// message to Redis and MySQL, then mirrors it into the conversation timeline.
//
// Responsibilities:
//  1. Persist exactly one message; no graph flow control.
//  2. TokensConsumed is supplied by the caller (visible agent messages default to 0).
//  3. Redis failures are logged only; DB failures are returned so the caller
//     can decide whether to abort the current flow.
func (s *AgentService) persistNewAgentConversationMessage(
	ctx context.Context,
	userID int,
	chatID string,
	msg *schema.Message,
	tokensConsumed int,
) error {
	if s == nil || msg == nil {
		return nil
	}
	role := strings.TrimSpace(string(msg.Role))
	content := strings.TrimSpace(msg.Content)
	if role == "" || content == "" {
		// Nothing user-visible to persist.
		return nil
	}
	if userID <= 0 || strings.TrimSpace(chatID) == "" {
		return fmt.Errorf("agent visible message persist: invalid conversation identity")
	}
	if ctx == nil {
		ctx = context.Background()
	}
	// Only the body is stored: raw model reasoning is replaced by the
	// thinking_summary flow, so history/timeline refreshes never re-expose
	// internal thought text.
	stored := &schema.Message{
		Role:             msg.Role,
		Content:          content,
		ReasoningContent: "",
	}
	if len(msg.Extra) > 0 {
		extraCopy := make(map[string]any, len(msg.Extra))
		for key, value := range msg.Extra {
			extraCopy[key] = value
		}
		stored.Extra = extraCopy
	}
	if cacheErr := s.agentCache.PushMessage(ctx, chatID, stored); cacheErr != nil {
		log.Printf("写入 agent 可见消息到 Redis 失败 chat=%s role=%s: %v", chatID, role, cacheErr)
	}
	// Extract the optional reasoning duration carried in Extra (the source may
	// encode it as int, int64 or float64 depending on serialization).
	reasoningSeconds := 0
	if stored.Extra != nil {
		switch raw := stored.Extra["reasoning_duration_seconds"].(type) {
		case int:
			reasoningSeconds = raw
		case int64:
			reasoningSeconds = int(raw)
		case float64:
			reasoningSeconds = int(raw)
		}
	}
	payload := model.ChatHistoryPersistPayload{
		UserID:                   userID,
		ConversationID:           chatID,
		Role:                     role,
		Message:                  content,
		ReasoningContent:         strings.TrimSpace(stored.ReasoningContent),
		ReasoningDurationSeconds: reasoningSeconds,
		TokensConsumed:           tokensConsumed,
	}
	if err := s.PersistChatHistory(ctx, payload); err != nil {
		return err
	}
	// Mirror the message into the timeline so text and cards replay in one
	// single seq order.
	timelineKind := model.AgentTimelineKindAssistantText
	if role == "user" {
		timelineKind = model.AgentTimelineKindUserText
	}
	timelinePayload := map[string]any{}
	if reasoningSeconds > 0 {
		timelinePayload["reasoning_duration_seconds"] = reasoningSeconds
	}
	_, err := s.appendConversationTimelineEvent(
		ctx,
		userID,
		chatID,
		timelineKind,
		role,
		content,
		timelinePayload,
		tokensConsumed,
	)
	return err
}
// NOTE(review): this comment documents makeRoughBuildFunc, which is declared
// further below (after adjustAgentRequestTokenUsage); consider moving it next
// to that function so godoc associates it correctly.
//
// makeRoughBuildFunc wraps AgentService.HybridScheduleWithPlanMultiFunc into
// the agent-layer RoughBuildFunc, converting its results into
// RoughBuildPlacement values. When HybridScheduleWithPlanMultiFunc is not
// injected it returns nil, and the RoughBuild node silently skips rough
// placement.
//
// Fix note:
// The old implementation consumed the second return value []TaskClassItem, so
// only entries with EmbeddedTime != nil (embedded filler classes) produced a
// placement; tasks placed in regular slots were all dropped.
// The correct approach consumes the first return value []HybridScheduleEntry,
// filtering entries with Status=="suggested" and TaskItemID>0, so both
// embedded and non-embedded rough results land in ScheduleState.
// adjustAgentRequestTokenUsage books the whole-request token usage of one
// graph run into the ledger as a single adjustment.
//
// Notes:
//  1. Visible agent messages are persisted with 0 tokens each; the full round
//     is accounted for here at request teardown.
//  2. With the outbox enabled, the async token-adjust event keeps the ledger
//     write path consistent; otherwise a synchronous write is used.
//  3. This is request cleanup and must never disturb the reply the user has
//     already seen.
func (s *AgentService) adjustAgentRequestTokenUsage(ctx context.Context, userID int, chatID string, deltaTokens int) {
	if s == nil || userID <= 0 || deltaTokens <= 0 || strings.TrimSpace(chatID) == "" {
		return
	}
	if ctx == nil {
		ctx = context.Background()
	}
	if s.eventPublisher == nil {
		// Synchronous fallback when the outbox is not wired.
		if err := s.repo.AdjustTokenUsage(ctx, userID, chatID, deltaTokens, ""); err != nil {
			log.Printf("同步写入 agent 请求级 token 调整失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
		}
		return
	}
	payload := model.ChatTokenUsageAdjustPayload{
		UserID:         userID,
		ConversationID: chatID,
		TokensDelta:    deltaTokens,
		Reason:         "new_agent_request",
		TriggeredAt:    time.Now(),
	}
	if err := eventsvc.PublishChatTokenUsageAdjustRequested(ctx, s.eventPublisher, payload); err != nil {
		log.Printf("写入 agent 请求级 token 调整事件失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
	}
}
// makeRoughBuildFunc adapts HybridScheduleWithPlanMultiFunc into the agent
// layer's RoughBuildFunc. Entries come from the first return value, filtered
// to Status=="suggested" with a positive TaskItemID, so both embedded and
// non-embedded rough placements are preserved. Returns nil when the
// dependency was not injected, letting the RoughBuild node skip silently.
func (s *AgentService) makeRoughBuildFunc() agentmodel.RoughBuildFunc {
	if s.HybridScheduleWithPlanMultiFunc == nil {
		return nil
	}
	return func(ctx context.Context, userID int, taskClassIDs []int) ([]agentmodel.RoughBuildPlacement, error) {
		entries, _, err := s.HybridScheduleWithPlanMultiFunc(ctx, userID, taskClassIDs)
		if err != nil {
			return nil, err
		}
		result := make([]agentmodel.RoughBuildPlacement, 0, len(entries))
		for _, entry := range entries {
			if entry.Status != "suggested" || entry.TaskItemID == 0 {
				continue
			}
			result = append(result, agentmodel.RoughBuildPlacement{
				TaskItemID:  entry.TaskItemID,
				Week:        entry.Week,
				DayOfWeek:   entry.DayOfWeek,
				SectionFrom: entry.SectionFrom,
				SectionTo:   entry.SectionTo,
			})
		}
		return result, nil
	}
}
// makeWriteSchedulePreviewFunc wraps the cacheDAO schedule-preview write so
// the Execute/Deliver nodes share one injection point. Returns nil when
// cacheDAO is not wired.
func (s *AgentService) makeWriteSchedulePreviewFunc() agentmodel.WriteSchedulePreviewFunc {
	if s.cacheDAO == nil {
		return nil
	}
	return func(ctx context.Context, state *schedule.ScheduleState, userID int, conversationID string, taskClassIDs []int) error {
		stateDigest := summarizeScheduleStateForPreviewDebug(state)
		preview := agentconv.ScheduleStateToPreview(state, userID, conversationID, taskClassIDs, "")
		if preview == nil {
			// Nothing convertible: log and skip without failing the node.
			log.Printf("[WARN] schedule preview skipped chat=%s user=%d state=%s", conversationID, userID, stateDigest)
			return nil
		}
		log.Printf(
			"[DEBUG] schedule preview write chat=%s user=%d state=%s preview=%s generated_at=%s",
			conversationID,
			userID,
			stateDigest,
			summarizeHybridEntriesForPreviewDebug(preview.HybridEntries),
			preview.GeneratedAt.Format(time.RFC3339),
		)
		return s.cacheDAO.SetSchedulePlanPreviewToCache(ctx, userID, conversationID, preview)
	}
}
// summarizeScheduleStateForPreviewDebug renders a compact digest of the
// in-memory schedule state, used before Deliver writes the preview cache.
func summarizeScheduleStateForPreviewDebug(state *schedule.ScheduleState) string {
	if state == nil {
		return "state=nil"
	}
	var pending, suggested, existing, taskItemSlots, eventSlots int
	for idx := range state.Tasks {
		task := &state.Tasks[idx]
		switch {
		case schedule.IsPendingTask(*task):
			pending++
		case schedule.IsSuggestedTask(*task):
			suggested++
		case schedule.IsExistingTask(*task):
			existing++
		}
		if len(task.Slots) == 0 {
			continue
		}
		if task.Source == "task_item" {
			taskItemSlots++
		}
		if task.Source == "event" {
			eventSlots++
		}
	}
	return fmt.Sprintf(
		"tasks=%d pending=%d suggested=%d existing=%d task_item_with_slot=%d event_with_slot=%d",
		len(state.Tasks),
		pending,
		suggested,
		existing,
		taskItemSlots,
		eventSlots,
	)
}
// summarizeHybridEntriesForPreviewDebug renders a compact digest of the
// converted HybridEntries for preview debugging.
func summarizeHybridEntriesForPreviewDebug(entries []model.HybridScheduleEntry) string {
	var existing, suggested, taskType, courseType int
	for _, entry := range entries {
		if entry.Status == "suggested" {
			suggested++
		} else {
			existing++
		}
		switch entry.Type {
		case "task":
			taskType++
		case "course":
			courseType++
		}
	}
	return fmt.Sprintf(
		"entries=%d existing=%d suggested=%d task_type=%d course_type=%d",
		len(entries),
		existing,
		suggested,
		taskType,
		courseType,
	)
}
// --- Dependency-injection setters ---
// SetToolRegistry injects the agent tool registry (wired in cmd/start.go).
func (s *AgentService) SetToolRegistry(registry *agenttools.ToolRegistry) {
	s.toolRegistry = registry
}
// SetScheduleProvider injects the schedule-state provider (wired in cmd/start.go).
func (s *AgentService) SetScheduleProvider(provider agentmodel.ScheduleStateProvider) {
	s.scheduleProvider = provider
}
// SetAgentStateStore injects the agent state store (wired in cmd/start.go).
func (s *AgentService) SetAgentStateStore(store agentmodel.AgentStateStore) {
	s.agentStateStore = store
}
// SetCompactionStore injects the compaction store (wired in cmd/start.go).
func (s *AgentService) SetCompactionStore(store agentmodel.CompactionStore) {
	s.compactionStore = store
}
// SetQuickTaskDeps injects the quick-task dependencies (wired in cmd/start.go).
func (s *AgentService) SetQuickTaskDeps(deps agentmodel.QuickTaskDeps) {
	s.quickTaskDeps = deps
}

View File

@@ -0,0 +1,213 @@
package sv
import (
"context"
"log"
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
const (
	// agentMemoryRetrieveLimit caps how many memory items one retrieval returns.
	agentMemoryRetrieveLimit = 10
	// agentMemoryIntroLine is the pinned-block preamble shown before injected memories.
	agentMemoryIntroLine = "以下是与当前对话相关的用户记忆,仅在自然且确实有帮助时参考,不要生硬复述。"
)
// MemoryReader describes the minimal capability the agent main chain needs to
// read memories.
//
// Responsibilities:
//  1. Only "retrieve candidate memories for the current input".
//  2. No prompt assembly; callers need no knowledge of the memory module's
//     internal repo/service structure.
//  3. Returns memory DTOs directly, so the service layer does not maintain a
//     duplicate structure.
type MemoryReader interface {
	Retrieve(ctx context.Context, req memorymodel.RetrieveRequest) ([]memorymodel.ItemDTO, error)
}
// memoryObserveProvider is the optional interface a MemoryReader may implement
// to expose its observability hooks (observer + metrics) to this service.
type memoryObserveProvider interface {
	MemoryObserver() memoryobserve.Observer
	MemoryMetrics() memoryobserve.MetricsRecorder
}
// SetMemoryReader injects the thin memory-read interface and render config
// used by the agent main chain. When the reader also provides observability
// hooks, they replace the default no-op observer/metrics.
func (s *AgentService) SetMemoryReader(reader MemoryReader, cfg memorymodel.Config) {
	s.memoryReader = reader
	s.memoryCfg = cfg
	observer := memoryobserve.NewNopObserver()
	metrics := memoryobserve.NewNopMetrics()
	if provider, ok := reader.(memoryObserveProvider); ok {
		observer = provider.MemoryObserver()
		metrics = provider.MemoryMetrics()
	}
	s.memoryObserver = observer
	s.memoryMetrics = metrics
}
// injectMemoryContext primes the ConversationContext pinned block with
// memories relevant to the current turn, before graph execution starts.
//
// Prefetch-pipeline mode:
//  1. Read the Redis prefetch cache (written last turn); on a hit, inject it
//     into the ConversationContext immediately.
//  2. Start a background goroutine doing the full retrieval; the rendered
//     result goes to the returned channel and back into Redis.
//  3. Chat consumes the cached memories (zero first-byte latency);
//     Execute/Plan consume the freshest result from the channel. Failures only
//     degrade to "no memory this turn" and never block the main chain.
func (s *AgentService) injectMemoryContext(
	ctx context.Context,
	conversationContext *agentmodel.ConversationContext,
	userID int,
	chatID string,
	userMessage string,
) chan string {
	memoryFuture := make(chan string, 1)
	if conversationContext == nil {
		return memoryFuture
	}
	// 1. Gate: without a reader or a valid user, drop any stale block and bail.
	if s.memoryReader == nil || userID <= 0 {
		conversationContext.RemovePinnedBlock(agentmodel.MemoryContextBlockKey)
		return memoryFuture
	}
	// 2. Read the Redis prefetch cache (<5ms) and inject on a hit.
	//    Guard s.cacheDAO so a missing injection cannot panic the request path
	//    (mirrors the nil check in makeWriteSchedulePreviewFunc); also surface
	//    cache-read errors in the log instead of discarding them.
	if s.cacheDAO != nil {
		cachedItems, cacheErr := s.cacheDAO.GetMemoryPrefetchCache(ctx, userID, chatID)
		if cacheErr != nil {
			log.Printf("[WARN] memory prefetch: 读取缓存失败 user=%d chat=%s err=%v", userID, chatID, cacheErr)
		}
		if len(cachedItems) > 0 {
			content := renderMemoryPinnedContentByMode(cachedItems, s.memoryCfg.EffectiveInjectRenderMode())
			if content != "" {
				conversationContext.UpsertPinnedBlock(agentmodel.ContextBlock{
					Key:     agentmodel.MemoryContextBlockKey,
					Title:   agentmodel.MemoryContextBlockTitle,
					Content: content,
				})
				s.recordMemoryInject(ctx, userID, len(cachedItems), true, nil, "prefetch_cache")
				log.Printf("[INFO] memory prefetch: 从 Redis 缓存注入记忆 user=%d count=%d", userID, len(cachedItems))
			}
		}
	}
	// 3. Skip the background retrieval for short acknowledgements to save resources.
	if !shouldInjectMemoryForInput(userMessage) {
		log.Printf("[INFO] memory prefetch: 短应答跳过检索 user=%d msg=%q", userID, userMessage)
		return memoryFuture
	}
	// 4. Full retrieval in the background: retrieve → render → channel + Redis.
	log.Printf("[INFO] memory prefetch: 启动后台检索 goroutine user=%d chat=%s", userID, chatID)
	go s.prefetchMemoryForNextTurn(userID, chatID, userMessage, memoryFuture)
	return memoryFuture
}
// prefetchMemoryForNextTurn runs the full memory retrieval in the background,
// sends the rendered text to the channel, and refreshes the Redis cache.
//
// Responsibilities:
//  1. Rendered text goes to the memoryFuture channel for Execute/Plan.
//  2. Raw ItemDTOs go to the Redis prefetch cache for next turn's Chat node.
//  3. Retrieval failures are logged only and never block the main chain.
func (s *AgentService) prefetchMemoryForNextTurn(userID int, chatID, userMessage string, memoryFuture chan string) {
	bgCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	items, err := s.memoryReader.Retrieve(bgCtx, memorymodel.RetrieveRequest{
		Query:          strings.TrimSpace(userMessage),
		UserID:         userID,
		ConversationID: strings.TrimSpace(chatID),
		Limit:          agentMemoryRetrieveLimit,
		Now:            time.Now(),
	})
	if err != nil {
		log.Printf("[WARN] 记忆预取失败 user=%d chat=%s: %v", userID, chatID, err)
		s.recordMemoryInject(bgCtx, userID, 0, false, err, "prefetch_retrieve")
		return
	}
	log.Printf("[INFO] memory prefetch: 后台检索完成 user=%d count=%d", userID, len(items))
	if len(items) == 0 {
		// 1. An empty result means this user currently has no usable memories,
		//    so the old cache is stale.
		// 2. Proactively clear the user's prefetch caches so stale memories
		//    stop being injected next turn.
		// 3. Clearing is best-effort (log only); TTL expiry is the backstop.
		//    Guard nil cacheDAO so a background goroutine cannot panic.
		if s.cacheDAO != nil {
			if cacheErr := s.cacheDAO.DeleteMemoryPrefetchCacheByUser(context.Background(), userID); cacheErr != nil {
				log.Printf("[WARN] memory prefetch cache clear failed (empty result) user=%d: %v", userID, cacheErr)
			}
		}
		return
	}
	// Render and hand the text to Execute/Plan via the channel.
	if content := renderMemoryPinnedContentByMode(items, s.memoryCfg.EffectiveInjectRenderMode()); content != "" {
		memoryFuture <- content
	}
	// Also refresh Redis for next turn's Chat node (guard nil cacheDAO).
	if s.cacheDAO != nil {
		if cacheErr := s.cacheDAO.SetMemoryPrefetchCache(context.Background(), userID, chatID, items); cacheErr != nil {
			log.Printf("[WARN] 记忆预取缓存写入失败 user=%d: %v", userID, cacheErr)
		}
	}
}
// weakAckPhrases lists low-semantic acknowledgement replies (normalized to
// lowercase) that should not trigger a memory retrieval round.
var weakAckPhrases = map[string]struct{}{
	"好": {}, "好的": {}, "嗯": {}, "嗯嗯": {}, "行": {}, "可以": {}, "收到": {},
	"明白": {}, "确认": {}, "取消": {}, "是": {}, "不是": {}, "对": {}, "不对": {},
	"ok": {}, "okay": {}, "yes": {}, "no": {},
}

// shouldInjectMemoryForInput reports whether the current input justifies a
// memory recall.
//
// Steps:
//  1. Empty input is skipped outright.
//  2. Weak acknowledgements ("好/确认/ok", ...) are explicitly blocked, so the
//     legacy fallback cannot inject high-score but irrelevant old memories
//     when the query carries no retrieval value.
//  3. Everything else passes, favoring MVP usability.
func shouldInjectMemoryForInput(userMessage string) bool {
	normalized := strings.ToLower(strings.TrimSpace(userMessage))
	if normalized == "" {
		return false
	}
	_, isWeakAck := weakAckPhrases[normalized]
	return !isWeakAck
}
// recordMemoryInject emits one memory-inject observability event plus its
// item counter, falling back to no-op observer/metrics when none are wired.
func (s *AgentService) recordMemoryInject(
	ctx context.Context,
	userID int,
	inputCount int,
	success bool,
	err error,
	source string,
) {
	if s == nil {
		return
	}
	observer := s.memoryObserver
	metrics := s.memoryMetrics
	if observer == nil {
		observer = memoryobserve.NewNopObserver()
	}
	if metrics == nil {
		metrics = memoryobserve.NewNopMetrics()
	}
	level := memoryobserve.LevelInfo
	if err != nil {
		level = memoryobserve.LevelWarn
	}
	injectMode := s.memoryCfg.EffectiveInjectRenderMode()
	observer.Observe(ctx, memoryobserve.Event{
		Level:     level,
		Component: memoryobserve.ComponentInject,
		Operation: memoryobserve.OperationInject,
		Fields: map[string]any{
			"user_id":        userID,
			"inject_mode":    injectMode,
			"input_count":    inputCount,
			"rendered_count": inputCount,
			"token_budget":   0,
			"fallback":       false,
			"success":        success && err == nil,
			"error":          err,
			"error_code":     memoryobserve.ClassifyError(err),
			"source":         source,
		},
	})
	if inputCount <= 0 {
		return
	}
	metrics.AddCounter(memoryobserve.MetricInjectItemTotal, int64(inputCount), map[string]string{
		"inject_mode": injectMode,
		"source":      source,
	})
}

View File

@@ -0,0 +1,155 @@
package sv
import (
"fmt"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
)
// renderMemoryPinnedContentByMode picks the memory rendering strategy from
// the configured render mode; anything other than typed-v2 falls back to the
// flat layout.
func renderMemoryPinnedContentByMode(items []memorymodel.ItemDTO, renderMode string) string {
	if memorymodel.NormalizeInjectRenderMode(renderMode) == memorymodel.MemoryInjectRenderModeTypedV2 {
		return RenderTypedMemoryContent(items)
	}
	return RenderFlatMemoryContent(items)
}
// RenderFlatMemoryContent renders memories as a flat bullet list compatible
// with the legacy chain. Duplicate lines are skipped; it returns "" when
// nothing renders.
func RenderFlatMemoryContent(items []memorymodel.ItemDTO) string {
	if len(items) == 0 {
		return ""
	}
	var builder strings.Builder
	builder.WriteString(agentMemoryIntroLine)
	emitted := make(map[string]struct{}, len(items))
	lineCount := 0
	for _, item := range items {
		line := buildMemoryPinnedLine(item)
		if line == "" {
			continue
		}
		if _, dup := emitted[line]; dup {
			continue
		}
		emitted[line] = struct{}{}
		builder.WriteString("\n- ")
		builder.WriteString(line)
		lineCount++
	}
	if lineCount == 0 {
		return ""
	}
	return strings.TrimSpace(builder.String())
}
// RenderTypedMemoryContent renders memories grouped by memory type.
//
// Steps:
//  1. Group items in a fixed type order so same-type memories are not
//     scattered across the prompt.
//  2. Deduplicate by raw type + rendered text inside each group, defending
//     against legacy dirty data.
//  3. Emit only non-empty groups to keep Execute/Plan noise low.
func RenderTypedMemoryContent(items []memorymodel.ItemDTO) string {
	if len(items) == 0 {
		return ""
	}
	order := []string{
		memorymodel.MemoryTypeConstraint,
		memorymodel.MemoryTypePreference,
		memorymodel.MemoryTypeFact,
	}
	titles := map[string]string{
		memorymodel.MemoryTypeConstraint: "必守约束",
		memorymodel.MemoryTypePreference: "用户偏好",
		memorymodel.MemoryTypeFact:       "当前话题相关事实",
	}
	grouped := make(map[string][]string, len(order))
	dedup := make(map[string]struct{}, len(items))
	for _, item := range items {
		text := buildMemoryRenderContent(item)
		if text == "" {
			continue
		}
		// Dedup key uses the raw (trimmed) type so pre-normalization variants
		// are treated exactly as the legacy renderer did.
		dedupKey := strings.TrimSpace(item.MemoryType) + "::" + text
		if _, dup := dedup[dedupKey]; dup {
			continue
		}
		dedup[dedupKey] = struct{}{}
		memoryType := memorymodel.NormalizeMemoryType(item.MemoryType)
		if memoryType == "" {
			memoryType = memorymodel.MemoryTypeFact
		}
		grouped[memoryType] = append(grouped[memoryType], text)
	}
	var builder strings.Builder
	builder.WriteString(agentMemoryIntroLine)
	rendered := false
	for _, memoryType := range order {
		lines := grouped[memoryType]
		if len(lines) == 0 {
			continue
		}
		rendered = true
		builder.WriteString("\n\n【")
		builder.WriteString(titles[memoryType])
		builder.WriteString("】")
		for _, line := range lines {
			builder.WriteString("\n- ")
			builder.WriteString(line)
		}
	}
	if !rendered {
		return ""
	}
	return strings.TrimSpace(builder.String())
}
// buildMemoryPinnedLine renders one memory as a compact "[type] content" line;
// it returns "" when the item has no renderable text.
func buildMemoryPinnedLine(item memorymodel.ItemDTO) string {
	content := buildMemoryRenderContent(item)
	if content == "" {
		return ""
	}
	return fmt.Sprintf("[%s] %s", localizeMemoryType(item.MemoryType), content)
}
// buildMemoryRenderContent picks the renderable text for a memory item:
// trimmed Content first, falling back to the trimmed Title.
func buildMemoryRenderContent(item memorymodel.ItemDTO) string {
	if content := strings.TrimSpace(item.Content); content != "" {
		return content
	}
	return strings.TrimSpace(item.Title)
}
// memoryTypeLabels maps memory types to the natural Chinese labels used in
// prompts; unknown types fall back to the generic label in localizeMemoryType.
var memoryTypeLabels = map[string]string{
	memorymodel.MemoryTypePreference: "偏好",
	memorymodel.MemoryTypeConstraint: "约束",
	memorymodel.MemoryTypeFact:       "事实",
}

// localizeMemoryType maps a memory type onto its Chinese prompt label.
func localizeMemoryType(memoryType string) string {
	if label, ok := memoryTypeLabels[strings.TrimSpace(memoryType)]; ok {
		return label
	}
	return "记忆"
}

View File

@@ -0,0 +1,354 @@
package sv
import (
"context"
"fmt"
"log"
"strings"
"time"
"unicode/utf8"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
const (
	// conversationTitleTimeout bounds the async title generation. It is off the
	// main request path, but still needs an upper limit so the background
	// goroutine cannot block for long.
	conversationTitleTimeout = 4 * time.Second
	// conversationTitleHistoryLimit caps how many recent messages feed title
	// generation. A few rounds cut token cost while still summarizing the
	// current conversation topic.
	conversationTitleHistoryLimit = 8
	// conversationTitleMaxChars is the title length cap (counted in runes),
	// keeping titles from overflowing the frontend layout.
	conversationTitleMaxChars = 24
	// conversationListDefaultPage is the default page number for the
	// conversation list.
	conversationListDefaultPage = 1
	// conversationListDefaultPageSize is the default page size for the
	// conversation list.
	conversationListDefaultPageSize = 20
	// conversationListMaxPageSize caps a single page, so oversized pagination
	// cannot overload the database.
	conversationListMaxPageSize = 100
	// conversationTitleTokenAdjustReason is the ledger-adjustment reason code
	// for async title generation, used for logging and later auditing.
	conversationTitleTokenAdjustReason = "conversation_title_async"
)
// conversationTitlePrompt is the system prompt for the conversation-title
// generator model; it is kept in Chinese because the product generates
// Chinese titles.
const conversationTitlePrompt = `你是 SmartMate 的会话标题生成器。
请基于给定对话内容,生成一个简短中文标题。
要求:
1) 只输出标题文本,不要解释,不要加引号,不要 markdown。
2) 标题长度控制在 8~20 个中文字符,尽量自然、口语化。
3) 不要出现“用户/助手/对话/聊天记录”等泛化词。
4) 如果内容是任务提醒类,标题应体现核心事项。`
// GetConversationMeta returns one conversation's metadata, for frontend
// polling or explicit fetches.
//
// Notes:
//  1. Decoupled from the SSE stream; no streaming headers involved.
//  2. title may be empty — the frontend uses has_title to decide whether to
//     show a placeholder.
func (s *AgentService) GetConversationMeta(ctx context.Context, userID int, chatID string) (*model.GetConversationMetaResponse, error) {
	chat, err := s.repo.GetConversationMeta(ctx, userID, strings.TrimSpace(chatID))
	if err != nil {
		return nil, err
	}
	var title string
	if chat.Title != nil {
		title = strings.TrimSpace(*chat.Title)
	}
	resp := &model.GetConversationMetaResponse{
		ConversationID: chat.ChatID,
		Title:          title,
		HasTitle:       title != "",
		MessageCount:   chat.MessageCount,
		LastMessageAt:  chat.LastMessageAt,
		Status:         chat.Status,
	}
	return resp, nil
}
// GetConversationList returns the current user's conversations, paginated.
//
// Responsibilities:
//  1. Normalize pagination parameters (defaults, upper bound).
//  2. Validate the status filter (only active/archived are accepted).
//  3. Map DAO models into frontend response DTOs.
//  4. No caching here; upper layers introduce it on demand.
func (s *AgentService) GetConversationList(ctx context.Context, userID, page, pageSize int, status string) (*model.GetConversationListResponse, error) {
	// 1. Normalize pagination so the DAO always receives safe parameters.
	pageNum := normalizeConversationListPage(page)
	perPage := normalizeConversationListPageSize(pageSize)
	// 2. Validate the status filter:
	//    empty means "no filter"; anything else must be active/archived so
	//    arbitrary strings never reach SQL.
	statusFilter, valid := normalizeConversationStatus(status)
	if !valid {
		return nil, respond.WrongParamType
	}
	// 3. Fetch the page from the DB.
	chats, total, err := s.repo.GetConversationList(ctx, userID, pageNum, perPage, statusFilter)
	if err != nil {
		return nil, err
	}
	// 4. Map to DTOs with unified title/has_title semantics, so the frontend
	//    never handles nil pointers.
	list := make([]model.GetConversationListItem, 0, len(chats))
	for _, row := range chats {
		var title string
		if row.Title != nil {
			title = strings.TrimSpace(*row.Title)
		}
		list = append(list, model.GetConversationListItem{
			ConversationID: row.ChatID,
			Title:          title,
			HasTitle:       title != "",
			MessageCount:   row.MessageCount,
			LastMessageAt:  row.LastMessageAt,
			Status:         row.Status,
			CreatedAt:      row.CreatedAt,
		})
	}
	// 5. has_more drives the frontend's "load more" button directly.
	return &model.GetConversationListResponse{
		List:     list,
		Page:     pageNum,
		PageSize: perPage,
		Limit:    perPage,
		Total:    total,
		HasMore:  int64(pageNum*perPage) < total,
	}, nil
}
// normalizeConversationListPage clamps the page number to a sane minimum,
// substituting the default for non-positive values.
func normalizeConversationListPage(page int) int {
	if page > 0 {
		return page
	}
	return conversationListDefaultPage
}
// normalizeConversationListPageSize applies the default for non-positive
// values and the hard upper bound for oversized pages.
func normalizeConversationListPageSize(pageSize int) int {
	switch {
	case pageSize <= 0:
		return conversationListDefaultPageSize
	case pageSize > conversationListMaxPageSize:
		return conversationListMaxPageSize
	default:
		return pageSize
	}
}
// normalizeConversationStatus trims/lowercases the status filter and validates
// it. Empty means "no filter"; otherwise only active/archived are accepted.
func normalizeConversationStatus(status string) (string, bool) {
	cleaned := strings.ToLower(strings.TrimSpace(status))
	switch cleaned {
	case "":
		return "", true
	case "active", "archived":
		return cleaned, true
	default:
		return "", false
	}
}
// ensureConversationTitleAsync generates and stores the conversation title in
// a background goroutine.
// Design constraints:
//  1. Only attempt generation when the title is still empty, so an existing
//     or user-confirmed title is never overwritten.
//  2. Failures are logged only and never affect the current chat flow.
//  3. Title material comes from the Redis history first (fast hit, consistent
//     with the current context).
func (s *AgentService) ensureConversationTitleAsync(userID int, chatID string) {
	if s == nil || s.repo == nil || s.agentCache == nil {
		return
	}
	if strings.TrimSpace(chatID) == "" {
		return
	}
	go func() {
		// 1. Independent timeout context: the request ctx may already be canceled.
		ctx, cancel := context.WithTimeout(context.Background(), conversationTitleTimeout)
		defer cancel()
		// 2. Read the current title first; skip the model call when one exists.
		title, exists, err := s.repo.GetConversationTitle(ctx, userID, chatID)
		if err != nil {
			log.Printf("异步生成会话标题失败(读取标题失败) chat=%s err=%v", chatID, err)
			return
		}
		if !exists || strings.TrimSpace(title) != "" {
			return
		}
		// 3. Load the conversation history from Redis as the title material.
		history, err := s.agentCache.GetHistory(ctx, chatID)
		if err != nil {
			log.Printf("异步生成会话标题失败(读取历史失败) chat=%s err=%v", chatID, err)
			return
		}
		if len(history) == 0 {
			return
		}
		// 4. Generate the title via the model and sanitize the output.
		generated, titleTokens, err := s.generateConversationTitle(ctx, history)
		if err != nil {
			log.Printf("异步生成会话标题失败(模型生成失败) chat=%s err=%v", chatID, err)
			return
		}
		if strings.TrimSpace(generated) == "" {
			return
		}
		// 4.1 Book the async model tokens after a successful generation:
		// 4.1.1 with the outbox enabled, publish an adjust event (reliable async accounting);
		// 4.1.2 without the outbox, fall back to a synchronous ledger update.
		if titleTokens > 0 {
			if s.eventPublisher != nil {
				publishErr := eventsvc.PublishChatTokenUsageAdjustRequested(ctx, s.eventPublisher, model.ChatTokenUsageAdjustPayload{
					UserID:         userID,
					ConversationID: chatID,
					TokensDelta:    titleTokens,
					Reason:         conversationTitleTokenAdjustReason,
					TriggeredAt:    time.Now(),
				})
				if publishErr != nil {
					log.Printf("异步标题 token 记账事件发布失败 chat=%s tokens=%d err=%v", chatID, titleTokens, publishErr)
				}
			} else {
				if adjustErr := s.repo.AdjustTokenUsage(ctx, userID, chatID, titleTokens, ""); adjustErr != nil {
					log.Printf("异步标题 token 同步记账失败 chat=%s tokens=%d err=%v", chatID, titleTokens, adjustErr)
				}
			}
		}
		// 5. Write only if the title is still empty, keeping concurrent runs idempotent.
		if err = s.repo.UpdateConversationTitleIfEmpty(ctx, userID, chatID, generated); err != nil {
			log.Printf("异步生成会话标题失败(写库失败) chat=%s err=%v", chatID, err)
		}
	}()
}
// generateConversationTitle asks a chat model to summarize recent history
// into a short conversation title.
//
// Returns the normalized title, the tokens consumed by this call (0 when the
// response carries no usage), and an error when no model is available or the
// call fails.
func (s *AgentService) generateConversationTitle(ctx context.Context, history []*schema.Message) (string, int, error) {
	modelInst := s.pickTitleModel()
	if modelInst == nil {
		// errors.New instead of fmt.Errorf: the messages carry no format
		// verbs and fmt is not in this file's import set.
		return "", 0, errors.New("标题生成模型未初始化")
	}
	// 1. Keep only the latest N messages to cut token cost and stay focused
	//    on the current topic.
	trimmed := tailMessages(history, conversationTitleHistoryLimit)
	prompt := buildConversationTitleUserPrompt(trimmed)
	if strings.TrimSpace(prompt) == "" {
		return "", 0, errors.New("缺少可用历史内容")
	}
	messages := []*schema.Message{
		schema.SystemMessage(conversationTitlePrompt),
		schema.UserMessage(prompt),
	}
	// 2. Title generation is a short structured output: disable thinking and
	//    cap max tokens to reduce latency and drift.
	resp, err := modelInst.GenerateText(ctx, messages, llmservice.GenerateOptions{
		Temperature: 0.2,
		MaxTokens:   40,
		Thinking:    llmservice.ThinkingModeDisabled,
	})
	if err != nil {
		return "", 0, err
	}
	if resp == nil {
		return "", 0, errors.New("标题生成模型返回为空")
	}
	// 2.1 Extract token usage from the response; a missing usage is treated
	//     as 0 and never blocks the main flow.
	titleTokens := 0
	if resp.Usage != nil {
		titleTokens = normalizeUsageTotal(
			resp.Usage.TotalTokens,
			resp.Usage.PromptTokens,
			resp.Usage.CompletionTokens,
		)
	}
	return normalizeConversationTitle(resp.Text), titleTokens, nil
}
// pickTitleModel chooses the model used for title generation.
// Lite is preferred (cheap and fast); Pro is the fallback when Lite is
// unavailable.
func (s *AgentService) pickTitleModel() *llmservice.Client {
	if s == nil || s.llmService == nil {
		return nil
	}
	client := s.llmService.LiteClient()
	if client == nil {
		client = s.llmService.ProClient()
	}
	return client
}
// buildConversationTitleUserPrompt flattens message history into readable
// text for the title-summarization model.
func buildConversationTitleUserPrompt(messages []*schema.Message) string {
	var b strings.Builder
	b.WriteString("请根据以下对话内容生成标题:\n")
	for _, message := range messages {
		if message == nil {
			continue
		}
		text := strings.TrimSpace(message.Content)
		if text == "" {
			continue
		}
		// Cap each message so one very long reply cannot drown out the topic.
		text = trimRunes(text, 80)
		label := "助手"
		if strings.EqualFold(strings.TrimSpace(string(message.Role)), string(schema.User)) {
			label = "用户"
		}
		b.WriteString(label)
		// NOTE(review): this empty write looks like a mojibake'd separator
		// (possibly a lost ":") — kept as-is to preserve output; confirm intent.
		b.WriteString("")
		b.WriteString(text)
		b.WriteString("\n")
	}
	return strings.TrimSpace(b.String())
}
// tailMessages returns at most the last limit messages; a non-positive limit
// or a short slice is returned unchanged (no copy is made).
func tailMessages(messages []*schema.Message, limit int) []*schema.Message {
	if limit > 0 && len(messages) > limit {
		return messages[len(messages)-limit:]
	}
	return messages
}
// normalizeConversationTitle cleans raw model output so the title can be
// displayed and stored directly: first line only, surrounding quote/bracket
// characters and "标题:" prefixes stripped, then rune-capped.
func normalizeConversationTitle(raw string) string {
	text := strings.TrimSpace(raw)
	if text == "" {
		return ""
	}
	// Keep only the first line of the model output.
	if line, _, found := strings.Cut(text, "\n"); found {
		text = strings.TrimSpace(line)
	}
	text = strings.Trim(text, "\"'“”‘’《》[]【】")
	text = strings.TrimPrefix(text, "标题:")
	text = strings.TrimPrefix(text, "标题:")
	text = strings.TrimSpace(text)
	text = trimRunes(text, conversationTitleMaxChars)
	return strings.TrimSpace(text)
}
// trimRunes truncates text to at most limit runes (not bytes), so multi-byte
// UTF-8 characters are never split. A non-positive limit or empty text yields
// "". Implemented with a single []rune conversion, removing the need for the
// unicode/utf8 import (which is absent from this file's import block).
func trimRunes(text string, limit int) string {
	if limit <= 0 || text == "" {
		return ""
	}
	runes := []rune(text)
	if len(runes) <= limit {
		return text
	}
	return string(runes[:limit])
}
// GetContextStats returns the context-window token distribution statistics
// for the given conversation.
//
// Read-only pass-through to the DAO; the result is the raw stats string as
// stored by the repository.
func (s *AgentService) GetContextStats(ctx context.Context, userID int, chatID string) (string, error) {
	return s.repo.LoadContextTokenStats(ctx, userID, chatID)
}

View File

@@ -0,0 +1,130 @@
package sv
import (
"context"
"errors"
"log"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
)
// GetSchedulePlanPreview reads the structured schedule preview keyed by
// conversation_id.
//
// Responsibilities:
//  1. Parameter normalization, cache-first read, conversation-ownership
//     check, and DB fallback.
//  2. Converting the cache/snapshot DTO into the API response DTO.
//  3. It does NOT trigger scheduling, recompute results, or persist anything
//     on the message path.
func (s *AgentService) GetSchedulePlanPreview(ctx context.Context, userID int, chatID string) (*model.GetSchedulePlanPreviewResponse, error) {
	// 1. Validate the conversation ID first so invalid requests never reach
	//    the cache or the database.
	normalizedChatID := strings.TrimSpace(chatID)
	if normalizedChatID == "" {
		return nil, respond.MissingParam
	}
	if s == nil {
		return nil, errors.New("agent service is not initialized")
	}
	// 2. Redis first.
	if s.cacheDAO != nil {
		preview, err := s.cacheDAO.GetSchedulePlanPreviewFromCache(ctx, userID, normalizedChatID)
		if err != nil {
			return nil, err
		}
		if preview != nil {
			// Ownership check: a cached preview recorded for another user is
			// reported as "not found" rather than leaked.
			if preview.UserID > 0 && preview.UserID != userID {
				return nil, respond.SchedulePlanPreviewNotFound
			}
			plans := agentshared.CloneWeekSchedules(preview.CandidatePlans)
			if plans == nil {
				plans = make([]model.UserWeekSchedule, 0)
			}
			return &model.GetSchedulePlanPreviewResponse{
				ConversationID: normalizedChatID,
				TraceID:        strings.TrimSpace(preview.TraceID),
				Summary:        strings.TrimSpace(preview.Summary),
				CandidatePlans: plans,
				HybridEntries:  agentshared.CloneHybridEntries(preview.HybridEntries),
				TaskClassIDs:   preview.TaskClassIDs,
				GeneratedAt:    preview.GeneratedAt,
			}, nil
		}
	}
	// 3. On a cache miss, fall back to the MySQL snapshot.
	if s.repo != nil {
		snapshot, err := s.repo.GetScheduleStateSnapshot(ctx, userID, normalizedChatID)
		if err != nil {
			return nil, err
		}
		if snapshot != nil {
			response := snapshotToSchedulePlanPreviewResponse(snapshot)
			if s.cacheDAO != nil {
				// Best-effort cache backfill; a failure is log-only.
				cachePreview := snapshotToSchedulePlanPreviewCache(snapshot)
				if setErr := s.cacheDAO.SetSchedulePlanPreviewToCache(ctx, userID, normalizedChatID, cachePreview); setErr != nil {
					log.Printf("回填排程预览缓存失败 chat_id=%s: %v", normalizedChatID, setErr)
				}
			}
			return response, nil
		}
	}
	return nil, respond.SchedulePlanPreviewNotFound
}
// snapshotToSchedulePlanPreviewCache maps a MySQL snapshot into the Redis
// preview-cache structure. Returns nil for a nil snapshot.
func snapshotToSchedulePlanPreviewCache(snapshot *model.SchedulePlanStateSnapshot) *model.SchedulePlanPreviewCache {
	if snapshot == nil {
		return nil
	}
	// Fall back to "now" when the snapshot never recorded an update time.
	generatedAt := snapshot.UpdatedAt
	if generatedAt.IsZero() {
		generatedAt = time.Now()
	}
	cache := &model.SchedulePlanPreviewCache{
		UserID:         snapshot.UserID,
		ConversationID: snapshot.ConversationID,
		TraceID:        strings.TrimSpace(snapshot.TraceID),
		Summary:        schedulePlanSummaryOrFallback(strings.TrimSpace(snapshot.FinalSummary)),
		GeneratedAt:    generatedAt,
	}
	// Deep-clone every collection so the cache entry is isolated from the
	// snapshot the caller still holds.
	cache.CandidatePlans = agentshared.CloneWeekSchedules(snapshot.CandidatePlans)
	cache.TaskClassIDs = append([]int(nil), snapshot.TaskClassIDs...)
	cache.HybridEntries = agentshared.CloneHybridEntries(snapshot.HybridEntries)
	cache.AllocatedItems = agentshared.CloneTaskClassItems(snapshot.AllocatedItems)
	return cache
}
// snapshotToSchedulePlanPreviewResponse maps a MySQL snapshot into the query
// API response structure. Returns nil for a nil snapshot.
func snapshotToSchedulePlanPreviewResponse(snapshot *model.SchedulePlanStateSnapshot) *model.GetSchedulePlanPreviewResponse {
	if snapshot == nil {
		return nil
	}
	// Guarantee a non-nil plan list for the API layer.
	plans := agentshared.CloneWeekSchedules(snapshot.CandidatePlans)
	if plans == nil {
		plans = make([]model.UserWeekSchedule, 0)
	}
	// Fall back to "now" when the snapshot never recorded an update time.
	generatedAt := snapshot.UpdatedAt
	if generatedAt.IsZero() {
		generatedAt = time.Now()
	}
	response := &model.GetSchedulePlanPreviewResponse{
		ConversationID: snapshot.ConversationID,
		TraceID:        strings.TrimSpace(snapshot.TraceID),
		Summary:        schedulePlanSummaryOrFallback(strings.TrimSpace(snapshot.FinalSummary)),
		CandidatePlans: plans,
		HybridEntries:  agentshared.CloneHybridEntries(snapshot.HybridEntries),
		TaskClassIDs:   snapshot.TaskClassIDs,
		GeneratedAt:    generatedAt,
	}
	return response
}
// schedulePlanSummaryOrFallback centralizes the default summary text so the
// fallback wording is maintained in exactly one place. A blank (or
// whitespace-only) summary yields the fallback; anything else passes through
// unmodified.
func schedulePlanSummaryOrFallback(summary string) string {
	if strings.TrimSpace(summary) != "" {
		return summary
	}
	return "排程流程已完成,但未生成结果摘要。"
}

View File

@@ -0,0 +1,143 @@
package sv
import (
"context"
"errors"
"fmt"
"log"
"strings"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
)
// SaveScheduleState handles the "stage schedule state" request issued after a
// drag-and-drop on the frontend.
//
// Responsibilities:
//  1. Write the frontend's absolute coordinates back into this conversation's
//     ScheduleState snapshot;
//  2. Refresh the Redis preview cache so later preview reads match the latest
//     drag result;
//  3. It does NOT write the official MySQL timetable and does NOT trigger a
//     new graph run.
func (s *AgentService) SaveScheduleState(
	ctx context.Context,
	userID int,
	conversationID string,
	items []model.SaveScheduleStatePlacedItem,
) error {
	// 1. Load the conversation snapshot; a missing snapshot means this
	//    conversation is outside the adjustable window.
	if s.agentStateStore == nil {
		return errors.New("agent state store 未初始化")
	}
	snapshot, ok, err := s.agentStateStore.Load(ctx, conversationID)
	if err != nil {
		return fmt.Errorf("加载快照失败: %w", err)
	}
	if !ok || snapshot == nil || snapshot.ScheduleState == nil {
		return respond.ScheduleStateSnapshotNotFound
	}
	// 2. Ownership check so one user cannot write into another user's
	//    conversation snapshot.
	if snapshot.RuntimeState != nil {
		cs := snapshot.RuntimeState.EnsureCommonState()
		if cs.UserID != 0 && cs.UserID != userID {
			return fmt.Errorf("会话归属校验失败:快照 user_id=%d请求 user_id=%d", cs.UserID, userID)
		}
	}
	// 3. Apply the frontend's absolute coordinates to the in-memory
	//    ScheduleState.
	// 3.1 Only source=task_item tasks are modified;
	// 3.2 source=event course slots stay untouched;
	// 3.3 Invalid coordinates are rejected by ApplyPlacedItems with an
	//     explicit error.
	if err := agentconv.ApplyPlacedItems(snapshot.ScheduleState, items); err != nil {
		return err
	}
	// 4. Persist the runtime snapshot first so the post-drag state becomes
	//    the source of truth for subsequent reads.
	if err := s.agentStateStore.Save(ctx, conversationID, snapshot); err != nil {
		return fmt.Errorf("保存快照失败: %w", err)
	}
	// 5. Then refresh the preview cache so GetSchedulePlanPreview does not
	//    serve the pre-drag state.
	if err := s.refreshSchedulePreviewAfterStateSave(ctx, userID, conversationID, snapshot); err != nil {
		return err
	}
	log.Printf("[INFO] schedule state saved chat=%s user=%d item_count=%d", conversationID, userID, len(items))
	return nil
}
// refreshSchedulePreviewAfterStateSave rebuilds and overwrites the Redis
// preview cache from the latest snapshot.
//
// Responsibilities:
//  1. Redis preview cache only; the MySQL snapshot is out of scope;
//  2. The latest ScheduleState wins, fixing the "preview serves a stale drag
//     result" regression;
//  3. trace_id/candidate_plans from the previous preview are preserved where
//     possible so frontend-visible fields do not change shape abruptly.
func (s *AgentService) refreshSchedulePreviewAfterStateSave(
	ctx context.Context,
	userID int,
	conversationID string,
	snapshot *agentmodel.AgentStateSnapshot,
) error {
	// 1. Skip when dependencies are incomplete to avoid writing a partial
	//    cache entry.
	if s == nil || s.cacheDAO == nil || snapshot == nil || snapshot.ScheduleState == nil {
		return nil
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return nil
	}
	// 2. Extract task_class_ids from the runtime state so the preview filter
	//    scope matches this conversation.
	taskClassIDs := make([]int, 0)
	if snapshot.RuntimeState != nil {
		flowState := snapshot.RuntimeState.EnsureCommonState()
		taskClassIDs = append(taskClassIDs, flowState.TaskClassIDs...)
	}
	// 3. Build the preview backbone from the latest ScheduleState
	//    (hybrid_entries carries the fresh truth).
	preview := agentconv.ScheduleStateToPreview(
		snapshot.ScheduleState,
		userID,
		normalizedConversationID,
		taskClassIDs,
		"",
	)
	if preview == nil {
		return nil
	}
	// 4. Merge fields worth keeping from the previous preview so the frontend
	//    does not suddenly lose fields it depends on.
	existingPreview, err := s.cacheDAO.GetSchedulePlanPreviewFromCache(ctx, userID, normalizedConversationID)
	if err != nil {
		return fmt.Errorf("读取排程预览缓存失败: %w", err)
	}
	if existingPreview != nil {
		preview.TraceID = strings.TrimSpace(existingPreview.TraceID)
		if len(existingPreview.CandidatePlans) > 0 {
			preview.CandidatePlans = agentshared.CloneWeekSchedules(existingPreview.CandidatePlans)
		}
		if len(existingPreview.AllocatedItems) > 0 {
			preview.AllocatedItems = agentshared.CloneTaskClassItems(existingPreview.AllocatedItems)
		}
		if len(preview.TaskClassIDs) == 0 && len(existingPreview.TaskClassIDs) > 0 {
			preview.TaskClassIDs = append([]int(nil), existingPreview.TaskClassIDs...)
		}
	}
	// Normalize nil collections to empty ones — presumably so serialized
	// output is [] rather than null; confirm against the cache consumers.
	if preview.CandidatePlans == nil {
		preview.CandidatePlans = make([]model.UserWeekSchedule, 0)
	}
	if preview.HybridEntries == nil {
		preview.HybridEntries = make([]model.HybridScheduleEntry, 0)
	}
	if preview.TaskClassIDs == nil {
		preview.TaskClassIDs = make([]int, 0)
	}
	// 5. Write the preview back to Redis; failure is surfaced so the
	//    frontend can notice and retry.
	if err := s.cacheDAO.SetSchedulePlanPreviewToCache(ctx, userID, normalizedConversationID, preview); err != nil {
		return fmt.Errorf("刷新排程预览缓存失败: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,148 @@
package sv
import (
"context"
"io"
"strings"
"time"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)
// streamChatFallback is the degraded streaming chat used when graph execution
// fails. It inlines the core of the old agentchat.StreamChat and no longer
// depends on the agent/ package.
//
// Returns (full assistant text, "" — no structured summary on this path,
// reasoning duration in whole seconds, merged token usage, error).
func (s *AgentService) streamChatFallback(
	ctx context.Context,
	llm *llmservice.Client,
	modelName string,
	userInput string,
	ifThinking bool,
	chatHistory []*schema.Message,
	outChan chan<- string,
	reasoningStartAt *time.Time,
	userID int,
	chatID string,
) (string, string, int, *schema.TokenUsage, error) {
	// Assemble system prompt + history + the new user input.
	messages := make([]*schema.Message, 0, len(chatHistory)+2)
	messages = append(messages, schema.SystemMessage(agentprompt.SystemPrompt))
	if len(chatHistory) > 0 {
		messages = append(messages, chatHistory...)
	}
	messages = append(messages, schema.UserMessage(userInput))
	if strings.TrimSpace(modelName) == "" {
		modelName = "smartflow-worker"
	}
	requestID := "chatcmpl-" + uuid.NewString()
	created := time.Now().Unix()
	firstChunk := true
	chunkEmitter := agentstream.NewChunkEmitter(agentstream.NewSSEPayloadEmitter(outChan), requestID, modelName, created)
	// Reasoning summaries prefer the Lite client, falling back to Pro.
	reasoningSummaryClient := s.llmService.LiteClient()
	if reasoningSummaryClient == nil {
		reasoningSummaryClient = s.llmService.ProClient()
	}
	chunkEmitter.SetReasoningSummaryFunc(s.makeReasoningSummaryFunc(reasoningSummaryClient))
	// Persist structured extra events with a background context so timeline
	// writes are not cut short by request cancellation.
	chunkEmitter.SetExtraEventHook(func(extra *agentstream.OpenAIChunkExtra) {
		s.persistAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
	})
	reasoningDigestor, digestorErr := chunkEmitter.NewReasoningDigestor(ctx, "fallback.speak", "fallback")
	if digestorErr != nil {
		return "", "", 0, nil, digestorErr
	}
	// closeDigestor is idempotent: invoked by the defer on early returns and
	// explicitly before the finish events on the happy path.
	digestorClosed := false
	closeDigestor := func() {
		if reasoningDigestor == nil || digestorClosed {
			return
		}
		digestorClosed = true
		_ = reasoningDigestor.Close(ctx)
	}
	defer closeDigestor()
	// Carry over a caller-supplied reasoning start time (converted to local).
	var localReasoningStartAt *time.Time
	if reasoningStartAt != nil && !reasoningStartAt.IsZero() {
		startCopy := reasoningStartAt.In(time.Local)
		localReasoningStartAt = &startCopy
	}
	var reasoningEndAt *time.Time
	thinkingMode := llmservice.ThinkingModeDisabled
	if ifThinking {
		thinkingMode = llmservice.ThinkingModeEnabled
	}
	reader, err := llm.Stream(ctx, messages, llmservice.GenerateOptions{
		Thinking: thinkingMode,
	})
	if err != nil {
		return "", "", 0, nil, err
	}
	defer reader.Close()
	var fullText strings.Builder
	var tokenUsage *schema.TokenUsage
	for {
		chunk, recvErr := reader.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			return "", "", 0, nil, recvErr
		}
		// Merge per-chunk usage into the running total.
		if chunk != nil && chunk.ResponseMeta != nil && chunk.ResponseMeta.Usage != nil {
			tokenUsage = agentstream.MergeUsage(tokenUsage, chunk.ResponseMeta.Usage)
		}
		if chunk != nil {
			// Track reasoning boundaries: start on the first reasoning chunk,
			// end on the first content chunk after reasoning began.
			if strings.TrimSpace(chunk.ReasoningContent) != "" && localReasoningStartAt == nil {
				now := time.Now()
				localReasoningStartAt = &now
			}
			if strings.TrimSpace(chunk.Content) != "" && localReasoningStartAt != nil && reasoningEndAt == nil {
				now := time.Now()
				reasoningEndAt = &now
			}
			// 1. The fallback path must not pass raw reasoning_content
			//    through either;
			// 2. Reasoning is only fed to the digestor — once real content
			//    starts, the gate closes and later summaries are dropped.
			if strings.TrimSpace(chunk.ReasoningContent) != "" && reasoningDigestor != nil {
				reasoningDigestor.Append(chunk.ReasoningContent)
			}
			if chunk.Content != "" {
				if reasoningDigestor != nil {
					reasoningDigestor.MarkContentStarted()
				}
				if emitErr := chunkEmitter.EmitAssistantText("fallback.speak", "fallback", chunk.Content, firstChunk); emitErr != nil {
					return "", "", 0, nil, emitErr
				}
				fullText.WriteString(chunk.Content)
				firstChunk = false
			}
		}
	}
	closeDigestor()
	if finishErr := chunkEmitter.EmitFinish("fallback.speak", "fallback"); finishErr != nil {
		return "", "", 0, nil, finishErr
	}
	if doneErr := chunkEmitter.EmitDone(); doneErr != nil {
		return "", "", 0, nil, doneErr
	}
	// Derive the reasoning duration; if no content ever arrived, close the
	// interval at "now".
	reasoningDurationSeconds := 0
	if localReasoningStartAt != nil {
		if reasoningEndAt == nil {
			now := time.Now()
			reasoningEndAt = &now
		}
		if reasoningEndAt.After(*localReasoningStartAt) {
			reasoningDurationSeconds = int(reasoningEndAt.Sub(*localReasoningStartAt) / time.Second)
		}
	}
	return fullText.String(), "", reasoningDurationSeconds, tokenUsage, nil
}

View File

@@ -0,0 +1,174 @@
package sv
import (
"context"
"errors"
"sort"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// QueryTasksForTool serves the agent tool's task query: load the user's tasks
// (with urgency promotion applied), then filter, sort, truncate, and map them
// to tool records.
//
// Returns an empty slice (never nil) when the user simply has no tasks.
func (s *AgentService) QueryTasksForTool(ctx context.Context, req agentmodel.TaskQueryRequest) ([]agentmodel.TaskQueryTaskRecord, error) {
	if req.UserID <= 0 {
		return nil, errors.New("invalid user_id in task query")
	}
	var tasks []model.Task
	var err error
	// Prefer the unified promotion path when injected (cache read + read-time
	// derivation + async outbox persistence).
	if s.GetTasksWithUrgencyPromotionFunc != nil {
		tasks, err = s.GetTasksWithUrgencyPromotionFunc(ctx, req.UserID)
		if err != nil {
			if errors.Is(err, respond.UserTasksEmpty) {
				return make([]agentmodel.TaskQueryTaskRecord, 0), nil
			}
			return nil, err
		}
	} else {
		// Fallback: read via taskRepo directly when the promotion func was
		// not injected (no cache, no persistence).
		if s.taskRepo == nil {
			return nil, errors.New("task repository is nil")
		}
		tasks, err = s.taskRepo.GetTasksByUserID(req.UserID)
		if err != nil {
			if errors.Is(err, respond.UserTasksEmpty) {
				return make([]agentmodel.TaskQueryTaskRecord, 0), nil
			}
			return nil, err
		}
		// The fallback still applies read-time urgency promotion in memory.
		now := time.Now()
		for i := range tasks {
			applyReadTimeUrgencyPromotion(&tasks[i], now)
		}
	}
	// Filter, sort, truncate.
	filtered := make([]model.Task, 0, len(tasks))
	for _, task := range tasks {
		if !taskMatchesQueryFilter(task, req) {
			continue
		}
		filtered = append(filtered, task)
	}
	sortTasksForQuery(filtered, req)
	if req.Limit > 0 && len(filtered) > req.Limit {
		filtered = filtered[:req.Limit]
	}
	records := make([]agentmodel.TaskQueryTaskRecord, 0, len(filtered))
	for _, task := range filtered {
		records = append(records, agentmodel.TaskQueryTaskRecord{
			ID:                 task.ID,
			Title:              task.Title,
			PriorityGroup:      task.Priority,
			EstimatedSections:  model.NormalizeEstimatedSections(&task.EstimatedSections),
			IsCompleted:        task.IsCompleted,
			DeadlineAt:         task.DeadlineAt,
			UrgencyThresholdAt: task.UrgencyThresholdAt,
		})
	}
	return records, nil
}
// applyReadTimeUrgencyPromotion promotes an incomplete task's priority at
// read time once its urgency threshold has passed: priority 2 becomes 1 and
// 4 becomes 3. Tasks without a threshold, completed tasks, and nil tasks are
// left untouched.
func applyReadTimeUrgencyPromotion(task *model.Task, now time.Time) {
	if task == nil || task.IsCompleted || task.UrgencyThresholdAt == nil {
		return
	}
	if task.UrgencyThresholdAt.After(now) {
		return
	}
	// Threshold reached: shift each non-urgent quadrant to its urgent twin.
	if task.Priority == 2 {
		task.Priority = 1
	} else if task.Priority == 4 {
		task.Priority = 3
	}
}
// taskMatchesQueryFilter reports whether a task passes every filter in the
// query request: completion flag, quadrant, case-insensitive keyword on the
// title, and the deadline window (tasks without a deadline fail any deadline
// bound).
func taskMatchesQueryFilter(task model.Task, req agentmodel.TaskQueryRequest) bool {
	if task.IsCompleted && !req.IncludeCompleted {
		return false
	}
	if req.Quadrant != nil && *req.Quadrant != task.Priority {
		return false
	}
	if keyword := strings.TrimSpace(req.Keyword); keyword != "" {
		if !strings.Contains(strings.ToLower(task.Title), strings.ToLower(keyword)) {
			return false
		}
	}
	if after := req.DeadlineAfter; after != nil {
		if task.DeadlineAt == nil || task.DeadlineAt.Before(*after) {
			return false
		}
	}
	if before := req.DeadlineBefore; before != nil {
		if task.DeadlineAt == nil || task.DeadlineAt.After(*before) {
			return false
		}
	}
	return true
}
// sortTasksForQuery stably sorts tasks in place per the request: sort_by is
// one of "priority", "id", or "deadline" (the default), order is "asc" unless
// explicitly "desc". Ties on priority and undecidable deadline comparisons
// fall back to larger-ID-first.
func sortTasksForQuery(tasks []model.Task, req agentmodel.TaskQueryRequest) {
	if len(tasks) < 2 {
		return
	}
	order := strings.ToLower(strings.TrimSpace(req.Order))
	if order != "desc" {
		order = "asc"
	}
	sortBy := strings.ToLower(strings.TrimSpace(req.SortBy))
	if sortBy == "" {
		sortBy = "deadline"
	}
	desc := order == "desc"
	sort.SliceStable(tasks, func(i, j int) bool {
		a, b := tasks[i], tasks[j]
		switch sortBy {
		case "priority":
			if a.Priority == b.Priority {
				// Equal priority: newer (larger) IDs come first.
				return a.ID > b.ID
			}
			if desc {
				return a.Priority > b.Priority
			}
			return a.Priority < b.Priority
		case "id":
			if desc {
				return a.ID > b.ID
			}
			return a.ID < b.ID
		default:
			if less, decided := compareDeadline(a.DeadlineAt, b.DeadlineAt, order); decided {
				return less
			}
			return a.ID > b.ID
		}
	})
}
func compareDeadline(left, right *time.Time, order string) (less bool, decided bool) {
if left == nil && right == nil {
return false, false
}
if left == nil && right != nil {
return false, true
}
if left != nil && right == nil {
return true, true
}
if left.Equal(*right) {
return false, false
}
if order == "desc" {
return left.After(*right), true
}
return left.Before(*right), true
}

View File

@@ -0,0 +1,663 @@
package sv
import (
"context"
"encoding/json"
"errors"
"log"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
"gorm.io/gorm"
)
// GetConversationTimeline returns the unified timeline (text + cards) for one
// conversation.
//
// Responsibilities:
//  1. Read-only — conversation state is never modified;
//  2. Ordering follows seq, so the client can rebuild deterministically after
//     a refresh;
//  3. Redis timeline cache first, MySQL fallback on a miss.
func (s *AgentService) GetConversationTimeline(ctx context.Context, userID int, chatID string) ([]model.GetConversationTimelineItem, error) {
	normalizedChatID := normalizeConversationID(chatID)
	if userID <= 0 || strings.TrimSpace(normalizedChatID) == "" {
		return nil, gorm.ErrRecordNotFound
	}
	// Ownership/existence check before touching the cache or timeline rows.
	exists, err := s.repo.IfChatExists(ctx, userID, normalizedChatID)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, gorm.ErrRecordNotFound
	}
	if s.cacheDAO != nil {
		cacheItems, cacheErr := s.cacheDAO.GetConversationTimelineFromCache(ctx, userID, normalizedChatID)
		if cacheErr == nil && cacheItems != nil {
			return normalizeConversationTimelineItems(cacheItems), nil
		}
		// Cache errors degrade to a DB read; log only.
		if cacheErr != nil {
			log.Printf("读取会话时间线缓存失败 user=%d chat=%s err=%v", userID, normalizedChatID, cacheErr)
		}
	}
	events, err := s.repo.ListConversationTimelineEvents(ctx, userID, normalizedChatID)
	if err != nil {
		return nil, err
	}
	items := buildConversationTimelineItemsFromDB(events)
	if s.cacheDAO != nil {
		// Best-effort backfill of both the item list and the latest seq.
		if err := s.cacheDAO.SetConversationTimelineToCache(ctx, userID, normalizedChatID, items); err != nil {
			log.Printf("回填会话时间线缓存失败 user=%d chat=%s err=%v", userID, normalizedChatID, err)
		}
		if len(items) > 0 {
			if err := s.cacheDAO.SetConversationTimelineSeq(ctx, userID, normalizedChatID, items[len(items)-1].Seq); err != nil {
				log.Printf("回填会话时间线 seq 失败 user=%d chat=%s err=%v", userID, normalizedChatID, err)
			}
		}
	}
	return normalizeConversationTimelineItems(items), nil
}
// appendConversationTimelineEvent appends a single timeline event to Redis +
// outbox.
//
// Steps:
//  1. Allocate a per-conversation monotonically increasing seq (Redis
//     preferred, DB fallback when Redis is unavailable);
//  2. Append the event to the Redis timeline cache so the pre-refresh user
//     experience stays continuous;
//  3. Publish an outbox event for async, reliable MySQL persistence — the
//     same semantics used for chat history;
//  4. Fall back to a synchronous MySQL write when no eventPublisher is
//     injected (minimal local environments).
//
// Returns the allocated seq; (0, nil) means the material was filtered out and
// nothing is persisted.
func (s *AgentService) appendConversationTimelineEvent(
	ctx context.Context,
	userID int,
	chatID string,
	kind string,
	role string,
	content string,
	payload map[string]any,
	tokensConsumed int,
) (int64, error) {
	if s == nil || s.repo == nil {
		return 0, errors.New("agent service is not initialized")
	}
	if ctx == nil {
		ctx = context.Background()
	}
	normalizedChatID := strings.TrimSpace(chatID)
	normalizedRole := strings.TrimSpace(role)
	normalizedKind := canonicalizeTimelineKind(kind, normalizedRole)
	normalizedContent := strings.TrimSpace(content)
	if userID <= 0 || normalizedChatID == "" || normalizedKind == "" {
		return 0, errors.New("invalid timeline event identity")
	}
	// Collapse the raw input into the persistable shape; events that should
	// not be persisted (e.g. thinking summaries without a detail summary)
	// short-circuit here with (0, nil).
	normalizedContent, normalizedPayload, shouldPersist := normalizeConversationTimelinePersistMaterial(normalizedKind, normalizedContent, payload)
	if !shouldPersist {
		return 0, nil
	}
	seq, err := s.nextConversationTimelineSeq(ctx, userID, normalizedChatID)
	if err != nil {
		return 0, err
	}
	persistPayload := (model.ChatTimelinePersistPayload{
		UserID:         userID,
		ConversationID: normalizedChatID,
		Seq:            seq,
		Kind:           normalizedKind,
		Role:           normalizedRole,
		Content:        normalizedContent,
		PayloadJSON:    marshalTimelinePayloadJSON(normalizedPayload),
		TokensConsumed: tokensConsumed,
	}).Normalize()
	if s.eventPublisher != nil {
		now := time.Now()
		// 1. Write the Redis timeline cache first so the local view and the
		//    next round's context see this event immediately.
		// 2. Then publish the outbox event, matching chat history's
		//    "enqueued == success" semantics.
		// 3. If outbox publishing fails, return the error to the caller;
		//    never silently fall back to a synchronous DB write here.
		s.appendConversationTimelineCacheNonBlocking(
			ctx,
			userID,
			normalizedChatID,
			buildConversationTimelineCacheItem(0, seq, normalizedKind, normalizedRole, normalizedContent, normalizedPayload, tokensConsumed, &now),
		)
		if err := eventsvc.PublishAgentTimelinePersistRequested(ctx, s.eventPublisher, persistPayload); err != nil {
			return 0, err
		}
		return seq, nil
	}
	return s.appendConversationTimelineEventSync(ctx, userID, normalizedChatID, persistPayload, normalizedPayload)
}
// appendConversationTimelineEventSync writes to MySQL synchronously when the
// outbox is disabled.
//
// Steps:
//  1. Strictly the degraded path for a nil eventPublisher, so local setups do
//     not depend on the event bus;
//  2. On a seq unique-key conflict, re-read the DB's max seq and retry once
//     with a fresh number — the same semantics as the outbox consumer;
//  3. After MySQL succeeds, append to the Redis cache so it carries the
//     DB-generated id/created_at.
func (s *AgentService) appendConversationTimelineEventSync(
	ctx context.Context,
	userID int,
	chatID string,
	persistPayload model.ChatTimelinePersistPayload,
	payload map[string]any,
) (int64, error) {
	eventID, eventCreatedAt, err := s.repo.SaveConversationTimelineEvent(ctx, persistPayload)
	if err != nil {
		// 1. Conflicts here usually mean the Redis seq key expired or lags
		//    behind the DB.
		// 2. Since this is the synchronous path, read the DB's current max
		//    seq directly and retry with the next number.
		// 3. If the retry also fails, surface the database error unchanged
		//    instead of quietly swallowing the real problem.
		if !model.IsTimelineSeqConflictError(err) {
			return 0, err
		}
		maxSeq, seqErr := s.repo.GetConversationTimelineMaxSeq(ctx, userID, chatID)
		if seqErr != nil {
			return 0, seqErr
		}
		persistPayload.Seq = maxSeq + 1
		eventID, eventCreatedAt, err = s.repo.SaveConversationTimelineEvent(ctx, persistPayload)
		if err != nil {
			return 0, err
		}
		// Repair the Redis counter so subsequent allocations continue from
		// the corrected seq; failure is log-only.
		if s.cacheDAO != nil {
			if setErr := s.cacheDAO.SetConversationTimelineSeq(ctx, userID, chatID, persistPayload.Seq); setErr != nil {
				log.Printf("回填时间线 seq 到 Redis 失败 user=%d chat=%s seq=%d err=%v", userID, chatID, persistPayload.Seq, setErr)
			}
		}
	}
	s.appendConversationTimelineCacheNonBlocking(
		ctx,
		userID,
		chatID,
		buildConversationTimelineCacheItem(
			eventID,
			persistPayload.Seq,
			persistPayload.Kind,
			persistPayload.Role,
			persistPayload.Content,
			payload,
			persistPayload.TokensConsumed,
			eventCreatedAt,
		),
	)
	return persistPayload.Seq, nil
}
// appendConversationTimelineCacheNonBlocking best-effort appends one timeline
// event to Redis.
//
// Cache failures must never break the main flow — MySQL/outbox is the durable
// write — so errors are only logged here (also surfacing Redis outages and
// payload serialization problems). The item is fully normalized by the
// caller; no business fields are trimmed again.
func (s *AgentService) appendConversationTimelineCacheNonBlocking(
	ctx context.Context,
	userID int,
	chatID string,
	item model.GetConversationTimelineItem,
) {
	if s.cacheDAO == nil {
		return
	}
	err := s.cacheDAO.AppendConversationTimelineEventToCache(ctx, userID, chatID, item)
	if err == nil {
		return
	}
	log.Printf("追加时间线缓存失败 user=%d chat=%s seq=%d kind=%s err=%v", userID, chatID, item.Seq, item.Kind, err)
}
// nextConversationTimelineSeq allocates one new timeline seq.
//
// Steps:
//  1. Prefer Redis INCR so event writes do not all serialize on MySQL;
//  2. Cross-check against DB MAX(seq) to correct an expired or lagging Redis
//     key before the write happens;
//  3. When Redis is unavailable, fall back to DB MAX(seq)+1 and best-effort
//     push the result back into Redis.
func (s *AgentService) nextConversationTimelineSeq(ctx context.Context, userID int, chatID string) (int64, error) {
	if s == nil || s.repo == nil {
		return 0, errors.New("agent service is not initialized")
	}
	if ctx == nil {
		ctx = context.Background()
	}
	normalizedChatID := strings.TrimSpace(chatID)
	if userID <= 0 || normalizedChatID == "" {
		return 0, errors.New("invalid timeline seq identity")
	}
	if s.cacheDAO == nil {
		return s.nextConversationTimelineSeqFromDB(ctx, userID, normalizedChatID)
	}
	candidateSeq, err := s.cacheDAO.IncrConversationTimelineSeq(ctx, userID, normalizedChatID)
	if err != nil {
		log.Printf("分配时间线 seq 时 Redis INCR 失败,回退 DB user=%d chat=%s err=%v", userID, normalizedChatID, err)
		return s.nextConversationTimelineSeqFromDB(ctx, userID, normalizedChatID)
	}
	// 1. With a missing Redis key, INCR commonly restarts at 1 and can
	//    collide with existing DB rows.
	// 2. Compare once against the DB max seq and repair an obviously stale
	//    counter, lowering the chance the outbox consumer must patch seqs.
	// 3. This self-check cannot see outbox events not yet consumed into
	//    MySQL, so extreme-concurrency fallback remains the consumer's job.
	maxSeq, err := s.repo.GetConversationTimelineMaxSeq(ctx, userID, normalizedChatID)
	if err != nil {
		return 0, err
	}
	if candidateSeq > maxSeq {
		return candidateSeq, nil
	}
	repairedSeq := maxSeq + 1
	if err = s.cacheDAO.SetConversationTimelineSeq(ctx, userID, normalizedChatID, repairedSeq); err != nil {
		log.Printf("修正时间线 seq 到 Redis 失败 user=%d chat=%s seq=%d err=%v", userID, normalizedChatID, repairedSeq, err)
	}
	return repairedSeq, nil
}
// nextConversationTimelineSeqFromDB derives the next seq directly from the
// database (MAX(seq)+1) and best-effort pushes it back into Redis so the fast
// path can recover; a failed backfill is log-only.
func (s *AgentService) nextConversationTimelineSeqFromDB(ctx context.Context, userID int, chatID string) (int64, error) {
	maxSeq, err := s.repo.GetConversationTimelineMaxSeq(ctx, userID, chatID)
	if err != nil {
		return 0, err
	}
	nextSeq := maxSeq + 1
	if s.cacheDAO == nil {
		return nextSeq, nil
	}
	if setErr := s.cacheDAO.SetConversationTimelineSeq(ctx, userID, chatID, nextSeq); setErr != nil {
		log.Printf("回填时间线 seq 到 Redis 失败 user=%d chat=%s seq=%d err=%v", userID, chatID, nextSeq, setErr)
	}
	return nextSeq, nil
}
// normalizeConversationTimelinePersistMaterial converges raw timeline input
// into the shape that is both cacheable and persistable.
//
// Responsibilities:
//  1. Ordinary events get a shallow payload copy so later caller mutations do
//     not leak into already-enqueued payloads;
//  2. thinking_summary keeps only detail_summary plus required metadata
//     (short_summary is explicitly dropped);
//  3. A thinking_summary without any detail_summary returns false — it stays
//     SSE-only and never enters the timeline.
func normalizeConversationTimelinePersistMaterial(kind string, content string, payload map[string]any) (string, map[string]any, bool) {
	trimmedKind := strings.ToLower(strings.TrimSpace(kind))
	trimmedContent := strings.TrimSpace(content)
	if trimmedKind == model.AgentTimelineKindThinkingSummary {
		return sanitizeThinkingSummaryPersistMaterial(trimmedContent, payload)
	}
	return trimmedContent, cloneTimelinePayload(payload), true
}
// sanitizeThinkingSummaryPersistMaterial reduces a thinking_summary event to
// its persistable fields. The detail summary comes from the payload's
// detail_summary key, falling back to the trimmed content; without either,
// false is returned and nothing is persisted.
func sanitizeThinkingSummaryPersistMaterial(content string, payload map[string]any) (string, map[string]any, bool) {
	detail := readTimelinePayloadString(payload, "detail_summary")
	if detail == "" {
		detail = strings.TrimSpace(content)
	}
	if detail == "" {
		return "", nil, false
	}
	// Whitelist-copy: trimmed string metadata plus pass-through scalars.
	sanitized := map[string]any{"detail_summary": detail}
	for _, key := range []string{"stage", "block_id", "display_mode"} {
		copyTrimmedTimelinePayloadField(payload, sanitized, key)
	}
	for _, key := range []string{"summary_seq", "final", "duration_seconds"} {
		copyTimelinePayloadFieldIfPresent(payload, sanitized, key)
	}
	return detail, sanitized, true
}
// copyTrimmedTimelinePayloadField copies src[key] into dst only when the
// value is a string whose trimmed form is non-empty; the trimmed value is
// what gets stored. Missing keys, non-strings, blanks, and nil maps are
// silently skipped.
func copyTrimmedTimelinePayloadField(src map[string]any, dst map[string]any, key string) {
	if len(src) == 0 || dst == nil {
		return
	}
	raw, found := src[key]
	if !found {
		return
	}
	text, isString := raw.(string)
	if !isString {
		return
	}
	if trimmed := strings.TrimSpace(text); trimmed != "" {
		dst[key] = trimmed
	}
}
// copyTimelinePayloadFieldIfPresent copies src[key] into dst verbatim when
// the key exists and its value is non-nil; nil maps and missing keys are
// silently skipped.
func copyTimelinePayloadFieldIfPresent(src map[string]any, dst map[string]any, key string) {
	if len(src) == 0 || dst == nil {
		return
	}
	if value, ok := src[key]; ok && value != nil {
		dst[key] = value
	}
}
// persistAgentTimelineExtraEvent writes structured events from the SSE extra
// channel into the timeline.
//
// Notes:
//  1. Only business events that must survive a refresh are persisted;
//  2. Transient display data such as short_summary is filtered out inside
//     appendConversationTimelineEvent;
//  3. Failures are log-only and never interrupt the live SSE output.
func (s *AgentService) persistAgentTimelineExtraEvent(
	ctx context.Context,
	userID int,
	chatID string,
	extra *agentstream.OpenAIChunkExtra,
) {
	// Only extras that map to a known timeline kind are persisted.
	kind, ok := mapTimelineKindFromStreamExtra(extra)
	if !ok {
		return
	}
	if ctx == nil {
		ctx = context.Background()
	}
	if _, err := s.appendConversationTimelineEvent(
		ctx,
		userID,
		chatID,
		kind,
		"",
		"",
		buildTimelinePayloadFromStreamExtra(extra),
		0,
	); err != nil {
		log.Printf("写入 agent 时间线事件失败 user=%d chat=%s kind=%s err=%v", userID, chatID, kind, err)
	}
}
// buildConversationTimelineCacheItem assembles a timeline item for the Redis
// cache. The payload is cloned so later caller mutations cannot bleed into
// the cached entry; CreatedAt is set only when supplied.
func buildConversationTimelineCacheItem(
	eventID int64,
	seq int64,
	kind string,
	role string,
	content string,
	payload map[string]any,
	tokensConsumed int,
	createdAt *time.Time,
) model.GetConversationTimelineItem {
	item := model.GetConversationTimelineItem{
		ID:             eventID,
		Seq:            seq,
		Kind:           kind,
		Role:           role,
		Content:        content,
		Payload:        cloneTimelinePayload(payload),
		TokensConsumed: tokensConsumed,
	}
	if createdAt == nil {
		return item
	}
	item.CreatedAt = createdAt
	return item
}
// buildConversationTimelineItemsFromDB converts persisted timeline events into
// API items and normalizes their kind/role through
// normalizeConversationTimelineItems.
//
// Payload JSON that fails to decode — or decodes to an empty object — is
// dropped silently so one corrupt payload cannot fail the whole listing.
func buildConversationTimelineItemsFromDB(events []model.AgentTimelineEvent) []model.GetConversationTimelineItem {
	if len(events) == 0 {
		return make([]model.GetConversationTimelineItem, 0)
	}
	items := make([]model.GetConversationTimelineItem, 0, len(events))
	for _, event := range events {
		item := model.GetConversationTimelineItem{
			ID:             event.ID,
			Seq:            event.Seq,
			Kind:           strings.TrimSpace(event.Kind),
			TokensConsumed: event.TokensConsumed,
			CreatedAt:      event.CreatedAt,
		}
		// Role/Content are nullable; trim only when present.
		if event.Role != nil {
			item.Role = strings.TrimSpace(*event.Role)
		}
		if event.Content != nil {
			item.Content = strings.TrimSpace(*event.Content)
		}
		if event.Payload != nil {
			var payload map[string]any
			// Best-effort decode: invalid or empty payloads are ignored.
			if err := json.Unmarshal([]byte(strings.TrimSpace(*event.Payload)), &payload); err == nil && len(payload) > 0 {
				item.Payload = payload
			}
		}
		items = append(items, item)
	}
	return normalizeConversationTimelineItems(items)
}
// normalizeConversationTimelineItems converges the timeline kind/role
// conventions so front-end segmentation does not break.
//
// A missing kind is inferred from the role (keeping the "user segmentation
// anchor" usable), and a missing role is back-filled from a text kind so the
// front end needs no extra compatibility checks.
func normalizeConversationTimelineItems(items []model.GetConversationTimelineItem) []model.GetConversationTimelineItem {
	out := make([]model.GetConversationTimelineItem, 0, len(items))
	for _, entry := range items {
		normalizedRole := strings.ToLower(strings.TrimSpace(entry.Role))
		normalizedKind := canonicalizeTimelineKind(entry.Kind, normalizedRole)
		if normalizedKind == "" {
			// Infer a text kind from the role when kind is absent.
			if normalizedRole == "user" {
				normalizedKind = model.AgentTimelineKindUserText
			} else if normalizedRole == "assistant" {
				normalizedKind = model.AgentTimelineKindAssistantText
			}
		}
		if normalizedRole == "" {
			// Back-fill the role from a text kind when role is absent.
			if normalizedKind == model.AgentTimelineKindUserText {
				normalizedRole = "user"
			} else if normalizedKind == model.AgentTimelineKindAssistantText {
				normalizedRole = "assistant"
			}
		}
		entry.Kind = normalizedKind
		entry.Role = normalizedRole
		out = append(out, entry)
	}
	return out
}
// canonicalizeTimelineKind collapses kind aliases onto the documented
// canonical values.
//
// Canonical kinds pass through lowercased; the legacy aliases
// "text"/"message"/"query" are resolved by role; any other value is returned
// lowercased as-is so unknown kinds remain visible downstream.
func canonicalizeTimelineKind(kind string, role string) string {
	normalizedKind := strings.ToLower(strings.TrimSpace(kind))
	normalizedRole := strings.ToLower(strings.TrimSpace(role))
	switch normalizedKind {
	case model.AgentTimelineKindUserText,
		model.AgentTimelineKindAssistantText,
		model.AgentTimelineKindToolCall,
		model.AgentTimelineKindToolResult,
		model.AgentTimelineKindConfirmRequest,
		model.AgentTimelineKindBusinessCard,
		model.AgentTimelineKindScheduleCompleted,
		model.AgentTimelineKindThinkingSummary:
		return normalizedKind
	case "text", "message", "query":
		// Legacy text aliases: disambiguate via the lowercased role.
		if normalizedRole == "user" {
			return model.AgentTimelineKindUserText
		}
		if normalizedRole == "assistant" {
			return model.AgentTimelineKindAssistantText
		}
		return normalizedKind
	default:
		return normalizedKind
	}
}
// marshalTimelinePayloadJSON serializes a payload map to JSON; empty maps and
// marshal failures both yield the empty string.
func marshalTimelinePayloadJSON(payload map[string]any) string {
	if len(payload) == 0 {
		return ""
	}
	if encoded, err := json.Marshal(payload); err == nil {
		return string(encoded)
	}
	return ""
}
// cloneTimelinePayload returns a shallow copy of the payload map so callers
// can mutate the result without touching the source; empty input yields nil.
func cloneTimelinePayload(payload map[string]any) map[string]any {
	if len(payload) == 0 {
		return nil
	}
	dup := make(map[string]any, len(payload))
	for k, v := range payload {
		dup[k] = v
	}
	return dup
}
// mapTimelineKindFromStreamExtra maps a stream extra onto a persistable
// timeline kind; the second result is false for extras that must not be
// persisted.
//
// Thinking summaries are matched first (case-insensitive kind string, see
// isThinkingSummaryStreamExtra) because they carry a dedicated payload shape;
// the remaining kinds map 1:1.
func mapTimelineKindFromStreamExtra(extra *agentstream.OpenAIChunkExtra) (string, bool) {
	if extra == nil {
		return "", false
	}
	if isThinkingSummaryStreamExtra(extra) {
		return model.AgentTimelineKindThinkingSummary, true
	}
	switch extra.Kind {
	case agentstream.StreamExtraKindToolCall:
		return model.AgentTimelineKindToolCall, true
	case agentstream.StreamExtraKindToolResult:
		return model.AgentTimelineKindToolResult, true
	case agentstream.StreamExtraKindConfirm:
		return model.AgentTimelineKindConfirmRequest, true
	case agentstream.StreamExtraKindBusinessCard:
		return model.AgentTimelineKindBusinessCard, true
	case agentstream.StreamExtraKindScheduleCompleted:
		return model.AgentTimelineKindScheduleCompleted, true
	default:
		return "", false
	}
}
// buildTimelinePayloadFromStreamExtra converts a stream extra into the
// persistable timeline payload map.
//
// Thinking summaries get a dedicated payload shape; every other kind shares a
// base payload (stage/block_id/display_mode) plus optional tool / confirm /
// interrupt / business_card / meta sections. Nested maps are cloned so the
// stored payload never aliases live stream structures.
func buildTimelinePayloadFromStreamExtra(extra *agentstream.OpenAIChunkExtra) map[string]any {
	if extra == nil {
		return nil
	}
	if isThinkingSummaryStreamExtra(extra) {
		return buildThinkingSummaryTimelinePayload(extra)
	}
	payload := map[string]any{
		"stage":        strings.TrimSpace(extra.Stage),
		"block_id":     strings.TrimSpace(extra.BlockID),
		"display_mode": string(extra.DisplayMode),
	}
	if extra.Tool != nil {
		toolPayload := map[string]any{
			"name":              strings.TrimSpace(extra.Tool.Name),
			"status":            strings.TrimSpace(extra.Tool.Status),
			"summary":           strings.TrimSpace(extra.Tool.Summary),
			"arguments_preview": strings.TrimSpace(extra.Tool.ArgumentsPreview),
		}
		// View maps are only attached when non-empty.
		if len(extra.Tool.ArgumentView) > 0 {
			toolPayload["argument_view"] = cloneTimelinePayload(extra.Tool.ArgumentView)
		}
		if len(extra.Tool.ResultView) > 0 {
			toolPayload["result_view"] = cloneTimelinePayload(extra.Tool.ResultView)
		}
		payload["tool"] = toolPayload
	}
	if extra.Confirm != nil {
		payload["confirm"] = map[string]any{
			"interaction_id": strings.TrimSpace(extra.Confirm.InteractionID),
			"title":          strings.TrimSpace(extra.Confirm.Title),
			"summary":        strings.TrimSpace(extra.Confirm.Summary),
		}
	}
	if extra.Interrupt != nil {
		payload["interrupt"] = map[string]any{
			"interaction_id": strings.TrimSpace(extra.Interrupt.InteractionID),
			"type":           strings.TrimSpace(extra.Interrupt.Type),
			"summary":        strings.TrimSpace(extra.Interrupt.Summary),
		}
	}
	if extra.BusinessCard != nil {
		payload["business_card"] = cloneStreamBusinessCard(extra.BusinessCard)
	}
	if len(extra.Meta) > 0 {
		payload["meta"] = cloneTimelinePayload(extra.Meta)
	}
	return payload
}
// isThinkingSummaryStreamExtra reports whether the extra's kind matches the
// thinking-summary timeline kind, compared case-insensitively after trimming.
func isThinkingSummaryStreamExtra(extra *agentstream.OpenAIChunkExtra) bool {
	if extra == nil {
		return false
	}
	kind := strings.TrimSpace(string(extra.Kind))
	return strings.EqualFold(kind, model.AgentTimelineKindThinkingSummary)
}
// buildThinkingSummaryTimelinePayload builds the payload for a thinking-summary
// timeline event.
//
// Structured ThinkingSummary data wins; when absent, detail_summary is
// recovered from the extra's meta map as a fallback. Blank detail summaries
// are omitted rather than stored as empty strings.
func buildThinkingSummaryTimelinePayload(extra *agentstream.OpenAIChunkExtra) map[string]any {
	payload := map[string]any{
		"stage":        strings.TrimSpace(extra.Stage),
		"block_id":     strings.TrimSpace(extra.BlockID),
		"display_mode": string(extra.DisplayMode),
	}
	if extra.ThinkingSummary != nil {
		summary := extra.ThinkingSummary
		payload["summary_seq"] = summary.SummarySeq
		payload["final"] = summary.Final
		payload["duration_seconds"] = summary.DurationSeconds
		if detailSummary := strings.TrimSpace(summary.DetailSummary); detailSummary != "" {
			payload["detail_summary"] = detailSummary
		}
		return payload
	}
	// Fallback path: no structured summary, try the meta map.
	if detailSummary := readTimelineExtraMetaString(extra.Meta, "detail_summary"); detailSummary != "" {
		payload["detail_summary"] = detailSummary
	}
	return payload
}
// readTimelineExtraMetaString returns the trimmed string stored under key in
// the meta map; missing keys and non-string values yield "".
func readTimelineExtraMetaString(meta map[string]any, key string) string {
	if len(meta) == 0 {
		return ""
	}
	if value, ok := meta[key].(string); ok {
		return strings.TrimSpace(value)
	}
	return ""
}
// readTimelinePayloadString returns the trimmed string stored under key in a
// timeline payload; missing keys and non-string values yield "".
func readTimelinePayloadString(payload map[string]any, key string) string {
	if len(payload) == 0 {
		return ""
	}
	if value, ok := payload[key].(string); ok {
		return strings.TrimSpace(value)
	}
	return ""
}
// cloneStreamBusinessCard converts a stream business-card extra into a payload
// map, trimming the scalar fields and cloning Data so the stored payload does
// not alias the stream structure; nil in, nil out.
func cloneStreamBusinessCard(card *agentstream.StreamBusinessCardExtra) map[string]any {
	if card == nil {
		return nil
	}
	cloned := map[string]any{
		"card_type": strings.TrimSpace(card.CardType),
		"title":     strings.TrimSpace(card.Title),
		"summary":   strings.TrimSpace(card.Summary),
		"source":    strings.TrimSpace(card.Source),
	}
	if len(card.Data) > 0 {
		cloned["data"] = cloneTimelinePayload(card.Data)
	}
	return cloned
}

View File

@@ -0,0 +1,121 @@
package sv
import (
"context"
"errors"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memorycontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/memory"
)
// MemoryRPCReaderClient describes the minimal capability the agent needs to
// read memory over zrpc.
//
// Responsibility boundaries:
//  1. only reads candidate memories; no management/write endpoints are exposed;
//  2. callers need not know the concrete gateway/client/memory implementation;
//  3. errors are returned as-is to the prefetch chain; the agent side handles
//     soft degradation and observability.
type MemoryRPCReaderClient interface {
	Retrieve(ctx context.Context, req memorycontracts.RetrieveRequest) ([]memorycontracts.ItemDTO, error)
}

// memoryRPCReader adapts a MemoryRPCReaderClient to the agent-internal
// MemoryReader contract; observer/metrics serve the injection observation path
// only and are never nil after NewMemoryRPCReader.
type memoryRPCReader struct {
	client   MemoryRPCReaderClient
	observer memoryobserve.Observer
	metrics  memoryobserve.MetricsRecorder
}
// NewMemoryRPCReader creates the cross-process memory reader adapter.
//
// Responsibility boundaries:
//  1. only converts the agent-internal memorymodel.RetrieveRequest into the
//     shared contract;
//  2. holds no memory.Module, so the agent main chain stops talking to the
//     in-process memory service after CP3;
//  3. observer/metrics are used only for agent injection observation, not for
//     the retrieve business call itself;
//  4. a nil client returns nil, preserving SetMemoryReader's existing
//     "no reader means no injection" degradation semantics.
func NewMemoryRPCReader(
	client MemoryRPCReaderClient,
	observer memoryobserve.Observer,
	metrics memoryobserve.MetricsRecorder,
) MemoryReader {
	if client == nil {
		return nil
	}
	reader := &memoryRPCReader{
		client:   client,
		observer: observer,
		metrics:  metrics,
	}
	// Fall back to no-op implementations so downstream code never nil-checks.
	if reader.observer == nil {
		reader.observer = memoryobserve.NewNopObserver()
	}
	if reader.metrics == nil {
		reader.metrics = memoryobserve.NewNopMetrics()
	}
	return reader
}
// Retrieve reads candidate memories over the memory zrpc and converts them
// back into agent-internal DTOs.
//
// The internal request is copied field-by-field into the shared contract
// (MemoryTypes is defensively cloned); RPC errors are returned unchanged so
// the prefetch chain decides how to degrade.
func (r *memoryRPCReader) Retrieve(ctx context.Context, req memorymodel.RetrieveRequest) ([]memorymodel.ItemDTO, error) {
	if r == nil || r.client == nil {
		return nil, errors.New("memory rpc reader client is nil")
	}
	items, err := r.client.Retrieve(ctx, memorycontracts.RetrieveRequest{
		Query:          req.Query,
		UserID:         req.UserID,
		ConversationID: req.ConversationID,
		AssistantID:    req.AssistantID,
		RunID:          req.RunID,
		MemoryTypes:    append([]string(nil), req.MemoryTypes...),
		Limit:          req.Limit,
		Now:            req.Now,
	})
	if err != nil {
		return nil, err
	}
	return toMemoryModelItems(items), nil
}
// MemoryObserver exposes the observer used by the agent injection chain,
// keeping injection observability continuous across the CP3 cutover; a nil
// receiver or field degrades to a no-op observer.
func (r *memoryRPCReader) MemoryObserver() memoryobserve.Observer {
	if r == nil || r.observer == nil {
		return memoryobserve.NewNopObserver()
	}
	return r.observer
}

// MemoryMetrics exposes the metrics recorder used by the agent injection
// chain so metrics are not silently lost after the RPC reader cutover; a nil
// receiver or field degrades to no-op metrics.
func (r *memoryRPCReader) MemoryMetrics() memoryobserve.MetricsRecorder {
	if r == nil || r.metrics == nil {
		return memoryobserve.NewNopMetrics()
	}
	return r.metrics
}
// toMemoryModelItems only moves fields across the contract/model layer
// boundary; it does not change ordering, filtering, or memory content.
// Empty input yields nil.
func toMemoryModelItems(items []memorycontracts.ItemDTO) []memorymodel.ItemDTO {
	if len(items) == 0 {
		return nil
	}
	result := make([]memorymodel.ItemDTO, 0, len(items))
	for _, item := range items {
		result = append(result, memorymodel.ItemDTO{
			ID:               item.ID,
			UserID:           item.UserID,
			ConversationID:   item.ConversationID,
			AssistantID:      item.AssistantID,
			RunID:            item.RunID,
			MemoryType:       item.MemoryType,
			Title:            item.Title,
			Content:          item.Content,
			ContentHash:      item.ContentHash,
			Confidence:       item.Confidence,
			Importance:       item.Importance,
			SensitivityLevel: item.SensitivityLevel,
			IsExplicit:       item.IsExplicit,
			Status:           item.Status,
			TTLAt:            item.TTLAt,
			CreatedAt:        item.CreatedAt,
			UpdatedAt:        item.UpdatedAt,
		})
	}
	return result
}

View File

@@ -0,0 +1,112 @@
package sv
import (
"context"
"errors"
"log"
"strings"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
)
// reasoningSummaryMaxTokens caps the completion size of the cheap summarizer
// model call in makeReasoningSummaryFunc.
const reasoningSummaryMaxTokens = 700

// reasoningSummaryLLMResponse is the JSON shape expected back from the
// reasoning-summary model.
type reasoningSummaryLLMResponse struct {
	ShortSummary  string `json:"short_summary"`
	DetailSummary string `json:"detail_summary"`
}
// makeReasoningSummaryFunc wraps the cheap model into a summary function the
// stream layer can inject.
//
// Responsibility boundaries:
//  1. the service layer picks the model and prompt; the stream layer only
//     handles scheduling and gating;
//  2. no persistence happens here; summaries are persisted uniformly via the
//     ChunkEmitter extra hook;
//  3. on failure an error is returned; ReasoningDigestor swallows it and waits
//     for the next watermark/Flush as the fallback.
func (s *AgentService) makeReasoningSummaryFunc(client *llmservice.Client) agentstream.ReasoningSummaryFunc {
	if client == nil {
		return nil
	}
	return func(ctx context.Context, input agentstream.ReasoningSummaryInput) (agentstream.StreamThinkingSummaryExtra, error) {
		// Prefer the previous detail summary as continuation context; fall
		// back to the short summary when the detail text is blank.
		previousSummary := ""
		if input.PreviousSummary != nil {
			previousSummary = input.PreviousSummary.DetailSummary
			if strings.TrimSpace(previousSummary) == "" {
				previousSummary = input.PreviousSummary.ShortSummary
			}
		}
		messages := agentprompt.BuildReasoningSummaryMessages(agentprompt.ReasoningSummaryPromptInput{
			FullReasoning:   input.FullReasoning,
			DeltaReasoning:  input.DeltaReasoning,
			PreviousSummary: previousSummary,
			CandidateSeq:    input.CandidateSeq,
			Final:           input.Final,
			DurationSeconds: input.DurationSeconds,
		})
		// Low temperature and disabled thinking keep the summarizer cheap
		// and stable.
		resp, rawResult, err := llmservice.GenerateJSON[reasoningSummaryLLMResponse](
			ctx,
			client,
			messages,
			llmservice.GenerateOptions{
				Temperature: 0.1,
				MaxTokens:   reasoningSummaryMaxTokens,
				Thinking:    llmservice.ThinkingModeDisabled,
				Metadata: map[string]any{
					"stage":         "reasoning_summary",
					"candidate_seq": input.CandidateSeq,
					"final":         input.Final,
				},
			},
		)
		if err != nil {
			log.Printf("[WARN] reasoning 摘要模型调用失败 seq=%d final=%v err=%v raw=%s",
				input.CandidateSeq,
				input.Final,
				err,
				truncateReasoningSummaryRaw(rawResult),
			)
			return agentstream.StreamThinkingSummaryExtra{}, err
		}
		summary := agentstream.StreamThinkingSummaryExtra{
			ShortSummary: strings.TrimSpace(resp.ShortSummary),
			DetailSummary: limitReasoningDetailSummary(
				resp.DetailSummary,
				agentprompt.ReasoningSummaryDetailRuneLimit(input.FullReasoning, input.DeltaReasoning),
			),
		}
		// Both fields empty means the model produced nothing usable.
		if summary.ShortSummary == "" && summary.DetailSummary == "" {
			return agentstream.StreamThinkingSummaryExtra{}, errors.New("reasoning 摘要模型返回空摘要")
		}
		return summary, nil
	}
}
// limitReasoningDetailSummary trims the text and caps it at maxRunes runes
// (not bytes, so multi-byte characters are never split). A non-positive limit
// disables capping.
func limitReasoningDetailSummary(text string, maxRunes int) string {
	trimmed := strings.TrimSpace(text)
	if trimmed == "" || maxRunes <= 0 {
		return trimmed
	}
	chars := []rune(trimmed)
	if len(chars) > maxRunes {
		return string(chars[:maxRunes])
	}
	return trimmed
}
// truncateReasoningSummaryRaw renders the raw model text for log output,
// trimming whitespace and capping it at 200 runes with a "..." suffix; a nil
// result yields "".
func truncateReasoningSummaryRaw(raw *llmservice.TextResult) string {
	if raw == nil {
		return ""
	}
	trimmed := strings.TrimSpace(raw.Text)
	chars := []rune(trimmed)
	if len(chars) > 200 {
		return string(chars[:200]) + "..."
	}
	return trimmed
}

View File

@@ -0,0 +1,330 @@
package sv
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
scheduletool "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
)
// scheduleProviderRPCTimeout bounds each schedule/task-class RPC read issued
// by the provider.
const scheduleProviderRPCTimeout = 6 * time.Second

// ScheduleAgentRPCClient describes the minimal capability the agent schedule
// provider needs to read the schedule service.
//
// Responsibility boundaries:
//  1. only reads raw per-week schedule slot facts;
//  2. does not expose the schedule DAO, caches, or the write state machine;
//  3. returns the JSON contract; the provider reuses the existing
//     LoadScheduleState modeling logic.
type ScheduleAgentRPCClient interface {
	GetAgentWeekSchedule(ctx context.Context, req schedulecontracts.AgentScheduleWeekRequest) (json.RawMessage, error)
}

// TaskClassAgentReadRPCClient describes the minimal capability the agent
// schedule provider needs to read the task-class service.
type TaskClassAgentReadRPCClient interface {
	GetAgentTaskClasses(ctx context.Context, req taskclasscontracts.AgentTaskClassesRequest) (json.RawMessage, error)
}

// TaskClassAgentRPCClient aggregates the task-class RPC write and read
// capabilities the agent currently depends on.
type TaskClassAgentRPCClient interface {
	TaskClassUpsertRPCClient
	TaskClassAgentReadRPCClient
}
// ScheduleRPCProvider builds the agent ScheduleState through schedule /
// task-class zrpc.
//
// Responsibility boundaries:
//  1. only replaces the DAO read path of the agent schedule provider;
//  2. window derivation, extra categories, and ScheduleState modeling keep
//     reusing the legacy agent/conv logic;
//  3. does not persist diffs and does not change the confirm/apply chain.
type ScheduleRPCProvider struct {
	scheduleClient  ScheduleAgentRPCClient
	taskClassClient TaskClassAgentReadRPCClient
}

// NewScheduleRPCProvider wires the two RPC read clients into a provider; nil
// clients surface as errors at call time rather than at construction.
func NewScheduleRPCProvider(scheduleClient ScheduleAgentRPCClient, taskClassClient TaskClassAgentReadRPCClient) *ScheduleRPCProvider {
	return &ScheduleRPCProvider{
		scheduleClient:  scheduleClient,
		taskClassClient: taskClassClient,
	}
}
// LoadScheduleState loads schedule state across all of the user's task
// classes; when no window can be derived from them it falls back to the
// current week.
func (p *ScheduleRPCProvider) LoadScheduleState(ctx context.Context, userID int) (*scheduletool.ScheduleState, error) {
	taskClasses, err := p.loadCompleteTaskClasses(ctx, userID, nil)
	if err != nil {
		return nil, err
	}
	return p.loadScheduleStateWithTaskClasses(ctx, userID, taskClasses, true)
}

// LoadScheduleStateForTaskClasses loads schedule state restricted to the given
// task classes; an empty ID list degrades to LoadScheduleState. The
// current-week fallback is disabled here so an invalid window fails loudly.
func (p *ScheduleRPCProvider) LoadScheduleStateForTaskClasses(ctx context.Context, userID int, taskClassIDs []int) (*scheduletool.ScheduleState, error) {
	if len(taskClassIDs) == 0 {
		return p.LoadScheduleState(ctx, userID)
	}
	taskClasses, err := p.loadCompleteTaskClasses(ctx, userID, taskClassIDs)
	if err != nil {
		return nil, err
	}
	return p.loadScheduleStateWithTaskClasses(ctx, userID, taskClasses, false)
}

// LoadTaskClassMetas resolves lightweight task-class metadata for the given
// IDs; an empty list yields nil without an RPC round trip.
func (p *ScheduleRPCProvider) LoadTaskClassMetas(ctx context.Context, userID int, taskClassIDs []int) ([]scheduletool.TaskClassMeta, error) {
	if len(taskClassIDs) == 0 {
		return nil, nil
	}
	taskClasses, err := p.loadCompleteTaskClasses(ctx, userID, taskClassIDs)
	if err != nil {
		return nil, err
	}
	return agentconv.TaskClassesToScheduleMetas(taskClasses), nil
}
// loadScheduleStateWithTaskClasses derives the planning window from the task
// classes, loads each covered week's slots over RPC, and hands everything to
// the legacy agent/conv modeling code.
//
// allowCurrentWeekFallback controls the empty-window case: the full-state path
// degrades to the current week, while the explicit task-class path returns a
// user-facing error instead.
func (p *ScheduleRPCProvider) loadScheduleStateWithTaskClasses(ctx context.Context, userID int, taskClasses []model.TaskClass, allowCurrentWeekFallback bool) (*scheduletool.ScheduleState, error) {
	windowDays, weeks := agentconv.BuildWindowFromTaskClasses(taskClasses)
	if len(windowDays) == 0 {
		if !allowCurrentWeekFallback {
			return nil, fmt.Errorf("任务类缺少有效时间窗:请补充 start_date/end_date 后再进行智能编排")
		}
		var err error
		windowDays, weeks, err = agentconv.BuildCurrentWeekWindow()
		if err != nil {
			return nil, err
		}
	}
	// Fetch each week sequentially and accumulate the raw slots.
	allSchedules := make([]model.Schedule, 0)
	for _, week := range weeks {
		weekSchedules, err := p.loadWeekSchedules(ctx, userID, week)
		if err != nil {
			return nil, fmt.Errorf("通过 schedule RPC 加载用户周日程失败 week=%d: %w", week, err)
		}
		allSchedules = append(allSchedules, weekSchedules...)
	}
	extraItemCategories := agentconv.BuildExtraItemCategories(allSchedules, taskClasses)
	return agentconv.LoadScheduleState(allSchedules, taskClasses, extraItemCategories, windowDays), nil
}
// loadCompleteTaskClasses fetches task classes (optionally restricted to the
// given IDs) over the task-class zrpc and converts them into shared models.
//
// Empty or literal "null" payloads decode to an empty list instead of
// erroring, mirroring the other agent RPC read adapters.
func (p *ScheduleRPCProvider) loadCompleteTaskClasses(ctx context.Context, userID int, taskClassIDs []int) ([]model.TaskClass, error) {
	if p == nil || p.taskClassClient == nil {
		return nil, errors.New("task-class rpc reader is nil")
	}
	callCtx, cancel := context.WithTimeout(ctx, scheduleProviderRPCTimeout)
	defer cancel()
	raw, err := p.taskClassClient.GetAgentTaskClasses(callCtx, taskclasscontracts.AgentTaskClassesRequest{
		UserID:       userID,
		TaskClassIDs: append([]int(nil), taskClassIDs...),
	})
	if err != nil {
		return nil, err
	}
	var resp taskclasscontracts.AgentTaskClassesResponse
	if len(raw) > 0 && string(raw) != "null" {
		if err := json.Unmarshal(raw, &resp); err != nil {
			return nil, err
		}
	}
	taskClasses := make([]model.TaskClass, 0, len(resp.TaskClasses))
	for _, item := range resp.TaskClasses {
		// A single malformed entry (e.g. bad date) fails the whole load.
		taskClass, err := agentTaskClassToModel(item)
		if err != nil {
			return nil, err
		}
		taskClasses = append(taskClasses, taskClass)
	}
	return taskClasses, nil
}
// loadWeekSchedules fetches one week of raw schedule slots over the schedule
// zrpc and converts them into shared models.
//
// Empty or literal "null" payloads decode to an empty week, mirroring
// loadCompleteTaskClasses.
func (p *ScheduleRPCProvider) loadWeekSchedules(ctx context.Context, userID int, week int) ([]model.Schedule, error) {
	if p == nil || p.scheduleClient == nil {
		return nil, errors.New("schedule rpc reader is nil")
	}
	callCtx, cancel := context.WithTimeout(ctx, scheduleProviderRPCTimeout)
	defer cancel()
	raw, err := p.scheduleClient.GetAgentWeekSchedule(callCtx, schedulecontracts.AgentScheduleWeekRequest{
		UserID: userID,
		Week:   week,
	})
	if err != nil {
		return nil, err
	}
	var resp schedulecontracts.AgentScheduleWeekResponse
	if len(raw) > 0 && string(raw) != "null" {
		if err := json.Unmarshal(raw, &resp); err != nil {
			return nil, err
		}
	}
	schedules := make([]model.Schedule, 0, len(resp.Schedules))
	for _, item := range resp.Schedules {
		schedules = append(schedules, agentScheduleSlotToModel(item))
	}
	return schedules, nil
}
// agentTaskClassToModel converts one task-class contract entry into the shared
// model, parsing start/end dates and copying items.
//
// NOTE(review): intPtrOrNil maps zero UserID/TotalSlots to nil and
// stringPtrOrNil maps blank strings to nil — this assumes 0/"" are never legal
// stored values for those fields; confirm against the contract.
func agentTaskClassToModel(in taskclasscontracts.AgentTaskClass) (model.TaskClass, error) {
	startDate, err := parseAgentDate(in.StartDate)
	if err != nil {
		return model.TaskClass{}, err
	}
	endDate, err := parseAgentDate(in.EndDate)
	if err != nil {
		return model.TaskClass{}, err
	}
	items := make([]model.TaskClassItem, 0, len(in.Items))
	for _, item := range in.Items {
		// Copy into a loop-local before taking the address so each item gets
		// its own Content pointer.
		content := item.Content
		items = append(items, model.TaskClassItem{
			ID:           item.ID,
			CategoryID:   cloneIntPtr(item.CategoryID),
			Order:        cloneIntPtr(item.Order),
			Content:      &content,
			EmbeddedTime: taskClassContractTargetTimeToModel(item.EmbeddedTime),
			Status:       cloneIntPtr(item.Status),
		})
	}
	return model.TaskClass{
		ID:                 in.ID,
		UserID:             intPtrOrNil(in.UserID),
		Name:               stringPtrOrNil(in.Name),
		Mode:               stringPtrOrNil(in.Mode),
		StartDate:          startDate,
		EndDate:            endDate,
		SubjectType:        stringPtrOrNil(in.SubjectType),
		DifficultyLevel:    stringPtrOrNil(in.DifficultyLevel),
		CognitiveIntensity: stringPtrOrNil(in.CognitiveIntensity),
		TotalSlots:         intPtrOrNil(in.TotalSlots),
		AllowFillerCourse:  boolPtr(in.AllowFillerCourse),
		Strategy:           stringPtrOrNil(in.Strategy),
		ExcludedSlots:      model.IntSlice(append([]int(nil), in.ExcludedSlots...)),
		ExcludedDaysOfWeek: model.IntSlice(append([]int(nil), in.ExcludedDaysOfWeek...)),
		Items:              items,
	}, nil
}
// agentScheduleSlotToModel converts one schedule contract slot into the shared
// model; pointer fields are cloned and nested event/task structures converted
// so the model never aliases contract data.
func agentScheduleSlotToModel(in schedulecontracts.AgentScheduleSlot) model.Schedule {
	return model.Schedule{
		ID:             in.ID,
		EventID:        in.EventID,
		UserID:         in.UserID,
		Week:           in.Week,
		DayOfWeek:      in.DayOfWeek,
		Section:        in.Section,
		EmbeddedTaskID: cloneIntPtr(in.EmbeddedTaskID),
		Status:         in.Status,
		Event:          agentScheduleEventToModel(in.Event),
		EmbeddedTask:   agentScheduleTaskItemToModel(in.EmbeddedTask),
	}
}
// agentScheduleEventToModel converts the optional schedule event attached to a
// slot; nil in, nil out. Location is cloned to detach from the contract value.
func agentScheduleEventToModel(in *schedulecontracts.AgentScheduleEvent) *model.ScheduleEvent {
	if in == nil {
		return nil
	}
	return &model.ScheduleEvent{
		ID:             in.ID,
		UserID:         in.UserID,
		Name:           in.Name,
		Location:       cloneStringPtr(in.Location),
		Type:           in.Type,
		RelID:          cloneIntPtr(in.RelID),
		TaskSourceType: in.TaskSourceType,
		CanBeEmbedded:  in.CanBeEmbedded,
		StartTime:      in.StartTime,
		EndTime:        in.EndTime,
	}
}
// agentScheduleTaskItemToModel converts the optional embedded task item on a
// slot; nil in, nil out. Content is copied to a local before its address is
// taken so the pointer never aliases the contract struct.
func agentScheduleTaskItemToModel(in *schedulecontracts.AgentScheduleTaskItem) *model.TaskClassItem {
	if in == nil {
		return nil
	}
	content := in.Content
	return &model.TaskClassItem{
		ID:           in.ID,
		CategoryID:   cloneIntPtr(in.CategoryID),
		Order:        cloneIntPtr(in.Order),
		Content:      &content,
		EmbeddedTime: scheduleContractTargetTimeToModel(in.EmbeddedTime),
		Status:       cloneIntPtr(in.Status),
	}
}
func parseAgentDate(value string) (*time.Time, error) {
trimmed := strings.TrimSpace(value)
if trimmed == "" {
return nil, nil
}
parsed, err := time.ParseInLocation("2006-01-02", trimmed, time.Local)
if err != nil {
return nil, err
}
return &parsed, nil
}
// taskClassContractTargetTimeToModel converts the task-class contract target
// time into the shared model; nil passes through as nil.
func taskClassContractTargetTimeToModel(value *taskclasscontracts.TargetTime) *model.TargetTime {
	if value == nil {
		return nil
	}
	return &model.TargetTime{
		Week:        value.Week,
		DayOfWeek:   value.DayOfWeek,
		SectionFrom: value.SectionFrom,
		SectionTo:   value.SectionTo,
	}
}

// scheduleContractTargetTimeToModel performs the same conversion for the
// schedule contract's target-time shape; the two contracts deliberately keep
// separate types, so two converters exist.
func scheduleContractTargetTimeToModel(value *schedulecontracts.AgentScheduleTargetTime) *model.TargetTime {
	if value == nil {
		return nil
	}
	return &model.TargetTime{
		Week:        value.Week,
		DayOfWeek:   value.DayOfWeek,
		SectionFrom: value.SectionFrom,
		SectionTo:   value.SectionTo,
	}
}
// stringPtrOrNil returns a pointer to the trimmed value, or nil when the value
// is blank after trimming.
func stringPtrOrNil(value string) *string {
	if cleaned := strings.TrimSpace(value); cleaned != "" {
		return &cleaned
	}
	return nil
}
// intPtrOrNil returns a pointer to value, treating zero as "absent" (nil).
func intPtrOrNil(value int) *int {
	if value != 0 {
		return &value
	}
	return nil
}

// boolPtr returns a pointer to a copy of value; both true and false are
// pointed to (bool has no "absent" sentinel here).
func boolPtr(value bool) *bool {
	copied := value
	return &copied
}
// cloneIntPtr returns a pointer to a fresh copy of *value; nil in, nil out.
func cloneIntPtr(value *int) *int {
	if value == nil {
		return nil
	}
	out := *value
	return &out
}

// cloneStringPtr returns a pointer to a fresh copy of *value; nil in, nil out.
func cloneStringPtr(value *string) *string {
	if value == nil {
		return nil
	}
	out := *value
	return &out
}

View File

@@ -0,0 +1,121 @@
package sv
import (
"context"
"encoding/json"
"errors"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
)
// taskClassUpsertRPCTimeout bounds a single task-class upsert RPC call.
const taskClassUpsertRPCTimeout = 6 * time.Second

// TaskClassUpsertRPCClient describes the minimal task-class RPC capability the
// agent depends on when writing task classes.
//
// Responsibility boundaries:
//  1. only covers the add/update capability needed by the upsert_task_class tool;
//  2. does not expose the task-class DAO, transaction details, or the
//     migration-era direct schedule-write semantics;
//  3. read capabilities live on the schedule provider's separate interface to
//     keep this one small.
type TaskClassUpsertRPCClient interface {
	AddTaskClass(ctx context.Context, req taskclasscontracts.UpsertTaskClassRequest) (json.RawMessage, error)
	UpdateTaskClass(ctx context.Context, req taskclasscontracts.UpsertTaskClassRequest) (json.RawMessage, error)
}

// taskClassRPCUpsertAdapter adapts the RPC client to the function shape the
// agent tool layer expects.
type taskClassRPCUpsertAdapter struct {
	client TaskClassUpsertRPCClient
}
// NewTaskClassRPCUpsertFunc adapts a task-class zrpc client into the agent
// tool-layer write function.
//
// Responsibility boundaries:
//  1. only replaces the DAO-direct path of the agent's upsert_task_class;
//  2. input keeps reusing the already-normalized UserAddTaskClassRequest from
//     the agent tool layer;
//  3. with a nil client the returned closure fails on call, preserving the
//     tool layer's existing error-wrapping semantics.
func NewTaskClassRPCUpsertFunc(client TaskClassUpsertRPCClient) func(userID int, input agenttools.TaskClassUpsertInput) (agenttools.TaskClassUpsertPersistResult, error) {
	adapter := &taskClassRPCUpsertAdapter{client: client}
	return adapter.UpsertTaskClass
}
// UpsertTaskClass creates or updates a task class through the task-class zrpc
// and returns the stable task_class_id.
//
// input.ID == 0 routes to AddTaskClass, anything else to UpdateTaskClass; the
// call runs under a fresh background context bounded by
// taskClassUpsertRPCTimeout.
func (a *taskClassRPCUpsertAdapter) UpsertTaskClass(userID int, input agenttools.TaskClassUpsertInput) (agenttools.TaskClassUpsertPersistResult, error) {
	if a == nil || a.client == nil {
		return agenttools.TaskClassUpsertPersistResult{}, errors.New("task-class rpc client is nil")
	}
	req := taskClassUpsertInputToContract(userID, input)
	ctx, cancel := context.WithTimeout(context.Background(), taskClassUpsertRPCTimeout)
	defer cancel()
	var raw json.RawMessage
	var err error
	created := input.ID == 0
	// Purpose of the call: persist the agent-produced task class through the
	// task-class service so the agent stops writing task_classes/task_items
	// directly.
	if created {
		raw, err = a.client.AddTaskClass(ctx, req)
	} else {
		raw, err = a.client.UpdateTaskClass(ctx, req)
	}
	if err != nil {
		return agenttools.TaskClassUpsertPersistResult{}, err
	}
	var resp taskclasscontracts.UpsertTaskClassResponse
	// Guard empty/"null" payloads before decoding, matching the sibling RPC
	// adapters in this package: an empty body now surfaces the explicit
	// invalid-task_class_id error below instead of a raw JSON decode error.
	if len(raw) > 0 && string(raw) != "null" {
		if err := json.Unmarshal(raw, &resp); err != nil {
			return agenttools.TaskClassUpsertPersistResult{}, err
		}
	}
	if resp.TaskClassID <= 0 {
		return agenttools.TaskClassUpsertPersistResult{}, errors.New("task-class rpc upsert returned invalid task_class_id")
	}
	return agenttools.TaskClassUpsertPersistResult{
		TaskClassID: resp.TaskClassID,
		Created:     resp.Created,
	}, nil
}
// taskClassUpsertInputToContract maps the agent tool-layer upsert input onto
// the task-class RPC contract.
//
// All free-text fields are whitespace-trimmed, and slice fields are cloned so
// the contract request never aliases tool-layer state.
func taskClassUpsertInputToContract(userID int, input agenttools.TaskClassUpsertInput) taskclasscontracts.UpsertTaskClassRequest {
	req := input.Request
	items := make([]taskclasscontracts.UpsertTaskClassItemConfig, 0, len(req.Items))
	for _, item := range req.Items {
		items = append(items, taskclasscontracts.UpsertTaskClassItemConfig{
			ID:           item.ID,
			Order:        item.Order,
			Content:      strings.TrimSpace(item.Content),
			EmbeddedTime: toTaskClassContractTargetTime(item.EmbeddedTime),
		})
	}
	return taskclasscontracts.UpsertTaskClassRequest{
		UserID:             userID,
		TaskClassID:        input.ID,
		Name:               strings.TrimSpace(req.Name),
		StartDate:          strings.TrimSpace(req.StartDate),
		EndDate:            strings.TrimSpace(req.EndDate),
		Mode:               strings.TrimSpace(req.Mode),
		SubjectType:        strings.TrimSpace(req.SubjectType),
		DifficultyLevel:    strings.TrimSpace(req.DifficultyLevel),
		CognitiveIntensity: strings.TrimSpace(req.CognitiveIntensity),
		Config: taskclasscontracts.UpsertTaskClassConfig{
			TotalSlots:         req.Config.TotalSlots,
			AllowFillerCourse:  req.Config.AllowFillerCourse,
			Strategy:           strings.TrimSpace(req.Config.Strategy),
			ExcludedSlots:      append([]int(nil), req.Config.ExcludedSlots...),
			ExcludedDaysOfWeek: append([]int(nil), req.Config.ExcludedDaysOfWeek...),
		},
		Items: items,
	}
}
// toTaskClassContractTargetTime converts the shared model target time into the
// task-class contract shape; nil passes through as nil.
func toTaskClassContractTargetTime(value *model.TargetTime) *taskclasscontracts.TargetTime {
	if value == nil {
		return nil
	}
	return &taskclasscontracts.TargetTime{
		Week:        value.Week,
		DayOfWeek:   value.DayOfWeek,
		SectionFrom: value.SectionFrom,
		SectionTo:   value.SectionTo,
	}
}

View File

@@ -0,0 +1,194 @@
package sv
import (
"context"
"encoding/json"
"errors"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
taskcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/task"
)
// quickTaskCreateRPCTimeout bounds a single quick-task creation RPC call.
const quickTaskCreateRPCTimeout = 3 * time.Second

// TaskRPCClient describes the minimal capability the agent quick-task chain
// needs from the task zrpc.
//
// Responsibility boundaries:
//  1. only covers quick-task creation and listing; no task DAO or other write
//     endpoints are exposed;
//  2. the agent orchestration layer stays unaware of pb/grpc details;
//  3. errors are returned as-is; the quick_task node turns them into
//     user-facing failure copy.
type TaskRPCClient interface {
	AddTask(ctx context.Context, req taskcontracts.AddTaskRequest) (json.RawMessage, error)
	GetUserTasks(ctx context.Context, userID int) (json.RawMessage, error)
}

// taskRPCAdapter binds the RPC client to the QuickTaskDeps function shape.
type taskRPCAdapter struct {
	client TaskRPCClient
}
// NewTaskRPCQuickTaskDeps adapts a task zrpc client into the agent quick-task
// dependencies.
//
// Responsibility boundaries:
//  1. only replaces the task-DAO-direct path of the agent quick task;
//  2. does not migrate task-class upsert, the schedule provider, or the agent
//     orchestration itself;
//  3. a nil client returns zero-value deps, preserving the quick_task node's
//     existing "missing dependency is an error" semantics.
func NewTaskRPCQuickTaskDeps(client TaskRPCClient) agentmodel.QuickTaskDeps {
	if client == nil {
		return agentmodel.QuickTaskDeps{}
	}
	bridge := &taskRPCAdapter{client: client}
	deps := agentmodel.QuickTaskDeps{}
	deps.CreateTask = bridge.CreateTask
	deps.QueryTasks = bridge.QueryTasks
	return deps
}
// CreateTask creates a quadrant task over the task zrpc and returns the task_id.
//
// A fresh background context bounded by quickTaskCreateRPCTimeout is used, so
// the write is detached from any caller cancellation — NOTE(review): confirm
// this detachment is intentional.
func (a *taskRPCAdapter) CreateTask(
	userID int,
	title string,
	priorityGroup int,
	estimatedSections int,
	deadlineAt *time.Time,
	urgencyThresholdAt *time.Time,
) (int, error) {
	if a == nil || a.client == nil {
		return 0, errors.New("task rpc client is nil")
	}
	ctx, cancel := context.WithTimeout(context.Background(), quickTaskCreateRPCTimeout)
	defer cancel()
	// Purpose of the call: persist the structured task produced by the
	// quick_task node via the task service, so the agent stops writing the
	// tasks table directly.
	raw, err := a.client.AddTask(ctx, taskcontracts.AddTaskRequest{
		UserID:             userID,
		Title:              strings.TrimSpace(title),
		PriorityGroup:      priorityGroup,
		EstimatedSections:  estimatedSections,
		DeadlineAt:         deadlineAt,
		UrgencyThresholdAt: urgencyThresholdAt,
	})
	if err != nil {
		return 0, err
	}
	var resp taskcontracts.AddTaskResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		return 0, err
	}
	// Guard against a structurally valid but semantically empty response.
	if resp.ID <= 0 {
		return 0, errors.New("task rpc add task returned invalid task id")
	}
	return resp.ID, nil
}
// QueryTasks reads the user's tasks over the task zrpc and then reuses the
// agent-side filtering, sorting, and presentation semantics.
//
// respond.UserTasksEmpty from the RPC is mapped to an empty result rather than
// an error; empty or literal "null" payloads decode to no tasks.
func (a *taskRPCAdapter) QueryTasks(
	ctx context.Context,
	userID int,
	params agentmodel.TaskQueryParams,
) ([]agentmodel.TaskQueryResult, error) {
	if a == nil || a.client == nil {
		return nil, errors.New("task rpc client is nil")
	}
	raw, err := a.client.GetUserTasks(ctx, userID)
	if err != nil {
		// "No tasks" is a domain signal, not a failure.
		if errors.Is(err, respond.UserTasksEmpty) {
			return []agentmodel.TaskQueryResult{}, nil
		}
		return nil, err
	}
	var items []taskcontracts.TaskListItem
	if len(raw) > 0 && string(raw) != "null" {
		if err := json.Unmarshal(raw, &items); err != nil {
			return nil, err
		}
	}
	tasks := taskListItemsToModels(items)
	// Re-pack params into the query request consumed by the shared
	// filter/sort helpers.
	req := agentmodel.TaskQueryRequest{
		UserID:           userID,
		Quadrant:         params.Quadrant,
		SortBy:           params.SortBy,
		Order:            params.Order,
		Limit:            params.Limit,
		IncludeCompleted: params.IncludeCompleted,
		Keyword:          params.Keyword,
		DeadlineBefore:   params.DeadlineBefore,
		DeadlineAfter:    params.DeadlineAfter,
	}
	filtered := make([]model.Task, 0, len(tasks))
	for _, task := range tasks {
		if !taskMatchesQueryFilter(task, req) {
			continue
		}
		filtered = append(filtered, task)
	}
	sortTasksForQuery(filtered, req)
	// Apply the limit only after sorting so callers get the top-N.
	if req.Limit > 0 && len(filtered) > req.Limit {
		filtered = filtered[:req.Limit]
	}
	return taskModelsToQueryResults(filtered), nil
}
// taskListItemsToModels converts task RPC list items into shared task models;
// empty input yields nil.
//
// Deadline strings that fail every known layout become nil (see
// parseTaskListTime); EstimatedSections is normalized through the shared
// model helper so downstream sorting sees consistent values.
func taskListItemsToModels(items []taskcontracts.TaskListItem) []model.Task {
	if len(items) == 0 {
		return nil
	}
	result := make([]model.Task, 0, len(items))
	for _, item := range items {
		result = append(result, model.Task{
			ID:                 item.ID,
			UserID:             item.UserID,
			Title:              item.Title,
			Priority:           item.PriorityGroup,
			EstimatedSections:  model.NormalizeEstimatedSections(&item.EstimatedSections),
			IsCompleted:        item.IsCompleted,
			DeadlineAt:         parseTaskListTime(item.Deadline),
			UrgencyThresholdAt: parseTaskListTime(item.UrgencyThresholdAt),
		})
	}
	return result
}
// taskModelsToQueryResults converts filtered task models into the quick-task
// query result DTO.
//
// Deadlines are rendered in local time as "2006-01-02 15:04"; a missing
// deadline becomes the empty string. Empty input yields an empty (non-nil)
// slice.
func taskModelsToQueryResults(tasks []model.Task) []agentmodel.TaskQueryResult {
	if len(tasks) == 0 {
		return []agentmodel.TaskQueryResult{}
	}
	results := make([]agentmodel.TaskQueryResult, 0, len(tasks))
	for _, task := range tasks {
		deadlineStr := ""
		if task.DeadlineAt != nil {
			deadlineStr = task.DeadlineAt.In(time.Local).Format("2006-01-02 15:04")
		}
		results = append(results, agentmodel.TaskQueryResult{
			ID:                task.ID,
			Title:             task.Title,
			PriorityGroup:     task.Priority,
			EstimatedSections: model.NormalizeEstimatedSections(&task.EstimatedSections),
			IsCompleted:       task.IsCompleted,
			DeadlineAt:        deadlineStr,
		})
	}
	return results
}
// parseTaskListTime parses a task-list timestamp string into *time.Time.
//
// Blank input (after trimming) and unparseable input both yield nil. Layouts
// are tried from most to least specific; layouts without an explicit offset
// are interpreted in time.Local.
func parseTaskListTime(value string) *time.Time {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return nil
	}
	layouts := []string{
		time.RFC3339Nano,
		time.RFC3339,
		"2006-01-02 15:04:05",
		"2006-01-02 15:04",
	}
	for _, layout := range layouts {
		if ts, err := time.ParseInLocation(layout, trimmed, time.Local); err == nil {
			return &ts
		}
	}
	return nil
}

View File

@@ -0,0 +1,145 @@
package sv
import (
"context"
"sync"
einoCallbacks "github.com/cloudwego/eino/callbacks"
einoModel "github.com/cloudwego/eino/components/model"
"github.com/cloudwego/eino/schema"
templatecb "github.com/cloudwego/eino/utils/callbacks"
)
type requestTokenMeterCtxKey struct{}
// RequestTokenMeter is a per-request token usage accumulator.
//
// Design goals:
//  1. aggregate token usage from every model call inside one request
//     (routing / graph nodes / streaming main conversation);
//  2. be thread-safe so multiple pipeline nodes of the same request can
//     accumulate concurrently;
//  3. let the service layer read a single final snapshot and persist it.
type RequestTokenMeter struct {
	mu               sync.Mutex // guards the three counters below
	promptTokens     int
	completionTokens int
	totalTokens      int
}
// RequestTokenMeterSnapshot is a read-only snapshot of a RequestTokenMeter.
type RequestTokenMeterSnapshot struct {
	PromptTokens     int
	CompletionTokens int
	TotalTokens      int
}
var registerTokenMeterCallbackOnce sync.Once
// ensureTokenMeterCallbackRegistered registers the global ChatModel callback
// exactly once.
//
// Notes:
//  1. the callback only collects and accumulates token usage; it makes no
//     business decisions;
//  2. it only has an effect when the ctx carries a RequestTokenMeter;
//  3. sync.Once avoids duplicate registration across tests / repeated
//     service construction.
func ensureTokenMeterCallbackRegistered() {
	registerTokenMeterCallbackOnce.Do(func() {
		onEnd := func(ctx context.Context, _ *einoCallbacks.RunInfo, output *einoModel.CallbackOutput) context.Context {
			if output != nil && output.TokenUsage != nil {
				addModelUsageIntoRequest(ctx, output.TokenUsage)
			}
			return ctx
		}
		modelHandler := &templatecb.ModelCallbackHandler{OnEnd: onEnd}
		handler := templatecb.NewHandlerHelper().ChatModel(modelHandler).Handler()
		einoCallbacks.AppendGlobalHandlers(handler)
	})
}
// withRequestTokenMeter attaches a fresh request-level token meter to ctx,
// returning the derived context and the meter itself.
func withRequestTokenMeter(ctx context.Context) (context.Context, *RequestTokenMeter) {
	m := new(RequestTokenMeter)
	ctx = context.WithValue(ctx, requestTokenMeterCtxKey{}, m)
	return ctx, m
}
// getRequestTokenMeter returns the token meter carried by ctx, or nil when
// ctx is nil or holds no meter.
func getRequestTokenMeter(ctx context.Context) *RequestTokenMeter {
	if ctx == nil {
		return nil
	}
	if m, ok := ctx.Value(requestTokenMeterCtxKey{}).(*RequestTokenMeter); ok {
		return m
	}
	return nil
}
// addSchemaUsageIntoRequest folds a schema-level usage record into the
// request meter; a nil usage is ignored.
func addSchemaUsageIntoRequest(ctx context.Context, usage *schema.TokenUsage) {
	if usage == nil {
		return
	}
	total := normalizeUsageTotal(usage.TotalTokens, usage.PromptTokens, usage.CompletionTokens)
	addTokenUsageValues(ctx, usage.PromptTokens, usage.CompletionTokens, total)
}
// addModelUsageIntoRequest folds an Eino model callback usage record into the
// request meter; a nil usage is ignored.
func addModelUsageIntoRequest(ctx context.Context, usage *einoModel.TokenUsage) {
	if usage == nil {
		return
	}
	total := normalizeUsageTotal(usage.TotalTokens, usage.PromptTokens, usage.CompletionTokens)
	addTokenUsageValues(ctx, usage.PromptTokens, usage.CompletionTokens, total)
}
// addTokenUsageValues accumulates one token-usage sample into the request
// meter. Negative inputs are clamped to zero; when the ctx carries no meter
// the call is a no-op.
func addTokenUsageValues(ctx context.Context, promptTokens, completionTokens, totalTokens int) {
	meter := getRequestTokenMeter(ctx)
	if meter == nil {
		return
	}
	clamp := func(v int) int {
		if v < 0 {
			return 0
		}
		return v
	}
	meter.mu.Lock()
	defer meter.mu.Unlock()
	meter.promptTokens += clamp(promptTokens)
	meter.completionTokens += clamp(completionTokens)
	meter.totalTokens += clamp(totalTokens)
}
// snapshotRequestTokenMeter returns a consistent copy of the request meter's
// counters; when ctx carries no meter the zero snapshot is returned.
func snapshotRequestTokenMeter(ctx context.Context) RequestTokenMeterSnapshot {
	var snap RequestTokenMeterSnapshot
	meter := getRequestTokenMeter(ctx)
	if meter == nil {
		return snap
	}
	meter.mu.Lock()
	snap.PromptTokens = meter.promptTokens
	snap.CompletionTokens = meter.completionTokens
	snap.TotalTokens = meter.totalTokens
	meter.mu.Unlock()
	return snap
}
// normalizeUsageTotal reconciles the total token count.
//
// Rules:
//  1. a positive total reported by the model wins;
//  2. otherwise fall back to prompt+completion, floored at zero.
func normalizeUsageTotal(totalTokens, promptTokens, completionTokens int) int {
	if totalTokens > 0 {
		return totalTokens
	}
	if fallback := promptTokens + completionTokens; fallback > 0 {
		return fallback
	}
	return 0
}