Files
smartmate/backend/service/agentsvc/agent_newagent.go
LoveLosita 32bb740b75 Version: 0.9.3.dev.260407
后端:
    1.Execute 上下文修复(无限循环 / 重复确认根治)
      - 更新node/execute.go:speak 写入历史(修复旧 TODO);confirm 动作 speak 不再丢失;
        continue 无工具调用时写 reason 保证上下文推进;区分 tool_call 数组/JSON损坏两种
        correction hint;goal_check hint 区分 plan/ReAct 模式
      - 更新node/execute.go:新增 AlwaysExecute 字段,extra.always_execute=true 时写工具
        跳过确认闸门直接执行并持久化
      - 更新model/graph_run_state.go:AgentGraphRequest 新增 AlwaysExecute;新增
        WriteSchedulePreviewFunc 类型和 WriteSchedulePreview Dep
      - 更新service/agentsvc/agent.go:新增 readAgentExtraBool 辅助

    2.粗排全链路修复
      - 更新service/agentsvc/agent_newagent.go:makeRoughBuildFunc 改用 HybridScheduleEntry
        而非 TaskClassItem.EmbeddedTime,普通时段放置不再被丢弃
      - 更新conv/schedule_provider.go:LoadScheduleState 从 task class 日期范围推算多周
        规划窗口,不再硬编码当前周 7 天;DayMapping 覆盖全部相关周,粗排跨周结果不再
        被 WeekDayToDay 静默丢弃
      - 更新node/rough_build.go:pinned block 区分有/无未覆盖 pending 任务两种情况,
        有 pending 时明确操作顺序(find_free→place)和完成判定,防止 LLM 重复调
        list_tasks;新增 countPendingTasks 辅助(只统计 Slots 为空的真正未覆盖任务)
      - 更新model/common_state.go:新增 StartDirectExecute(),Chat 直接路由 execute 时
        清空旧 PlanSteps,修复跨会话 HasPlan() 误判导致 ReAct 走 plan 模式的 bug
      - 更新node/chat.go:handleRouteExecute 改用 StartDirectExecute()

    3.排程预览缓存迁移至 Deliver 节点
      - 更新node/agent_nodes.go:Deliver 节点完成后调用 WriteSchedulePreview,只有任务
        真正完成才写预览缓存,中断路径不写中间态
      - 更新service/agentsvc/agent_newagent.go:注入 makeWriteSchedulePreviewFunc;移除
        graph 结束后的内联写入;makeRoughBuildFunc 注释修正
      - 更新conv/schedule_preview.go:ScheduleStateToPreview 补设 GeneratedAt
      - 更新model/agent.go:GetSchedulePlanPreviewResponse 新增 HybridEntries 字段
      - 更新service/agentsvc/agent_schedule_preview.go:GET handler Redis/MySQL 两条路径
        均透传 HybridEntries

    4.Execute thinking 模式修复
      - 更新newAgent/llm/ark_adapter.go:thinking 开启时强制 temperature=1,MaxTokens 自
        动托底至 16000,调用方与适配层行为对齐
      - 更新node/execute.go:调用参数同步改为 temperature=1.0 / MaxTokens=16000

    undo:
    1.流式推送换行未修复(undo)
    2.上下文依然待审视

前端:无
仓库:无
2026-04-07 12:10:56 +08:00

490 lines
18 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
package agentsvc
import (
"context"
"fmt"
"log"
"strings"
"time"
newagentgraph "github.com/LoveLosita/smartflow/backend/newAgent/graph"
newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/cloudwego/eino/schema"
agentchat "github.com/LoveLosita/smartflow/backend/agent/chat"
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/respond"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
)
// runNewAgentGraph runs the newAgent generic graph, directly replacing the old
// agent routing logic.
//
// Responsibilities:
//  1. Build the AgentGraphRunInput (RuntimeState, ConversationContext, Request, Deps).
//  2. Adapt outChan into a ChunkEmitter.
//  3. Invoke graph.RunAgentGraph.
//  4. Persist chat history (reusing the existing persistence logic).
//
// Design principles:
//  1. Goes straight through the newAgent graph; the old agentrouter decision is bypassed.
//  2. All task types (chat, task, quick_note) are decided by the LLM inside the graph.
//  3. State recovery, tool execution and confirmation flows are all handled by graph nodes.
func (s *AgentService) runNewAgentGraph(
	ctx context.Context,
	userMessage string,
	ifThinking bool,
	modelName string,
	userID int,
	chatID string,
	extra map[string]any,
	traceID string,
	requestStart time.Time,
	outChan chan<- string,
	errChan chan error,
) {
	requestCtx, _ := withRequestTokenMeter(ctx)
	// 1. Normalize the conversation ID and resolve the chat model.
	chatID = normalizeConversationID(chatID)
	_, resolvedModelName := s.pickChatModel(modelName)
	// 2. Ensure the conversation exists (cache first, falling back to the DB).
	result, err := s.agentCache.GetConversationStatus(requestCtx, chatID)
	if err != nil {
		pushErrNonBlocking(errChan, err)
		return
	}
	if !result {
		innerResult, ifErr := s.repo.IfChatExists(requestCtx, userID, chatID)
		if ifErr != nil {
			pushErrNonBlocking(errChan, ifErr)
			return
		}
		if !innerResult {
			if _, err = s.repo.CreateNewChat(userID, chatID); err != nil {
				pushErrNonBlocking(errChan, err)
				return
			}
		}
		// Cache failure here is non-fatal: the conversation already exists in the DB.
		if err = s.agentCache.SetConversationStatus(requestCtx, chatID); err != nil {
			log.Printf("设置会话状态缓存失败 chat=%s: %v", chatID, err)
		}
	}
	// 3. Build the retry metadata for this turn.
	retryMeta, err := s.buildChatRetryMeta(requestCtx, userID, chatID, extra)
	if err != nil {
		pushErrNonBlocking(errChan, err)
		return
	}
	// 4. Load or create the RuntimeState from the StateStore.
	// In resume scenarios (confirm/ask_user) this also yields the ConversationContext
	// saved in the snapshot, which contains intermediate messages (tool calls/results)
	// needed to keep the subsequent LLM message chain well-formed.
	runtimeState, savedConversationContext := s.loadOrCreateRuntimeState(requestCtx, chatID, userID)
	// 5. Build the ConversationContext.
	// Prefer the context restored from the snapshot (includes tool calls/results);
	// fall back to the Redis LLM history cache when there is no snapshot.
	var conversationContext *newagentmodel.ConversationContext
	if savedConversationContext != nil {
		conversationContext = savedConversationContext
		// Append this turn's user input to the restored context
		// (mirrors loadConversationContext's behavior).
		if strings.TrimSpace(userMessage) != "" {
			conversationContext.AppendHistory(schema.UserMessage(userMessage))
		}
	} else {
		conversationContext = s.loadConversationContext(requestCtx, chatID, userMessage)
	}
	// 5.5 If extra carries task_class_ids, validate and write them into CommonState
	// (only when not already set, so they persist across turns).
	// Validation: LoadTaskClassMetas → GetCompleteTaskClassesByIDs checks every ID
	// exists and belongs to the current user; on failure, WrongTaskClassID
	// (code=40040) is pushed to errChan and surfaces as an SSE error event.
	if taskClassIDs := readAgentExtraIntSlice(extra, "task_class_ids"); len(taskClassIDs) > 0 {
		cs := runtimeState.EnsureCommonState()
		if len(cs.TaskClassIDs) == 0 {
			if s.scheduleProvider == nil {
				pushErrNonBlocking(errChan, respond.WrongTaskClassID)
				return
			}
			metas, metaErr := s.scheduleProvider.LoadTaskClassMetas(requestCtx, userID, taskClassIDs)
			if metaErr != nil {
				pushErrNonBlocking(errChan, respond.WrongTaskClassID)
				return
			}
			cs.TaskClassIDs = taskClassIDs
			cs.TaskClasses = metas
		}
	}
	// 6. Build the AgentGraphRequest.
	var confirmAction string
	if len(extra) > 0 {
		confirmAction = readAgentExtraString(extra, "confirm_action")
	}
	graphRequest := newagentmodel.AgentGraphRequest{
		UserInput:     userMessage,
		ConfirmAction: confirmAction,
		AlwaysExecute: readAgentExtraBool(extra, "always_execute"),
	}
	graphRequest.Normalize()
	// 7. Adapt LLM clients (AIHub's ark.ChatModel → newAgent LLM Client).
	chatClient := newagentllm.WrapArkClient(s.AIHub.Worker)
	planClient := newagentllm.WrapArkClient(s.AIHub.Worker)
	executeClient := newagentllm.WrapArkClient(s.AIHub.Worker)
	deliverClient := newagentllm.WrapArkClient(s.AIHub.Worker)
	// 8. Adapt the SSE emitter.
	sseEmitter := newagentstream.NewSSEPayloadEmitter(outChan)
	chunkEmitter := newagentstream.NewChunkEmitter(sseEmitter, traceID, resolvedModelName, requestStart.Unix())
	// 9. Build AgentGraphDeps (dependencies injected from cmd/start.go).
	deps := newagentmodel.AgentGraphDeps{
		ChatClient:           chatClient,
		PlanClient:           planClient,
		ExecuteClient:        executeClient,
		DeliverClient:        deliverClient,
		ChunkEmitter:         chunkEmitter,
		StateStore:           s.agentStateStore,
		ToolRegistry:         s.toolRegistry,
		ScheduleProvider:     s.scheduleProvider,
		SchedulePersistor:    s.schedulePersistor,
		RoughBuildFunc:       s.makeRoughBuildFunc(),
		WriteSchedulePreview: s.makeWriteSchedulePreviewFunc(),
	}
	// 10. Assemble the run input and run the graph.
	runInput := newagentmodel.AgentGraphRunInput{
		RuntimeState:        runtimeState,
		ConversationContext: conversationContext,
		Request:             graphRequest,
		Deps:                deps,
	}
	finalState, graphErr := newagentgraph.RunAgentGraph(requestCtx, runInput)
	if graphErr != nil {
		log.Printf("[ERROR] newAgent graph 执行失败 trace=%s chat=%s: %v", traceID, chatID, graphErr)
		pushErrNonBlocking(errChan, fmt.Errorf("graph 执行失败: %w", graphErr))
		// On graph failure, fall back to plain chat to keep the service usable.
		s.runNormalChatFlow(requestCtx, s.AIHub.Worker, resolvedModelName, userMessage, "", nil, retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
		return
	}
	// 11. Persist chat history (user message + assistant reply).
	s.persistChatAfterGraph(requestCtx, userID, chatID, userMessage, finalState, retryMeta, requestStart, outChan, errChan)
	// 11.5. Publish the final state snapshot for async MySQL persistence (via outbox).
	// The Deliver node has already saved the snapshot to Redis (2h TTL); this path
	// writes it to MySQL asynchronously for permanent storage.
	if finalState != nil {
		snapshot := &newagentmodel.AgentStateSnapshot{
			RuntimeState:        finalState.EnsureRuntimeState(),
			ConversationContext: finalState.EnsureConversationContext(),
		}
		eventsvc.PublishAgentStateSnapshot(requestCtx, s.eventPublisher, snapshot, chatID, userID)
	}
	// The schedule preview cache is written by the Deliver node (via the injected
	// WriteSchedulePreview func) so that only genuinely completed runs write it;
	// interrupted paths never persist intermediate state.
	// 12. Emit the OpenAI-compatible stream terminator so the client knows the stream is done.
	_ = chunkEmitter.EmitDone()
	// 13. Generate the conversation title asynchronously.
	s.ensureConversationTitleAsync(userID, chatID)
}
// loadOrCreateRuntimeState restores the runtime state for chatID from the
// StateStore, or builds a fresh one when no usable snapshot exists.
//
// Returns:
//   - *AgentRuntimeState: the persistable flow state.
//   - *ConversationContext: the full conversation context saved in the snapshot
//     (including tool calls/results); non-nil only when an existing snapshot is
//     restored, nil for a brand-new conversation.
//
// Rationale:
//  1. The snapshot's ConversationContext carries the intermediate messages
//     produced during graph execution (tool calls, tool results) that never
//     reach the Redis LLM history cache.
//  2. Resume scenarios (confirm/ask_user) must therefore use the snapshot's
//     context — otherwise tool results are lost and later LLM calls receive
//     dangling Tool messages, which the API rejects (connection drops).
func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID string, userID int) (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext) {
	fresh := func() (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext) {
		state := newagentmodel.NewAgentRuntimeState(nil)
		common := state.EnsureCommonState()
		common.UserID = userID
		common.ConversationID = chatID // saveAgentState relies on this field to decide persistence
		return state, nil
	}
	if s.agentStateStore == nil {
		return fresh()
	}
	snapshot, ok, err := s.agentStateStore.Load(ctx, chatID)
	hasRuntime := snapshot != nil && snapshot.RuntimeState != nil
	log.Printf("[DEBUG] loadOrCreateRuntimeState chatID=%s ok=%v err=%v hasRuntime=%v hasPending=%v hasCtx=%v",
		chatID, ok, err,
		hasRuntime,
		hasRuntime && snapshot.RuntimeState.HasPendingInteraction(),
		snapshot != nil && snapshot.ConversationContext != nil,
	)
	if err != nil {
		log.Printf("加载 agent 状态失败 chat=%s: %v", chatID, err)
		return fresh()
	}
	if !ok || !hasRuntime {
		return fresh()
	}
	// Restore the saved state, forcing identity fields to match this request.
	common := snapshot.RuntimeState.EnsureCommonState()
	common.UserID = userID
	common.ConversationID = chatID
	// Phase is deliberately left untouched: every request goes through the Chat
	// node first, and Chat overrides Phase based on its routing decision.
	// Keeping the full RuntimeState (PlanSteps, CurrentStep, ...) supports
	// multi-turn schedule adjustments within the same conversation.
	return snapshot.RuntimeState, snapshot.ConversationContext
}
// loadConversationContext loads the chat history and assembles a ConversationContext.
func (s *AgentService) loadConversationContext(ctx context.Context, chatID, userMessage string) *newagentmodel.ConversationContext {
	// Try the Redis history cache first; a cache error is logged and treated as a miss.
	history, err := s.agentCache.GetHistory(ctx, chatID)
	if err != nil {
		log.Printf("加载历史失败 chat=%s: %v", chatID, err)
		history = nil
	}
	// Cache miss: fall back to the DB and backfill Redis on success.
	if history == nil {
		records, dbErr := s.repo.GetUserChatHistories(ctx, 0, pkg.HistoryFetchLimitByModel("worker"), chatID)
		if dbErr != nil {
			log.Printf("从 DB 加载历史失败 chat=%s: %v", chatID, dbErr)
		} else {
			history = conv.ToEinoMessages(records)
			// Best-effort backfill; a failure only costs the next cache hit.
			if backfillErr := s.agentCache.BackfillHistory(ctx, chatID, history); backfillErr != nil {
				log.Printf("回填历史到 Redis 失败 chat=%s: %v", chatID, backfillErr)
			}
		}
	}
	// Assemble the context: system prompt, restored history, then this turn's input.
	cc := newagentmodel.NewConversationContext(agentchat.SystemPrompt)
	if history != nil {
		cc.ReplaceHistory(history)
	}
	if strings.TrimSpace(userMessage) != "" {
		cc.AppendHistory(schema.UserMessage(userMessage))
	}
	return cc
}
// persistChatAfterGraph persists the chat history (user message + assistant
// reply) after the graph run completes.
//
// For each message the write order is deliberate: LLM-context Redis first,
// then the DB, and finally the optimistic UI history cache.
func (s *AgentService) persistChatAfterGraph(
	ctx context.Context,
	userID int,
	chatID string,
	userMessage string,
	finalState *newagentmodel.AgentGraphState,
	retryMeta *chatRetryMeta,
	requestStart time.Time,
	outChan chan<- string,
	errChan chan error,
) {
	// Nothing to persist when the graph produced no final state.
	if finalState == nil {
		return
	}
	// 1. Persist the user message: LLM-context Redis → DB → UI history cache.
	userMsg := &schema.Message{Role: schema.User, Content: userMessage}
	if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
		userMsg.Extra = retryExtra
	}
	// Redis failure is non-fatal: the DB write below is the source of truth.
	if err := s.agentCache.PushMessage(ctx, chatID, userMsg); err != nil {
		log.Printf("写入用户消息到 LLM 上下文 Redis 失败 chat=%s: %v", chatID, err)
	}
	userPayload := model.ChatHistoryPersistPayload{
		UserID:                      userID,
		ConversationID:              chatID,
		Role:                        "user",
		Message:                     userMessage,
		ReasoningContent:            "",
		ReasoningDurationSeconds:    0,
		RetryGroupID:                retryMeta.GroupIDPtr(),
		RetryIndex:                  retryMeta.IndexPtr(),
		RetryFromUserMessageID:      retryMeta.FromUserMessageIDPtr(),
		RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
		TokensConsumed:              0,
	}
	if err := s.PersistChatHistory(ctx, userPayload); err != nil {
		pushErrNonBlocking(errChan, err)
	}
	userCreatedAt := time.Now()
	s.appendConversationHistoryCacheOptimistically(
		context.Background(),
		userID,
		chatID,
		buildOptimisticConversationHistoryItem("user", userMessage, "", 0, retryMeta, userCreatedAt),
	)
	// 2. Extract the assistant reply: the last assistant-role message in the
	// ConversationContext history. No assistant message means nothing to persist.
	conversationContext := finalState.ConversationContext
	if conversationContext == nil || len(conversationContext.History) == 0 {
		return
	}
	var lastAssistantMsg *schema.Message
	for i := len(conversationContext.History) - 1; i >= 0; i-- {
		msg := conversationContext.History[i]
		if msg.Role == schema.Assistant {
			lastAssistantMsg = msg
			break
		}
	}
	if lastAssistantMsg == nil {
		return
	}
	assistantReply := lastAssistantMsg.Content
	reasoningContent := lastAssistantMsg.ReasoningContent
	var reasoningDurationSeconds int
	if lastAssistantMsg.Extra != nil {
		// NOTE(review): assumes the duration was stored as float64 (e.g. after a
		// JSON round-trip); a plain int value would be silently dropped — confirm
		// against whoever writes this Extra key.
		if dur, ok := lastAssistantMsg.Extra["reasoning_duration_seconds"].(float64); ok {
			reasoningDurationSeconds = int(dur)
		}
	}
	// 3. Persist the assistant message: LLM-context Redis → DB → UI history cache.
	assistantMsg := &schema.Message{
		Role:             schema.Assistant,
		Content:          assistantReply,
		ReasoningContent: reasoningContent,
	}
	if reasoningDurationSeconds > 0 {
		assistantMsg.Extra = map[string]any{"reasoning_duration_seconds": reasoningDurationSeconds}
	}
	if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
		if assistantMsg.Extra == nil {
			assistantMsg.Extra = make(map[string]any)
		}
		for k, v := range retryExtra {
			assistantMsg.Extra[k] = v
		}
	}
	// NOTE(review): context.Background() here (unlike the request ctx used for the
	// user-message push above) — presumably so the write completes even if the
	// request context was cancelled; confirm the asymmetry is intentional.
	if err := s.agentCache.PushMessage(context.Background(), chatID, assistantMsg); err != nil {
		log.Printf("写入助手消息到 LLM 上下文 Redis 失败 chat=%s: %v", chatID, err)
	}
	requestTotalTokens := snapshotRequestTokenMeter(ctx).TotalTokens
	assistantPayload := model.ChatHistoryPersistPayload{
		UserID:                      userID,
		ConversationID:              chatID,
		Role:                        "assistant",
		Message:                     assistantReply,
		ReasoningContent:            reasoningContent,
		ReasoningDurationSeconds:    reasoningDurationSeconds,
		RetryGroupID:                retryMeta.GroupIDPtr(),
		RetryIndex:                  retryMeta.IndexPtr(),
		RetryFromUserMessageID:      retryMeta.FromUserMessageIDPtr(),
		RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
		TokensConsumed:              requestTotalTokens,
	}
	if err := s.PersistChatHistory(ctx, assistantPayload); err != nil {
		pushErrNonBlocking(errChan, err)
	} else {
		// Only mirror to the UI cache when the DB write succeeded.
		s.appendConversationHistoryCacheOptimistically(
			context.Background(),
			userID,
			chatID,
			buildOptimisticConversationHistoryItem(
				"assistant",
				assistantReply,
				reasoningContent,
				reasoningDurationSeconds,
				retryMeta,
				time.Now(),
			),
		)
	}
}
// makeRoughBuildFunc adapts the service's HybridScheduleWithPlanMultiFunc into
// the newAgent-layer RoughBuildFunc, converting its result entries into
// RoughBuildPlacement values. When HybridScheduleWithPlanMultiFunc is not
// injected it returns nil, and the RoughBuild node silently skips rough placement.
//
// Fix note:
// The old implementation consumed the second return value ([]TaskClassItem) and
// only produced placements for entries with EmbeddedTime != nil (embedded light
// courses), dropping every task placed into a normal slot.
// The correct approach consumes the first return value ([]HybridScheduleEntry),
// keeping entries with Status="suggested" and TaskItemID>0, so both embedded
// and non-embedded rough-build results are written into ScheduleState.
func (s *AgentService) makeRoughBuildFunc() newagentmodel.RoughBuildFunc {
	if s.HybridScheduleWithPlanMultiFunc == nil {
		return nil
	}
	return func(ctx context.Context, userID int, taskClassIDs []int) ([]newagentmodel.RoughBuildPlacement, error) {
		entries, _, err := s.HybridScheduleWithPlanMultiFunc(ctx, userID, taskClassIDs)
		if err != nil {
			return nil, err
		}
		out := make([]newagentmodel.RoughBuildPlacement, 0, len(entries))
		for _, e := range entries {
			// Keep only suggested entries that are tied to a concrete task item.
			if e.Status == "suggested" && e.TaskItemID != 0 {
				out = append(out, newagentmodel.RoughBuildPlacement{
					TaskItemID:  e.TaskItemID,
					Week:        e.Week,
					DayOfWeek:   e.DayOfWeek,
					SectionFrom: e.SectionFrom,
					SectionTo:   e.SectionTo,
				})
			}
		}
		return out, nil
	}
}
// makeWriteSchedulePreviewFunc wraps the cacheDAO schedule-preview write so it
// can be injected into the Deliver node; returns nil when no cacheDAO is wired.
func (s *AgentService) makeWriteSchedulePreviewFunc() newagentmodel.WriteSchedulePreviewFunc {
	if s.cacheDAO == nil {
		return nil
	}
	return func(ctx context.Context, state *newagenttools.ScheduleState, userID int, conversationID string, taskClassIDs []int) error {
		p := conv.ScheduleStateToPreview(state, userID, conversationID, taskClassIDs, "")
		if p == nil {
			// Nothing worth caching; treat as a successful no-op.
			return nil
		}
		return s.cacheDAO.SetSchedulePlanPreviewToCache(ctx, userID, conversationID, p)
	}
}
// --- Dependency-injection setters ---

// SetToolRegistry injects the tool registry (wired from cmd/start.go).
func (s *AgentService) SetToolRegistry(registry *newagenttools.ToolRegistry) {
	s.toolRegistry = registry
}
// SetScheduleProvider injects the schedule state provider (wired from cmd/start.go).
func (s *AgentService) SetScheduleProvider(provider newagentmodel.ScheduleStateProvider) {
	s.scheduleProvider = provider
}
// SetSchedulePersistor injects the schedule persistor (wired from cmd/start.go).
func (s *AgentService) SetSchedulePersistor(persistor newagentmodel.SchedulePersistor) {
	s.schedulePersistor = persistor
}
// SetAgentStateStore injects the agent state store (wired from cmd/start.go).
func (s *AgentService) SetAgentStateStore(store newagentmodel.AgentStateStore) {
	s.agentStateStore = store
}