Files
smartmate/backend/newAgent/node/deliver.go
LoveLosita 32bb740b75 Version: 0.9.3.dev.260407
后端:
    1.Execute 上下文修复(无限循环 / 重复确认根治)
      - 更新node/execute.go:speak 写入历史(修复旧 TODO);confirm 动作 speak 不再丢失;
        continue 无工具调用时写 reason 保证上下文推进;区分 tool_call 数组/JSON损坏两种
        correction hint;goal_check hint 区分 plan/ReAct 模式
      - 更新node/execute.go:新增 AlwaysExecute 字段,extra.always_execute=true 时写工具
        跳过确认闸门直接执行并持久化
      - 更新model/graph_run_state.go:AgentGraphRequest 新增 AlwaysExecute;新增
        WriteSchedulePreviewFunc 类型和 WriteSchedulePreview Dep
      - 更新service/agentsvc/agent.go:新增 readAgentExtraBool 辅助

    2.粗排全链路修复
      - 更新service/agentsvc/agent_newagent.go:makeRoughBuildFunc 改用 HybridScheduleEntry
        而非 TaskClassItem.EmbeddedTime,普通时段放置不再被丢弃
      - 更新conv/schedule_provider.go:LoadScheduleState 从 task class 日期范围推算多周
        规划窗口,不再硬编码当前周 7 天;DayMapping 覆盖全部相关周,粗排跨周结果不再
        被 WeekDayToDay 静默丢弃
      - 更新node/rough_build.go:pinned block 区分有/无未覆盖 pending 任务两种情况,
        有 pending 时明确操作顺序(find_free→place)和完成判定,防止 LLM 重复调
        list_tasks;新增 countPendingTasks 辅助(只统计 Slots 为空的真正未覆盖任务)
      - 更新model/common_state.go:新增 StartDirectExecute(),Chat 直接路由 execute 时
        清空旧 PlanSteps,修复跨会话 HasPlan() 误判导致 ReAct 走 plan 模式的 bug
      - 更新node/chat.go:handleRouteExecute 改用 StartDirectExecute()

    3.排程预览缓存迁移至 Deliver 节点
      - 更新node/agent_nodes.go:Deliver 节点完成后调用 WriteSchedulePreview,只有任务
        真正完成才写预览缓存,中断路径不写中间态
      - 更新service/agentsvc/agent_newagent.go:注入 makeWriteSchedulePreviewFunc;移除
        graph 结束后的内联写入;makeRoughBuildFunc 注释修正
      - 更新conv/schedule_preview.go:ScheduleStateToPreview 补设 GeneratedAt
      - 更新model/agent.go:GetSchedulePlanPreviewResponse 新增 HybridEntries 字段
      - 更新service/agentsvc/agent_schedule_preview.go:GET handler Redis/MySQL 两条路径
        均透传 HybridEntries

    4.Execute thinking 模式修复
      - 更新newAgent/llm/ark_adapter.go:thinking 开启时强制 temperature=1,MaxTokens 自
        动托底至 16000,调用方与适配层行为对齐
      - 更新node/execute.go:调用参数同步改为 temperature=1.0 / MaxTokens=16000

    undo:
    1.流式推送换行未修复(undo)
    2.上下文依然待审视

前端:无
仓库:无
2026-04-07 12:10:56 +08:00

185 lines
5.2 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
package newagentnode
import (
"context"
"fmt"
"strings"
"time"
"github.com/cloudwego/eino/schema"
newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
)
// Stream-block identifiers for the deliver stage. Status and speak chunks are
// keyed separately so the frontend can render progress and the final summary
// as distinct blocks.
const (
	deliverStageName     = "deliver"        // stage label attached to every emitted chunk
	deliverStatusBlockID = "deliver.status" // block carrying "summarizing"/"done" status updates
	deliverSpeakBlockID  = "deliver.speak"  // block carrying the pseudo-streamed summary text
)
// DeliverNodeInput describes the minimal dependencies for one round of the
// deliver node.
//
// Responsibility boundaries:
//  1. Only generates the delivery summary and pushes it to the user; it does
//     not drive any subsequent flow steps;
//  2. RuntimeState provides the plan steps and execution status;
//  3. ConversationContext provides the execution-phase dialogue history;
//  4. After delivery completes, the flow is marked as finished.
type DeliverNodeInput struct {
	RuntimeState        *newagentmodel.AgentRuntimeState    // required; holds the shared flow state
	ConversationContext *newagentmodel.ConversationContext  // optional; defaulted to an empty context when nil
	Client              *newagentllm.Client                 // optional; nil triggers the mechanical-summary fallback
	ChunkEmitter        *newagentstream.ChunkEmitter        // optional; defaulted to a no-op emitter when nil
}
// RunDeliverNode runs one round of the deliver-node logic.
//
// Core responsibilities:
//  1. Ask the LLM for a delivery summary based on the original plan plus the
//     execution history;
//  2. Pseudo-stream the summary to the user;
//  3. Append the summary to the conversation history so context stays
//     continuous;
//  4. Mark the flow as finished.
//
// Degradation strategy: when the LLM call fails, generateDeliverSummary falls
// back to a mechanically formatted summary (plan steps plus progress), so the
// flow is never interrupted by summary generation.
func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
	state, convCtx, emitter, err := prepareDeliverNodeInput(input)
	if err != nil {
		return err
	}
	common := state.EnsureCommonState()

	// 1. Announce the deliver stage so the frontend knows a summary is coming.
	statusErr := emitter.EmitStatus(
		deliverStatusBlockID,
		deliverStageName,
		"summarizing",
		"正在生成交付总结。",
		false,
	)
	if statusErr != nil {
		return fmt.Errorf("交付阶段状态推送失败: %w", statusErr)
	}

	// 2. Produce the summary (LLM first, mechanical fallback inside).
	summary := generateDeliverSummary(ctx, input.Client, common, convCtx)

	// 3. Pseudo-stream it and record it in history, unless it is blank.
	if strings.TrimSpace(summary) != "" {
		speakErr := emitter.EmitPseudoAssistantText(
			ctx,
			deliverSpeakBlockID,
			deliverStageName,
			summary,
			newagentstream.DefaultPseudoStreamOptions(),
		)
		if speakErr != nil {
			return fmt.Errorf("交付总结推送失败: %w", speakErr)
		}
		convCtx.AppendHistory(schema.AssistantMessage(summary, nil))
	}

	// 4. Push the terminal status; a failure here is deliberately ignored —
	// the summary has already been delivered, so this is best effort.
	_ = emitter.EmitStatus(
		deliverStatusBlockID,
		deliverStageName,
		"done",
		"任务已完成。",
		true,
	)

	// 5. Mark the whole flow as finished.
	common.Done()
	return nil
}
// generateDeliverSummary asks the LLM for a delivery summary; whenever the
// call is impossible (nil client), fails, or yields blank text, it degrades
// to the mechanical summary built from the flow state.
func generateDeliverSummary(
	ctx context.Context,
	client *newagentllm.Client,
	flowState *newagentmodel.CommonState,
	conversationContext *newagentmodel.ConversationContext,
) string {
	// No client wired in: degrade immediately.
	if client == nil {
		return buildMechanicalSummary(flowState)
	}

	prompt := newagentprompt.BuildDeliverMessages(flowState, conversationContext)
	opts := newagentllm.GenerateOptions{
		Temperature: 0.5,
		MaxTokens:   800,
		// Summaries do not need extended reasoning.
		Thinking: newagentllm.ThinkingModeDisabled,
		Metadata: map[string]any{
			"stage": deliverStageName,
		},
	}
	result, err := client.GenerateText(ctx, prompt, opts)

	// Treat an error, a nil result, and whitespace-only text all as
	// "nothing usable" and fall back.
	if err != nil || result == nil || strings.TrimSpace(result.Text) == "" {
		return buildMechanicalSummary(flowState)
	}
	return normalizeSpeak(result.Text)
}
// buildMechanicalSummary assembles a minimal usable summary without an LLM:
// an outcome line, a checklist of plan steps, and (when rounds were
// exhausted) a hint that the user can ask to continue.
func buildMechanicalSummary(state *newagentmodel.CommonState) string {
	if state == nil {
		return "任务流程已结束。"
	}
	done, total := state.PlanProgress()
	if !state.HasPlan() {
		return "任务流程已结束。"
	}
	exhausted := state.Exhausted()

	var b strings.Builder
	if exhausted {
		b.WriteString(fmt.Sprintf("任务因执行轮次耗尽提前结束,已完成 %d/%d 步。\n", done, total))
	} else {
		b.WriteString("所有计划步骤已执行完毕。\n")
	}

	b.WriteString("\n执行情况\n")
	for idx, step := range state.PlanSteps {
		// Steps before the progress cursor are considered completed.
		box := "[ ]"
		if idx < done {
			box = "[x]"
		}
		b.WriteString(fmt.Sprintf("%s %s\n", box, strings.TrimSpace(step.Content)))
	}

	if exhausted && done < total {
		b.WriteString("\n如需继续完成剩余步骤可以告诉我继续。")
	}
	return b.String()
}
// prepareDeliverNodeInput validates the deliver node's dependencies and
// backfills safe defaults for every optional one, so RunDeliverNode never
// has to nil-check them again.
func prepareDeliverNodeInput(input DeliverNodeInput) (
	*newagentmodel.AgentRuntimeState,
	*newagentmodel.ConversationContext,
	*newagentstream.ChunkEmitter,
	error,
) {
	// RuntimeState is the only hard requirement.
	if input.RuntimeState == nil {
		return nil, nil, nil, fmt.Errorf("deliver node: runtime state 不能为空")
	}
	// Make sure the shared flow state exists before anything reads it.
	input.RuntimeState.EnsureCommonState()

	// Optional dependencies degrade to empty/no-op implementations.
	if input.ConversationContext == nil {
		input.ConversationContext = newagentmodel.NewConversationContext("")
	}
	if input.ChunkEmitter == nil {
		// A no-op payload emitter keeps the node runnable in headless runs.
		input.ChunkEmitter = newagentstream.NewChunkEmitter(
			newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
		)
	}

	return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil
}