后端: 1. Prompt 层从 execute 专属骨架重构为全节点统一四段式 buildUnifiedStageMessages - 新增 unified_context.go:定义 StageMessagesConfig + buildUnifiedStageMessages 统一骨架,所有节点(Chat/Plan/Execute/Deliver/DeepAnswer)共用同一套 msg0~msg3 拼装逻辑 - 新增 conversation_view.go:通用对话历史渲染 buildConversationHistoryMessage,各节点复用,不再各自维护提取逻辑 - 新增 chat_context.go / plan_context.go / deliver_context.go:各节点自行渲染 msg1(对话视图)和 msg2(工作区),统一层只负责"怎么拼",不再替节点决定"放什么" - Chat/Plan/Deliver/Execute 的 BuildXXXMessages 全部从 buildStageMessages 切到 buildUnifiedStageMessages,移除旧路径 - 删除 execute_pinned.go:execute 记忆渲染合并到统一层 renderUnifiedMemoryContext - Plan prompt 不再在 user prompt 中拼装任务类 ID 列表和 renderStateSummary,改为依赖 msg2 规划工作区;Chat 粗排判断从"上下文有任务类 ID"改为"批量调度需求" - Deliver prompt 新增 IsAborted/IsExhaustedTerminal 区分,支持粗排收口和主动终止场景 2. Execute ReAct 上下文简化——移除归档搬运、窗口裁剪和重复工具压缩 - 移除 splitExecuteLoopRecordsByBoundary、findLatestExecuteBoundaryMarker、tailExecuteLoops、compressExecuteLoopObservationsByTool、buildEarlyExecuteReactSummary、trimExecuteMessage1ByBudget 等六个函数 - 移除 executeLoopWindowLimit / executeConversationTurnLimit / executeMessage1MaxRunes 等预算常量 - msg1 不再从历史中归档上一轮 ReAct 结果,只保留真实对话流(user + assistant speak),全量注入 - msg2 不再按 loop_closed / step_advanced 边界切分"归档/活跃",直接全量注入全部 ReAct Loop 记录 - token 预算由统一压缩层兜底,prompt 层不再做提前裁剪 3. 压缩层从 Execute 专属提升为全节点通用 UnifiedCompact - 删除 execute_compact.go(Execute 专属压缩文件) - 新增 unified_compact.go:UnifiedCompactInput 参数化,各节点(Plan/Chat/Deliver/Execute)构造时从自己的 NodeInput 提取公共字段,消除对 Execute 的直接依赖 - CompactionStore 接口扩展 LoadStageCompaction / SaveStageCompaction,各节点按 stageKey 独立维护压缩状态互不覆盖 - 非 4 段式消息时退化成按角色汇总统计,确保 context_token_stats 仍然刷新 4. 
Retry 重试机制全面下线 - dao/agent.go:saveChatHistoryCore / SaveChatHistory / SaveChatHistoryInTx 移除 retry_group_id / retry_index / retry_from_user_message_id / retry_from_assistant_message_id 四个参数,修复乱码注释 - dao/agent-cache.go:移除 ApplyRetrySeed 和 extractMessageHistoryID 两个方法 - conv/agent.go:ToEinoMessages 不再回灌 retry_* 字段到运行期上下文 - service/agentsvc/agent.go:移除 chatRetryMeta 及 resolveRetryGroupID / buildRetrySeed 等全部重试逻辑 - service/agentsvc/agent_quick_note.go:整个文件删除(retry 快速补写路径已无用) - service/events/chat_history_persist.go:移除 retry 参数传递 5. 节点层瘦身 + 可见消息逐条持久化 - agent_nodes.go 大幅简化:Chat/Plan/Execute/Deliver 节点方法移除 ToolSchema 注入、状态摘要渲染等逻辑,只做参数转发和状态落盘 - 新增 visible_message.go:persistVisibleAssistantMessage 统一处理可见 assistant speak 的实时持久化,失败仅记日志不中断主流程 - 新增 llm_debug.go:logNodeLLMContext 统一打印 LLM 上下文调试日志 - graph_run_state.go 新增 PersistVisibleMessageFunc 类型 + AgentGraphDeps.PersistVisibleMessage 字段 - service/agentsvc/agent_newagent.go 精简主循环,注入 PersistVisibleMessage 回调;agent_history.go 精简历史构建 - token_budget.go 移除 Execute 专属预算检查,统一到通用预算 前端: 1. 移除 retry 相关 UI 和类型 - agent.ts 移除 retry_group_id / retry_index / retry_total 字段及 normalize 逻辑 - AssistantPanel.vue 移除 retry 相关 UI 和交互代码(约 700 行精简) - dashboard.ts 移除 retry 相关类型定义 - AssistantView.vue 微调 2. ContextWindowMeter 压缩次数展示和数值格式优化 - 新增 formatCompactCount 工具函数,千位以上用 k 单位压缩(如 80k) - 新增压缩次数显示 3.修复了新对话发消息时,user和assistant消息被自动调换的bug 仓库:无
241 lines
7.5 KiB
Go
package newagentnode
|
||
|
||
import (
|
||
"context"
|
||
"fmt"
|
||
"strings"
|
||
"time"
|
||
|
||
"github.com/cloudwego/eino/schema"
|
||
|
||
infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
|
||
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
|
||
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
|
||
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
|
||
)
|
||
|
||
// Identifiers for deliver-phase stream chunks pushed to the frontend.
const (
	// deliverStageName tags every chunk emitted by the deliver node.
	deliverStageName = "deliver"
	// deliverStatusBlockID addresses the status block (phase updates).
	deliverStatusBlockID = "deliver.status"
	// deliverSpeakBlockID addresses the user-visible summary text block.
	deliverSpeakBlockID = "deliver.speak"
)
|
||
|
||
// DeliverNodeInput describes the minimal dependencies required for a
// single run of the deliver node.
//
// Responsibility boundary:
//  1. Only generates the delivery summary and pushes it to the user;
//     it does not drive any further flow progression.
//  2. RuntimeState supplies the plan steps and execution status.
//  3. ConversationContext supplies the conversation history from the
//     execute phase.
//  4. Once delivery completes, the flow is marked as finished.
type DeliverNodeInput struct {
	RuntimeState        *newagentmodel.AgentRuntimeState
	ConversationContext *newagentmodel.ConversationContext
	Client              *infrallm.Client
	ChunkEmitter        *newagentstream.ChunkEmitter
	ThinkingEnabled     bool                           // whether thinking is enabled; injected from agent.thinking.deliver in config.yaml
	CompactionStore     newagentmodel.CompactionStore  // persistence for context compaction state
	// PersistVisibleMessage is the callback used to persist user-visible
	// assistant messages; may be nil (persistence is then skipped).
	PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
|
||
|
||
// RunDeliverNode 执行一轮交付节点逻辑。
|
||
//
|
||
// 核心职责:
|
||
// 1. 调 LLM 基于原始计划 + 执行历史生成交付总结;
|
||
// 2. 伪流式推送总结给用户;
|
||
// 3. 写入对话历史,保证上下文连续;
|
||
// 4. 标记流程结束。
|
||
//
|
||
// 降级策略:
|
||
// 1. LLM 调用失败时,回退到机械格式化总结,不中断流程;
|
||
// 2. 机械总结包含计划步骤列表和完成进度。
|
||
func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
|
||
runtimeState, conversationContext, emitter, err := prepareDeliverNodeInput(input)
|
||
if err != nil {
|
||
return err
|
||
}
|
||
flowState := runtimeState.EnsureCommonState()
|
||
|
||
// 1. 推送交付阶段状态,让前端知道正在生成总结。
|
||
if err := emitter.EmitStatus(
|
||
deliverStatusBlockID,
|
||
deliverStageName,
|
||
"summarizing",
|
||
"正在生成交付总结。",
|
||
false,
|
||
); err != nil {
|
||
return fmt.Errorf("交付阶段状态推送失败: %w", err)
|
||
}
|
||
|
||
// 2. 调 LLM 生成交付总结。
|
||
summary := generateDeliverSummary(ctx, input.Client, flowState, conversationContext, input.ThinkingEnabled, input.CompactionStore, emitter)
|
||
|
||
// 3. 伪流式推送总结。
|
||
if strings.TrimSpace(summary) != "" {
|
||
msg := schema.AssistantMessage(summary, nil)
|
||
if err := emitter.EmitPseudoAssistantText(
|
||
ctx,
|
||
deliverSpeakBlockID,
|
||
deliverStageName,
|
||
summary,
|
||
newagentstream.DefaultPseudoStreamOptions(),
|
||
); err != nil {
|
||
return fmt.Errorf("交付总结推送失败: %w", err)
|
||
}
|
||
conversationContext.AppendHistory(msg)
|
||
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
|
||
}
|
||
|
||
// 4. 推送最终完成状态。
|
||
_ = emitter.EmitStatus(
|
||
deliverStatusBlockID,
|
||
deliverStageName,
|
||
"done",
|
||
"本轮流程已结束。",
|
||
true,
|
||
)
|
||
|
||
return nil
|
||
}
|
||
|
||
// generateDeliverSummary 尝试调用 LLM 生成交付总结,失败时降级到机械格式化。
|
||
func generateDeliverSummary(
|
||
ctx context.Context,
|
||
client *infrallm.Client,
|
||
flowState *newagentmodel.CommonState,
|
||
conversationContext *newagentmodel.ConversationContext,
|
||
thinkingEnabled bool,
|
||
compactionStore newagentmodel.CompactionStore,
|
||
emitter *newagentstream.ChunkEmitter,
|
||
) string {
|
||
if flowState != nil {
|
||
switch {
|
||
case flowState.IsAborted():
|
||
return normalizeSpeak(buildAbortSummary(flowState))
|
||
case flowState.IsExhaustedTerminal():
|
||
return normalizeSpeak(buildExhaustedSummary(flowState))
|
||
}
|
||
}
|
||
|
||
if client == nil {
|
||
return buildMechanicalSummary(flowState)
|
||
}
|
||
|
||
messages := newagentprompt.BuildDeliverMessages(flowState, conversationContext)
|
||
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
|
||
Client: client,
|
||
CompactionStore: compactionStore,
|
||
FlowState: flowState,
|
||
Emitter: emitter,
|
||
StageName: deliverStageName,
|
||
StatusBlockID: deliverStatusBlockID,
|
||
})
|
||
logNodeLLMContext(deliverStageName, "summarizing", flowState, messages)
|
||
result, err := client.GenerateText(
|
||
ctx,
|
||
messages,
|
||
infrallm.GenerateOptions{
|
||
Temperature: 0.5,
|
||
MaxTokens: 800,
|
||
Thinking: resolveThinkingMode(thinkingEnabled),
|
||
Metadata: map[string]any{
|
||
"stage": deliverStageName,
|
||
},
|
||
},
|
||
)
|
||
if err != nil || result == nil || strings.TrimSpace(result.Text) == "" {
|
||
return buildMechanicalSummary(flowState)
|
||
}
|
||
|
||
return normalizeSpeak(result.Text)
|
||
}
|
||
|
||
// buildAbortSummary 生成“流程已终止”的统一交付文案。
|
||
//
|
||
// 说明:
|
||
// 1. 第二轮开始,abort 的用户可见文案由终止方提前写入 CommonState;
|
||
// 2. deliver 不再重新猜测或改写业务异常,只做最终收口;
|
||
// 3. 若历史快照缺失 user_message,则回退到一份通用说明,避免前端收到空白结果。
|
||
func buildAbortSummary(state *newagentmodel.CommonState) string {
|
||
if state == nil || state.TerminalOutcome == nil {
|
||
return "本轮流程已终止。"
|
||
}
|
||
if msg := strings.TrimSpace(state.TerminalOutcome.UserMessage); msg != "" {
|
||
return msg
|
||
}
|
||
return "本轮流程已终止,请根据当前提示检查后再继续。"
|
||
}
|
||
|
||
// buildExhaustedSummary 生成“轮次耗尽”的统一收口文案。
|
||
func buildExhaustedSummary(state *newagentmodel.CommonState) string {
|
||
if state == nil {
|
||
return "本轮执行已达到安全轮次上限,当前先停止继续操作。"
|
||
}
|
||
|
||
prefix := "本轮执行已达到安全轮次上限,当前先停止继续操作。"
|
||
if state.TerminalOutcome != nil && strings.TrimSpace(state.TerminalOutcome.UserMessage) != "" {
|
||
prefix = strings.TrimSpace(state.TerminalOutcome.UserMessage)
|
||
}
|
||
if !state.HasPlan() {
|
||
return prefix
|
||
}
|
||
return prefix + "\n\n" + strings.TrimSpace(buildMechanicalSummary(state))
|
||
}
|
||
|
||
// buildMechanicalSummary 在 LLM 不可用时,机械拼接一份最小可用总结。
|
||
func buildMechanicalSummary(state *newagentmodel.CommonState) string {
|
||
if state == nil {
|
||
return "任务流程已结束。"
|
||
}
|
||
|
||
var sb strings.Builder
|
||
current, total := state.PlanProgress()
|
||
|
||
if !state.HasPlan() {
|
||
return "任务流程已结束。"
|
||
}
|
||
|
||
if state.IsExhaustedTerminal() {
|
||
sb.WriteString(fmt.Sprintf("任务因执行轮次耗尽提前结束,已完成 %d/%d 步。\n", current, total))
|
||
} else {
|
||
sb.WriteString("所有计划步骤已执行完毕。\n")
|
||
}
|
||
|
||
sb.WriteString("\n执行情况:\n")
|
||
for i, step := range state.PlanSteps {
|
||
marker := "[ ]"
|
||
if i < current {
|
||
marker = "[x]"
|
||
}
|
||
sb.WriteString(fmt.Sprintf("%s %s\n", marker, strings.TrimSpace(step.Content)))
|
||
}
|
||
|
||
if state.IsExhaustedTerminal() && current < total {
|
||
sb.WriteString("\n如需继续完成剩余步骤,可以告诉我继续。")
|
||
}
|
||
|
||
return sb.String()
|
||
}
|
||
|
||
// prepareDeliverNodeInput 校验并准备交付节点的运行态依赖。
|
||
func prepareDeliverNodeInput(input DeliverNodeInput) (
|
||
*newagentmodel.AgentRuntimeState,
|
||
*newagentmodel.ConversationContext,
|
||
*newagentstream.ChunkEmitter,
|
||
error,
|
||
) {
|
||
if input.RuntimeState == nil {
|
||
return nil, nil, nil, fmt.Errorf("deliver node: runtime state 不能为空")
|
||
}
|
||
|
||
input.RuntimeState.EnsureCommonState()
|
||
if input.ConversationContext == nil {
|
||
input.ConversationContext = newagentmodel.NewConversationContext("")
|
||
}
|
||
if input.ChunkEmitter == nil {
|
||
input.ChunkEmitter = newagentstream.NewChunkEmitter(
|
||
newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
|
||
)
|
||
}
|
||
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil
|
||
}
|