Version: 0.9.75.dev.260505

后端:
1.收口阶段 6 agent 结构迁移,将 newAgent 内核与 agentsvc 编排层迁入 services/agent
- 切换 Agent 启动装配与 HTTP handler 直连 agentsvc移除旧 service agent bridge
- 补齐 Agent 对 memory、task、task-class、schedule 的 RPC 适配与契约字段
- 扩展 schedule、task、task-class RPC/contract 支撑 Agent 查询、写入与 provider 切流
- 更新迁移文档、README 与相关注释,明确 agent 当前切流点和剩余 memory 迁移面
This commit is contained in:
Losita
2026-05-05 16:00:57 +08:00
parent e1819c5653
commit d7184b776b
174 changed files with 2189 additions and 1236 deletions

View File

@@ -0,0 +1,398 @@
package agentnode
import (
"context"
"fmt"
"io"
"log"
"strings"
"time"
"github.com/google/uuid"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
// Stable identifiers for the plan stage: stream block IDs, pinned-context
// keys, and the titles rendered on the pinned blocks.
const (
	planStageName        = "plan"
	planStatusBlockID    = "plan.status"
	planSpeakBlockID     = "plan.speak"
	planSummaryBlockID   = "plan.summary"
	planPinnedKey        = "current_plan"
	planCurrentStepKey   = "current_step"
	planCurrentStepTitle = "当前步骤"
	planFullPlanTitle    = "当前完整计划"
)
// PlanNodeInput describes the minimal dependencies needed to execute one
// round of the planning node.
type PlanNodeInput struct {
	RuntimeState        *agentmodel.AgentRuntimeState
	ConversationContext *agentmodel.ConversationContext
	UserInput           string
	Client              *llmservice.Client
	ChunkEmitter        *agentstream.ChunkEmitter
	ResumeNode          string
	AlwaysExecute       bool // when true, a finished plan is auto-confirmed instead of entering the confirm node
	ThinkingEnabled     bool // whether thinking is enabled; injected from agent.thinking.plan in config.yaml
	CompactionStore     agentmodel.CompactionStore // persistence for context compaction
	PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// RunPlanNode executes a single round of the planning node.
//
// Steps:
//  1. Validate the minimal dependencies and push a "planning" status first so
//     the user is not left waiting with no feedback;
//  2. Build this round's planning input and call the LLM Stream API;
//  3. Extract the JSON decision wrapped in the <SMARTFLOW_DECISION> tag from
//     the stream while streaming the speak body to the client;
//  4. Advance the flow by action:
//     4.1 continue: remain in planning;
//     4.2 ask_user: open a pending interaction and hand off to the interrupt
//     node for finalization;
//     4.3 plan_done: persist the full plan, refresh the pinned context, and
//     enter waiting_confirm.
func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
	runtimeState, conversationContext, emitter, err := preparePlanNodeInput(input)
	if err != nil {
		return err
	}
	flowState := runtimeState.EnsureCommonState()
	// 1. Emit a stage status first so the frontend knows planning has started.
	if err := emitter.EmitStatus(
		planStatusBlockID,
		planStageName,
		"planning",
		"正在梳理目标并补全执行计划。",
		false,
	); err != nil {
		return fmt.Errorf("规划阶段状态推送失败: %w", err)
	}
	// 2. Build this round's planning input (compacted if needed).
	messages := agentprompt.BuildPlanMessages(flowState, conversationContext, input.UserInput)
	messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
		Client:          input.Client,
		CompactionStore: input.CompactionStore,
		FlowState:       flowState,
		Emitter:         emitter,
		StageName:       planStageName,
		StatusBlockID:   planStatusBlockID,
	})
	logNodeLLMContext(planStageName, "planning", flowState, messages)
	// 3. Two-phase streaming: first extract the <SMARTFLOW_DECISION> decision
	// tag from the LLM stream, then stream the speak body to the client.
	reader, err := input.Client.Stream(
		ctx,
		messages,
		llmservice.GenerateOptions{
			Temperature: 0.2,
			// Set the cap explicitly instead of relying on the framework
			// default (4096), which could truncate long decisions.
			// Note: the current model API caps max_tokens at 131072; larger
			// values are rejected with a 400.
			MaxTokens: 131072,
			Thinking:  resolveThinkingMode(input.ThinkingEnabled),
			Metadata: map[string]any{
				"stage": planStageName,
				"phase": "planning",
			},
		},
	)
	if err != nil {
		return fmt.Errorf("规划阶段 Stream 调用失败: %w", err)
	}
	parser := agentrouter.NewStreamDecisionParser()
	firstChunk := true
	speakStreamed := false
	reasoningDigestor, digestorErr := emitter.NewReasoningDigestor(ctx, planSpeakBlockID, planStageName)
	if digestorErr != nil {
		return fmt.Errorf("规划 thinking 摘要器初始化失败: %w", digestorErr)
	}
	defer func() {
		if reasoningDigestor != nil {
			_ = reasoningDigestor.Close(ctx)
		}
	}()
	// 3.1 Phase one: parse the decision tag.
	for {
		chunk, recvErr := reader.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			log.Printf("[WARN] plan stream recv error chat=%s err=%v", flowState.ConversationID, recvErr)
			break
		}
		// Thinking content only feeds the digestor; raw reasoning_content is
		// no longer forwarded to the frontend.
		if chunk != nil && strings.TrimSpace(chunk.ReasoningContent) != "" {
			if reasoningDigestor != nil {
				reasoningDigestor.Append(chunk.ReasoningContent)
			}
		}
		content := ""
		if chunk != nil {
			content = chunk.Content
		}
		visible, ready, _ := parser.Feed(content)
		if !ready {
			continue
		}
		result := parser.Result()
		if result.Fallback || result.ParseFailed {
			return fmt.Errorf("规划解析失败,原始输出=%s", result.RawBuffer)
		}
		decision, parseErr := llmservice.ParseJSONObject[agentmodel.PlanDecision](result.DecisionJSON)
		if parseErr != nil {
			return fmt.Errorf("规划决策 JSON 解析失败: %w (raw=%s)", parseErr, result.RawBuffer)
		}
		if validateErr := decision.Validate(); validateErr != nil {
			return fmt.Errorf("规划决策不合法: %w", validateErr)
		}
		// 3.2 Phase two: stream the speak body (continuing on the same reader).
		var fullText strings.Builder
		if visible != "" {
			if reasoningDigestor != nil {
				reasoningDigestor.MarkContentStarted()
			}
			if emitErr := emitter.EmitAssistantText(planSpeakBlockID, planStageName, visible, firstChunk); emitErr != nil {
				return fmt.Errorf("规划文案推送失败: %w", emitErr)
			}
			speakStreamed = true
			fullText.WriteString(visible)
			firstChunk = false
		}
		for {
			chunk2, recvErr2 := reader.Recv()
			if recvErr2 == io.EOF {
				break
			}
			if recvErr2 != nil {
				log.Printf("[WARN] plan speak stream error chat=%s err=%v", flowState.ConversationID, recvErr2)
				break
			}
			if chunk2 == nil {
				continue
			}
			if strings.TrimSpace(chunk2.ReasoningContent) != "" {
				if reasoningDigestor != nil {
					reasoningDigestor.Append(chunk2.ReasoningContent)
				}
			}
			if chunk2.Content != "" {
				if reasoningDigestor != nil {
					reasoningDigestor.MarkContentStarted()
				}
				if emitErr := emitter.EmitAssistantText(planSpeakBlockID, planStageName, chunk2.Content, firstChunk); emitErr != nil {
					return fmt.Errorf("规划文案推送失败: %w", emitErr)
				}
				speakStreamed = true
				fullText.WriteString(chunk2.Content)
				firstChunk = false
			}
		}
		decision.Speak = fullText.String()
		// 4. If there is speak text and the action is not ask_user (ask_user
		// is finalized by the interrupt node), append it to history.
		if strings.TrimSpace(decision.Speak) != "" && decision.Action != agentmodel.PlanActionAskUser {
			msg := schema.AssistantMessage(decision.Speak, nil)
			conversationContext.AppendHistory(msg)
			persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
		}
		// 5. Advance the flow state according to the planning action.
		return handlePlanAction(ctx, input, runtimeState, conversationContext, emitter, flowState, decision, speakStreamed)
	}
	// The stream ended without a decision tag being found.
	return fmt.Errorf("规划阶段流结束但未提取到决策标签")
}
// handlePlanAction advances the flow state according to PlanDecision.Action.
//
// askUserSpeakStreamed records whether the plan stage already streamed the
// ask_user text, so the interrupt side can avoid duplicating the body.
func handlePlanAction(
	ctx context.Context,
	input PlanNodeInput,
	runtimeState *agentmodel.AgentRuntimeState,
	conversationContext *agentmodel.ConversationContext,
	emitter *agentstream.ChunkEmitter,
	flowState *agentmodel.CommonState,
	decision *agentmodel.PlanDecision,
	askUserSpeakStreamed bool,
) error {
	switch decision.Action {
	case agentmodel.PlanActionContinue:
		// Stay in the planning phase; the next round keeps refining the plan.
		flowState.Phase = agentmodel.PhasePlanning
		return nil
	case agentmodel.PlanActionAskUser:
		question := resolvePlanAskUserText(decision)
		runtimeState.OpenAskUserInteraction(uuid.NewString(), question, strings.TrimSpace(input.ResumeNode))
		// 1. If the plan stage already streamed the ask_user text, the
		//    interrupt side must not emit the body again;
		// 2. The plan stage never pre-appends ask_user text to history, so
		//    that flag is explicitly marked false here.
		runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserSpeakStreamed, askUserSpeakStreamed)
		runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserHistoryAppended, false)
		return nil
	case agentmodel.PlanActionDone:
		flowState.FinishPlan(decision.PlanSteps)
		flowState.PendingContextHook = clonePlanContextHook(decision.ContextHook)
		writePlanPinnedBlocks(conversationContext, decision.PlanSteps)
		if decision.NeedsRoughBuild {
			flowState.NeedsRoughBuild = true
			if len(decision.TaskClassIDs) > 0 {
				flowState.TaskClassIDs = decision.TaskClassIDs
			}
		}
		// With always_execute on, skip the confirmation gate and move
		// straight to the execution phase.
		if input.AlwaysExecute {
			summary := strings.TrimSpace(buildPlanSummary(decision.PlanSteps))
			if summary != "" {
				msg := schema.AssistantMessage(summary, nil)
				if err := emitter.EmitPseudoAssistantText(
					ctx,
					planSummaryBlockID,
					planStageName,
					summary,
					agentstream.DefaultPseudoStreamOptions(),
				); err != nil {
					return fmt.Errorf("自动执行前计划摘要推送失败: %w", err)
				}
				conversationContext.AppendHistory(msg)
				persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
			}
			flowState.ConfirmPlan()
			_ = emitter.EmitStatus(
				planStatusBlockID,
				planStageName,
				"plan_auto_confirmed",
				"计划已自动确认,开始执行。",
				false,
			)
		}
		return nil
	default:
		// Unknown action: feed a correction hint back to the LLM instead of
		// failing the round.
		llmOutput := decision.Speak
		if strings.TrimSpace(llmOutput) == "" {
			llmOutput = decision.Reason
		}
		AppendLLMCorrectionWithHint(
			conversationContext,
			llmOutput,
			fmt.Sprintf("你输出的 action \"%s\" 不是合法的执行动作。", decision.Action),
			// Bug fix: the old hint listed the execute-stage actions
			// (next_plan/done); the plan stage's legal actions are the ones
			// handled by this switch: continue / ask_user / plan_done.
			"合法的 action 包括continue继续完善计划、ask_user追问用户、plan_done计划完成。",
		)
		return nil
	}
}
// preparePlanNodeInput checks the required dependencies and substitutes safe
// defaults for the optional ones, returning the runtime state, conversation
// context, and chunk emitter the node will operate on.
func preparePlanNodeInput(input PlanNodeInput) (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *agentstream.ChunkEmitter, error) {
	switch {
	case input.RuntimeState == nil:
		return nil, nil, nil, fmt.Errorf("plan node: runtime state 不能为空")
	case input.Client == nil:
		return nil, nil, nil, fmt.Errorf("plan node: plan client 未注入")
	}
	input.RuntimeState.EnsureCommonState()
	conversation := input.ConversationContext
	if conversation == nil {
		conversation = agentmodel.NewConversationContext("")
	}
	emitter := input.ChunkEmitter
	if emitter == nil {
		emitter = agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix())
	}
	return input.RuntimeState, conversation, emitter, nil
}
// resolvePlanAskUserText picks the question shown to the user for an ask_user
// action: Speak takes priority, then Reason, then a generic fallback.
func resolvePlanAskUserText(decision *agentmodel.PlanDecision) string {
	const fallback = "我还缺一点关键信息,想先向你确认一下。"
	if decision == nil {
		return fallback
	}
	for _, candidate := range []string{decision.Speak, decision.Reason} {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return fallback
}
// clonePlanContextHook deep-copies a ContextHook so later mutations of the
// decision cannot leak into flow state. It normalizes the copy and returns
// nil when the hook is absent or ends up with an empty domain.
func clonePlanContextHook(hook *agentmodel.ContextHook) *agentmodel.ContextHook {
	if hook == nil {
		return nil
	}
	copied := *hook
	if len(hook.Packs) > 0 {
		packs := make([]string, len(hook.Packs))
		copy(packs, hook.Packs)
		copied.Packs = packs
	}
	copied.Normalize()
	if copied.Domain == "" {
		return nil
	}
	return &copied
}
// writePlanPinnedBlocks refreshes the pinned context blocks after plan_done:
// the full plan text plus the first step (with its done-when criterion when
// present). A nil conversation context is a no-op.
//
// The parameter was renamed from ctx to conv: in Go, ctx conventionally
// denotes a context.Context, which this is not (parameter names are not part
// of the caller-visible interface, so this is backward-compatible).
func writePlanPinnedBlocks(conv *agentmodel.ConversationContext, steps []agentmodel.PlanStep) {
	if conv == nil {
		return
	}
	fullPlanText := buildPinnedPlanText(steps)
	if strings.TrimSpace(fullPlanText) != "" {
		conv.UpsertPinnedBlock(agentmodel.ContextBlock{
			Key:     planPinnedKey,
			Title:   planFullPlanTitle,
			Content: fullPlanText,
		})
	}
	if len(steps) == 0 {
		return
	}
	firstStep := strings.TrimSpace(steps[0].Content)
	if doneWhen := strings.TrimSpace(steps[0].DoneWhen); doneWhen != "" {
		firstStep = fmt.Sprintf("%s\n完成判定%s", firstStep, doneWhen)
	}
	conv.UpsertPinnedBlock(agentmodel.ContextBlock{
		Key:     planCurrentStepKey,
		Title:   planCurrentStepTitle,
		Content: firstStep,
	})
}
// buildPinnedPlanText renders the plan steps as a numbered list; a step with
// a done-when criterion gets it appended on a second line. Blank steps are
// skipped (their index still counts toward numbering, matching the original
// behavior), and an empty string is returned when nothing renders.
func buildPinnedPlanText(steps []agentmodel.PlanStep) string {
	var rendered []string
	for idx, step := range steps {
		body := strings.TrimSpace(step.Content)
		if body == "" {
			continue
		}
		entry := fmt.Sprintf("%d. %s", idx+1, body)
		if done := strings.TrimSpace(step.DoneWhen); done != "" {
			entry += fmt.Sprintf("\n完成判定%s", done)
		}
		rendered = append(rendered, entry)
	}
	return strings.TrimSpace(strings.Join(rendered, "\n\n"))
}
// resolveThinkingMode maps the configured boolean onto the llm service's
// ThinkingMode. Shared by the plan / execute / deliver nodes.
func resolveThinkingMode(enabled bool) llmservice.ThinkingMode {
	mode := llmservice.ThinkingModeDisabled
	if enabled {
		mode = llmservice.ThinkingModeEnabled
	}
	return mode
}