Version: 0.9.47.dev.260427
后端: 1. execute 节点继续拆职责——超大 execute.go 下沉为 node/execute 子包,按决策流、动作路由、上下文锚点、工具执行、状态快照、工具展示与参数解析拆分;顶层 execute.go 收敛为桥接导出,降低单文件编排/业务/模型/工具逻辑混写 2. 节点公共能力继续沉到 shared——抽出 LLM 纠错回灌、完整上下文调试日志、thinking 开关、统一上下文压缩、可见 assistant 文本持久化等 node_* 公共件,减少 execute 独占实现并为其他节点复用铺路 3. speak 文本整理能力独立收口——新增 speak_text 辅助文件,补齐正文归一化的独立承载,继续收缩 execute 主文件体积 前端: 4. NewAgent 时间线接入 business_card 业务卡片协议——schedule_agent.ts 新增 task_query / task_record 卡片载荷类型与 business_card kind;AssistantPanel 增加业务卡片事件存储、时间线恢复、块渲染分支与 BusinessCardRenderer 接入,同时保留 interrupt / status / tool / reasoning 多块并存 5. 新增任务查询卡片与任务记录卡片组件,并补充 DesignDemo 设计预览页与路由,前端可先行验证 business_card 的视觉与交互落点 文档: 6. 新增 newagent business card 前后端对接说明,明确 timeline kind、payload 结构、卡片分类、前后端发射/渲染约束
This commit is contained in:
150
backend/newAgent/node/execute/run.go
Normal file
150
backend/newAgent/node/execute/run.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package newagentexecute
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
|
||||
|
||||
infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
|
||||
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
|
||||
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
|
||||
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
|
||||
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
|
||||
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
|
||||
)
|
||||
|
||||
const (
	// executeStageName identifies the execute node in status events and logs.
	executeStageName = "execute"

	// Stream block IDs used when emitting status and speak chunks for this node.
	executeStatusBlockID = "execute.status"
	executeSpeakBlockID  = "execute.speak"

	// executePinnedKey is the key under which execution context is pinned
	// into the conversation context (see syncExecutePinnedContext).
	executePinnedKey = "execution_context"

	// toolAnalyzeHealth is the health-analysis tool name; presumably matches
	// the name registered in the tool registry — not referenced in this file's
	// visible code, verify against the action-routing code.
	toolAnalyzeHealth = "analyze_health"

	// History metadata key and the value marking entries recorded when a
	// plan step advanced.
	executeHistoryKindKey          = "newagent_history_kind"
	executeHistoryKindStepAdvanced = "execute_step_advanced"

	// maxConsecutiveCorrections caps back-to-back LLM correction rounds
	// before the correction loop gives up.
	maxConsecutiveCorrections = 3
)
|
||||
|
||||
// ExecuteNodeInput carries every dependency RunExecuteNode needs for one
// execute-node invocation: runtime/flow state, conversation context, the LLM
// client, streaming emitter, tool registry, and schedule/persistence hooks.
type ExecuteNodeInput struct {
	// RuntimeState holds the agent's run state; RunExecuteNode derives the
	// common flow state from it and checks PendingConfirmTool.
	RuntimeState *newagentmodel.AgentRuntimeState
	// ConversationContext receives the pinned execution context and feeds
	// prompt construction.
	ConversationContext *newagentmodel.ConversationContext
	// UserInput is the raw user text for this turn. Not read directly in
	// this file — presumably consumed by downstream helpers; TODO confirm.
	UserInput string
	// Client is the LLM client used for decision collection and compaction.
	Client *infrallm.Client
	// ChunkEmitter streams status/speak/tool chunks to the frontend.
	ChunkEmitter *newagentstream.ChunkEmitter
	// ResumeNode names the node to resume from, if any — TODO confirm usage.
	ResumeNode string
	// ToolRegistry resolves tool names to executable tools.
	ToolRegistry *newagenttools.ToolRegistry
	// ScheduleState is the mutable schedule state for this run; its
	// task-processing queue is reset on the first round.
	ScheduleState *schedule.ScheduleState
	// CompactionStore persists compacted context segments.
	CompactionStore newagentmodel.CompactionStore
	// WriteSchedulePreview writes a schedule preview for user confirmation.
	WriteSchedulePreview newagentmodel.WriteSchedulePreviewFunc
	// OriginalScheduleState is the pre-mutation schedule snapshot, used when
	// executing a previously confirmed tool.
	OriginalScheduleState *schedule.ScheduleState
	// AlwaysExecute, when true, presumably skips confirmation gating —
	// not read in this file; verify against the action-routing code.
	AlwaysExecute bool
	// ThinkingEnabled toggles the LLM thinking/reasoning mode.
	ThinkingEnabled bool
	// PersistVisibleMessage persists assistant-visible text to history.
	PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
|
||||
|
||||
// ExecuteRoundObservation is the JSON-serializable record of a single execute
// round: which plan step ran, what the model decided, and the tool outcome.
type ExecuteRoundObservation struct {
	// Round is the 1-based (presumably) execute round number — TODO confirm.
	Round int `json:"round"`
	// StepIndex is the index of the plan step this round worked on.
	StepIndex int `json:"step_index"`
	// GoalCheck is the model's goal-completion assessment, if any.
	GoalCheck string `json:"goal_check,omitempty"`
	// Decision is the action the model chose for this round.
	Decision string `json:"decision,omitempty"`
	// ToolName is the tool invoked this round, empty when no tool ran.
	ToolName string `json:"tool_name,omitempty"`
	// ToolParams is the serialized parameter payload passed to the tool.
	ToolParams string `json:"tool_params,omitempty"`
	// ToolSuccess reports whether the tool call succeeded. Intentionally not
	// omitempty so an explicit false is always serialized.
	ToolSuccess bool `json:"tool_success"`
	// ToolResult is the tool's (possibly truncated — TODO confirm) output.
	ToolResult string `json:"tool_result,omitempty"`
}
|
||||
|
||||
func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
|
||||
runtimeState, conversationContext, emitter, err := prepareExecuteNodeInput(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
flowState := runtimeState.EnsureCommonState()
|
||||
applyPendingContextHook(flowState)
|
||||
|
||||
if runtimeState.PendingConfirmTool != nil {
|
||||
return executePendingTool(
|
||||
ctx,
|
||||
runtimeState,
|
||||
conversationContext,
|
||||
input.ToolRegistry,
|
||||
input.ScheduleState,
|
||||
input.OriginalScheduleState,
|
||||
input.WriteSchedulePreview,
|
||||
emitter,
|
||||
)
|
||||
}
|
||||
|
||||
if input.ScheduleState != nil && flowState.RoundUsed == 0 {
|
||||
schedule.ResetTaskProcessingQueue(input.ScheduleState)
|
||||
}
|
||||
|
||||
syncExecutePinnedContext(conversationContext, flowState)
|
||||
|
||||
if flowState.HasCurrentPlanStep() {
|
||||
current, total := flowState.PlanProgress()
|
||||
currentStep, _ := flowState.CurrentPlanStep()
|
||||
if err := emitter.EmitStatus(
|
||||
executeStatusBlockID,
|
||||
executeStageName,
|
||||
"executing",
|
||||
fmt.Sprintf("正在执行第 %d/%d 步:%s", current, total, truncateText(currentStep.Content, 60)),
|
||||
false,
|
||||
); err != nil {
|
||||
return fmt.Errorf("执行阶段状态推送失败: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err := emitter.EmitStatus(
|
||||
executeStatusBlockID,
|
||||
executeStageName,
|
||||
"executing",
|
||||
"正在处理你的请求...",
|
||||
false,
|
||||
); err != nil {
|
||||
return fmt.Errorf("执行阶段状态推送失败: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !flowState.NextRound() {
|
||||
flowState.Exhaust(
|
||||
executeStageName,
|
||||
"本轮执行已达到安全轮次上限,当前先停止继续操作。如需继续,我可以在你确认后接着处理剩余步骤。",
|
||||
"execute rounds exhausted before task completion",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
messages := newagentprompt.BuildExecuteMessages(flowState, conversationContext)
|
||||
messages = newagentshared.CompactUnifiedMessagesIfNeeded(ctx, messages, newagentshared.UnifiedCompactInput{
|
||||
Client: input.Client,
|
||||
CompactionStore: input.CompactionStore,
|
||||
FlowState: flowState,
|
||||
Emitter: emitter,
|
||||
StageName: executeStageName,
|
||||
StatusBlockID: executeStatusBlockID,
|
||||
})
|
||||
|
||||
newagentshared.LogNodeLLMContext(executeStageName, "decision", flowState, messages)
|
||||
|
||||
decisionOutput, err := collectExecuteDecisionFromLLM(
|
||||
ctx,
|
||||
input,
|
||||
flowState,
|
||||
conversationContext,
|
||||
emitter,
|
||||
messages,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return handleExecuteDecision(
|
||||
ctx,
|
||||
input,
|
||||
runtimeState,
|
||||
flowState,
|
||||
conversationContext,
|
||||
emitter,
|
||||
decisionOutput,
|
||||
)
|
||||
}
|
||||
Reference in New Issue
Block a user