Version: 0.9.75.dev.260505

后端:
1.收口阶段 6 agent 结构迁移,将 newAgent 内核与 agentsvc 编排层迁入 services/agent
- 切换 Agent 启动装配与 HTTP handler 直连 agent sv,移除旧 service agent bridge
- 补齐 Agent 对 memory、task、task-class、schedule 的 RPC 适配与契约字段
- 扩展 schedule、task、task-class RPC/contract 支撑 Agent 查询、写入与 provider 切流
- 更新迁移文档、README 与相关注释,明确 agent 当前切流点和剩余 memory 迁移面
This commit is contained in:
Losita
2026-05-05 16:00:57 +08:00
parent e1819c5653
commit d7184b776b
174 changed files with 2189 additions and 1236 deletions

View File

@@ -320,7 +320,7 @@ CREATE TABLE `users`
## 4.2 Agent可调用的工具定义
以下定义基于当前代码实现(`backend/newAgent/tools/registry.go` + `backend/cmd/start.go` 注入),不是规划态文档。
以下定义基于当前代码实现(`backend/services/agent/tools/registry.go` + `backend/cmd/start.go` 注入),不是规划态文档。
### 4.2.1 调用契约

View File

@@ -31,14 +31,8 @@ import (
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
"github.com/LoveLosita/smartflow/backend/middleware"
"github.com/LoveLosita/smartflow/backend/model"
newagentconv "github.com/LoveLosita/smartflow/backend/newAgent/conv"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/web"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/service"
agentsvcsvc "github.com/LoveLosita/smartflow/backend/service/agentsvc"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
@@ -48,6 +42,10 @@ import (
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
@@ -61,7 +59,7 @@ import (
//
// 职责边界:
// 1. 只负责保存启动期已经装配好的基础设施、仓储、服务和 HTTP handler
// 2. 不承载业务逻辑,业务仍然由 service / newAgent / memory 等领域模块负责;
// 2. 不承载业务逻辑,业务仍然由 service / agent / memory 等领域模块负责;
// 3. 不决定进程角色api / worker / all 由 StartAPI、StartWorker、StartAll 选择启动哪些生命周期。
type appRuntime struct {
db *gorm.DB
@@ -141,7 +139,7 @@ func mustBuildRuntime(ctx context.Context) *appRuntime {
//
// 步骤说明:
// 1. 先初始化配置、数据库、Redis、模型、RAG、memory 等基础设施;
// 2. 再构造 DAO / Service / newAgent 依赖;
// 2. 再构造 DAO / Service / agent 依赖;
// 3. 最后构造 HTTP handlers供 api/all 模式按需启动;
// 4. worker 模式暂时也复用完整依赖图,避免同轮迁移拆出两套装配逻辑。
func buildRuntime(ctx context.Context) (*appRuntime, error) {
@@ -282,7 +280,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
agentService := service.NewAgentServiceWithSchedule(
agentService := agentsv.NewAgentService(
llmService,
agentRepo,
taskRepo,
@@ -291,18 +289,22 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
manager.ActiveSchedule,
manager.ActiveScheduleSession,
eventPublisher,
scheduleService,
taskSv,
)
// 1. 仍由启动装配层注入旧 service 的排程能力,避免 agent/sv 反向 import 旧 service 形成循环依赖。
// 2. 后续 schedule/task 完全走 RPC 后,这两个函数注入点可继续缩掉。
agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
agentService.GetTasksWithUrgencyPromotionFunc = taskSv.GetTasksWithUrgencyPromotion
configureAgentService(
agentService,
ragRuntime,
agentRepo,
cacheRepo,
taskRepo,
taskClassRepo,
scheduleRepo,
taskClient,
taskClassClient,
scheduleClient,
memoryClient,
memoryCfg,
memoryObserver,
@@ -528,14 +530,14 @@ func buildActiveScheduleSessionRerunFunc(
graphRunner *activegraph.Runner,
previewConfirm *activesvc.PreviewConfirmService,
feedbackLocator *activefeedbacklocate.Service,
) agentsvcsvc.ActiveScheduleSessionRerunFunc {
) agentsv.ActiveScheduleSessionRerunFunc {
return func(
ctx context.Context,
session *model.ActiveScheduleSessionSnapshot,
userMessage string,
traceID string,
requestStart time.Time,
) (*agentsvcsvc.ActiveScheduleSessionRerunResult, error) {
) (*agentsv.ActiveScheduleSessionRerunResult, error) {
if activeDAO == nil || graphRunner == nil || previewConfirm == nil {
return nil, fmt.Errorf("主动调度 rerun 依赖未初始化")
}
@@ -568,7 +570,7 @@ func buildActiveScheduleSessionRerunFunc(
nextState.LastNotificationID = ""
nextState.FailedReason = ""
nextState.ExpiresAt = nil
return &agentsvcsvc.ActiveScheduleSessionRerunResult{
return &agentsv.ActiveScheduleSessionRerunResult{
AssistantText: question,
SessionState: nextState,
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
@@ -596,7 +598,7 @@ func buildActiveScheduleSessionRerunFunc(
nextState.LastNotificationID = ""
nextState.FailedReason = ""
nextState.ExpiresAt = nil
return &agentsvcsvc.ActiveScheduleSessionRerunResult{
return &agentsv.ActiveScheduleSessionRerunResult{
AssistantText: question,
SessionState: nextState,
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
@@ -666,9 +668,9 @@ func buildActiveScheduleSessionRerunFunc(
expiresAt := previewResp.Detail.ExpiresAt
state.ExpiresAt = &expiresAt
return &agentsvcsvc.ActiveScheduleSessionRerunResult{
return &agentsv.ActiveScheduleSessionRerunResult{
AssistantText: firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, previewResp.Detail.Explanation, previewResp.Detail.Notification, "主动调度建议已更新。"),
BusinessCard: &newagentstream.StreamBusinessCardExtra{
BusinessCard: &agentstream.StreamBusinessCardExtra{
CardType: "active_schedule_preview",
Title: "SmartFlow 日程调整建议",
Summary: firstNonEmptyString(selectionResult.NotificationSummary, previewResp.Detail.Notification, previewResp.Detail.Explanation),
@@ -683,7 +685,7 @@ func buildActiveScheduleSessionRerunFunc(
question := firstNonEmptyString(selectionResult.AskUserQuestion, selectionResult.ExplanationText, "请继续补充主动调度需要的信息。")
state.PendingQuestion = question
state.ExpiresAt = nil
return &agentsvcsvc.ActiveScheduleSessionRerunResult{
return &agentsv.ActiveScheduleSessionRerunResult{
AssistantText: question,
SessionState: state,
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
@@ -694,7 +696,7 @@ func buildActiveScheduleSessionRerunFunc(
state.PendingQuestion = ""
state.MissingInfo = nil
state.ExpiresAt = nil
return &agentsvcsvc.ActiveScheduleSessionRerunResult{
return &agentsv.ActiveScheduleSessionRerunResult{
AssistantText: assistantText,
SessionState: state,
SessionStatus: model.ActiveScheduleSessionStatusIgnored,
@@ -766,13 +768,13 @@ func containsString(values []string, target string) bool {
}
func configureAgentService(
agentService *service.AgentService,
agentService *agentsv.AgentService,
ragRuntime ragservice.Runtime,
agentRepo *dao.AgentDAO,
cacheRepo *dao.CacheDAO,
taskRepo *dao.TaskDAO,
taskClassRepo *dao.TaskClassDAO,
scheduleRepo *dao.ScheduleDAO,
taskClient agentsv.TaskRPCClient,
taskClassClient agentsv.TaskClassAgentRPCClient,
scheduleClient agentsv.ScheduleAgentRPCClient,
memoryReaderClient ports.MemoryReaderClient,
memoryCfg memorymodel.Config,
memoryObserver memoryobserve.Observer,
@@ -782,7 +784,7 @@ func configureAgentService(
return
}
// newAgent 依赖接线。
// agent 依赖接线。
agentService.SetAgentStateStore(dao.NewAgentStateStoreAdapter(cacheRepo))
var webSearchProvider web.SearchProvider
@@ -806,151 +808,24 @@ func configureAgentService(
webSearchProvider = &web.MockProvider{}
}
agentService.SetToolRegistry(newagenttools.NewDefaultRegistryWithDeps(newagenttools.DefaultRegistryDeps{
agentService.SetToolRegistry(agenttools.NewDefaultRegistryWithDeps(agenttools.DefaultRegistryDeps{
RAGRuntime: ragRuntime,
WebSearchProvider: webSearchProvider,
TaskClassWriteDeps: newagenttools.TaskClassWriteDeps{
UpsertTaskClass: buildTaskClassUpsertFunc(taskClassRepo),
TaskClassWriteDeps: agenttools.TaskClassWriteDeps{
UpsertTaskClass: agentsv.NewTaskClassRPCUpsertFunc(taskClassClient),
},
}))
agentService.SetScheduleProvider(newagentconv.NewScheduleProvider(scheduleRepo, taskClassRepo))
agentService.SetScheduleProvider(agentsv.NewScheduleRPCProvider(scheduleClient, taskClassClient))
agentService.SetCompactionStore(agentRepo)
agentService.SetQuickTaskDeps(newagentmodel.QuickTaskDeps{
CreateTask: buildQuickTaskCreateFunc(taskRepo),
QueryTasks: buildQuickTaskQueryFunc(agentService),
})
// 1. quick task 创建 / 查询统一走 task zrpc避免 agent 工具链继续直连 tasks 表;
// 2. task-class upsert 与 schedule provider 已在 CP5 统一切到 task-class/schedule zrpc
// 3. task 服务不可用时由 quick_task 节点返回轻量失败文案,不影响 agent 其它分支。
agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
// 1. agent 主链路读取记忆统一走 memory zrpc避免 CP3 后继续直连本进程 memory.Module
// 2. observer / metrics 继续复用启动期装配,保证注入侧观测在 RPC 切流后不丢;
// 3. 旧 memoryModule 仍保留在启动图中,作为迁移期依赖和后续回退面;
// 4. memory 服务暂不可用时,预取链路只记录警告并软降级,不阻断聊天主流程。
agentService.SetMemoryReader(agentsvcsvc.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
// buildTaskClassUpsertFunc adapts TaskClassDAO into the tool-layer task-class
// upsert callback. The whole write (class entity + items) runs inside one
// transaction so a partially applied upsert can never leak out.
//
// Returns the persisted task_class_id and whether the call created a new row
// (input.ID == 0 is treated as "create").
func buildTaskClassUpsertFunc(taskClassRepo *dao.TaskClassDAO) func(userID int, input newagenttools.TaskClassUpsertInput) (newagenttools.TaskClassUpsertPersistResult, error) {
	return func(userID int, input newagenttools.TaskClassUpsertInput) (newagenttools.TaskClassUpsertPersistResult, error) {
		req := input.Request
		taskClassID := 0
		// NOTE(review): "created" is decided from the request, not from the DAO
		// result — assumes AddOrUpdateTaskClass inserts when ID is 0; confirm.
		created := input.ID == 0
		err := taskClassRepo.Transaction(func(txDAO *dao.TaskClassDAO) error {
			// 1. Build the task-class entity first, keeping field semantics
			//    aligned with the existing AddOrUpdateTaskClass path.
			taskClass := &model.TaskClass{
				ID:                 input.ID,
				Name:               &req.Name,
				Mode:               &req.Mode,
				SubjectType:        stringPtrOrNil(req.SubjectType),
				DifficultyLevel:    stringPtrOrNil(req.DifficultyLevel),
				CognitiveIntensity: stringPtrOrNil(req.CognitiveIntensity),
				TotalSlots:         &req.Config.TotalSlots,
				Strategy:           &req.Config.Strategy,
				ExcludedSlots:      req.Config.ExcludedSlots,
				ExcludedDaysOfWeek: req.Config.ExcludedDaysOfWeek,
			}
			taskClass.AllowFillerCourse = &req.Config.AllowFillerCourse
			// 2. Auto mode persists the date range; manual mode may leave it empty.
			if req.StartDate != "" {
				startDate, parseErr := time.ParseInLocation("2006-01-02", req.StartDate, time.Local)
				if parseErr != nil {
					return parseErr
				}
				taskClass.StartDate = &startDate
			}
			if req.EndDate != "" {
				endDate, parseErr := time.ParseInLocation("2006-01-02", req.EndDate, time.Local)
				if parseErr != nil {
					return parseErr
				}
				taskClass.EndDate = &endDate
			}
			// 3. Upsert the entity first to obtain a stable task_class_id that
			//    the items below bind to via category_id.
			updatedID, upsertErr := txDAO.AddOrUpdateTaskClass(userID, taskClass)
			if upsertErr != nil {
				return upsertErr
			}
			taskClassID = updatedID
			// 4. Build the task items and upsert them in one batch.
			items := make([]model.TaskClassItem, 0, len(req.Items))
			for _, itemReq := range req.Items {
				categoryID := taskClassID
				order := itemReq.Order
				content := itemReq.Content
				status := model.TaskItemStatusUnscheduled
				items = append(items, model.TaskClassItem{
					ID:           itemReq.ID,
					CategoryID:   &categoryID,
					Order:        &order,
					Content:      &content,
					EmbeddedTime: itemReq.EmbeddedTime,
					Status:       &status,
				})
			}
			return txDAO.AddOrUpdateTaskClassItems(userID, items)
		})
		if err != nil {
			return newagenttools.TaskClassUpsertPersistResult{}, err
		}
		return newagenttools.TaskClassUpsertPersistResult{
			TaskClassID: taskClassID,
			Created:     created,
		}, nil
	}
}
// buildQuickTaskCreateFunc wraps TaskDAO.AddTask into the quick-task create
// callback shape expected by the agent tool chain. It returns the new task's
// ID, or 0 plus the DAO error when persistence fails.
func buildQuickTaskCreateFunc(taskRepo *dao.TaskDAO) func(userID int, title string, priorityGroup int, estimatedSections int, deadlineAt *time.Time, urgencyThresholdAt *time.Time) (int, error) {
	return func(userID int, title string, priorityGroup int, estimatedSections int, deadlineAt *time.Time, urgencyThresholdAt *time.Time) (int, error) {
		// Assemble the task record; new quick tasks always start uncompleted.
		task := &model.Task{
			UserID:             userID,
			Title:              title,
			Priority:           priorityGroup,
			EstimatedSections:  model.NormalizeEstimatedSections(&estimatedSections),
			IsCompleted:        false,
			DeadlineAt:         deadlineAt,
			UrgencyThresholdAt: urgencyThresholdAt,
		}
		created, addErr := taskRepo.AddTask(task)
		if addErr != nil {
			return 0, addErr
		}
		return created.ID, nil
	}
}
func buildQuickTaskQueryFunc(agentService *service.AgentService) func(ctx context.Context, userID int, params newagentmodel.TaskQueryParams) ([]newagentmodel.TaskQueryResult, error) {
return func(ctx context.Context, userID int, params newagentmodel.TaskQueryParams) ([]newagentmodel.TaskQueryResult, error) {
req := newagentmodel.TaskQueryRequest{
UserID: userID,
Quadrant: params.Quadrant,
SortBy: params.SortBy,
Order: params.Order,
Limit: params.Limit,
IncludeCompleted: params.IncludeCompleted,
Keyword: params.Keyword,
DeadlineBefore: params.DeadlineBefore,
DeadlineAfter: params.DeadlineAfter,
}
records, err := agentService.QueryTasksForTool(ctx, req)
if err != nil {
return nil, err
}
results := make([]newagentmodel.TaskQueryResult, 0, len(records))
for _, r := range records {
deadlineStr := ""
if r.DeadlineAt != nil {
deadlineStr = r.DeadlineAt.In(time.Local).Format("2006-01-02 15:04")
}
results = append(results, newagentmodel.TaskQueryResult{
ID: r.ID,
Title: r.Title,
PriorityGroup: r.PriorityGroup,
EstimatedSections: model.NormalizeEstimatedSections(&r.EstimatedSections),
IsCompleted: r.IsCompleted,
DeadlineAt: deadlineStr,
})
}
return results, nil
}
agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
func buildAPIHandlers(
@@ -958,7 +833,7 @@ func buildAPIHandlers(
taskClassClient ports.TaskClassCommandClient,
courseClient ports.CourseCommandClient,
scheduleClient ports.ScheduleCommandClient,
agentService *service.AgentService,
agentService *agentsv.AgentService,
memoryClient ports.MemoryCommandClient,
activeSchedulerClient ports.ActiveSchedulerCommandClient,
notificationClient ports.NotificationCommandClient,
@@ -1018,11 +893,3 @@ func (r *appRuntime) close() {
r.eventBus.Close()
}
}
// stringPtrOrNil trims the input and returns a pointer to the trimmed text,
// or nil when nothing but whitespace remains.
func stringPtrOrNil(value string) *string {
	if cleaned := strings.TrimSpace(value); cleaned != "" {
		return &cleaned
	}
	return nil
}

View File

@@ -12,6 +12,7 @@ func UserAddTaskRequestToModel(request *model.UserAddTaskRequest, userID int) *m
Priority: request.PriorityGroup,
EstimatedSections: model.NormalizeEstimatedSections(&request.EstimatedSections),
DeadlineAt: request.DeadlineAt,
UrgencyThresholdAt: request.UrgencyThresholdAt,
UserID: userID,
}
}
@@ -28,7 +29,7 @@ func ModelToUserAddTaskResponse(task *model.Task) *model.UserAddTaskResponse {
EstimatedSections: model.NormalizeEstimatedSections(&task.EstimatedSections),
DeadlineAt: task.DeadlineAt,
Status: status,
CreatedAt: time.Now(), // 创建时间为当前时间
CreatedAt: time.Now(), // 创建时间使用当前服务时间,保持既有响应语义。
}
}

View File

@@ -4,10 +4,10 @@ import (
"context"
"errors"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// AgentStateStoreAdapter 将 CacheDAO 适配为 newAgent 的 AgentStateStore 接口。
// AgentStateStoreAdapter 将 CacheDAO 适配为 agent 的 AgentStateStore 接口。
//
// 职责边界:
// 1. CacheDAO 的 LoadAgentState 使用 out-parameter 模式,需要适配到返回值模式;
@@ -23,7 +23,7 @@ func NewAgentStateStoreAdapter(cache *CacheDAO) *AgentStateStoreAdapter {
}
// Save 序列化并保存 agent 状态快照。
func (a *AgentStateStoreAdapter) Save(ctx context.Context, conversationID string, snapshot *newagentmodel.AgentStateSnapshot) error {
func (a *AgentStateStoreAdapter) Save(ctx context.Context, conversationID string, snapshot *agentmodel.AgentStateSnapshot) error {
if a == nil || a.cache == nil {
return errors.New("agent state store adapter is not initialized")
}
@@ -31,12 +31,12 @@ func (a *AgentStateStoreAdapter) Save(ctx context.Context, conversationID string
}
// Load 读取并反序列化 agent 状态快照。
func (a *AgentStateStoreAdapter) Load(ctx context.Context, conversationID string) (*newagentmodel.AgentStateSnapshot, bool, error) {
func (a *AgentStateStoreAdapter) Load(ctx context.Context, conversationID string) (*agentmodel.AgentStateSnapshot, bool, error) {
if a == nil || a.cache == nil {
return nil, false, errors.New("agent state store adapter is not initialized")
}
var snapshot newagentmodel.AgentStateSnapshot
var snapshot agentmodel.AgentStateSnapshot
ok, err := a.cache.LoadAgentState(ctx, conversationID, &snapshot)
if err != nil || !ok {
return nil, ok, err

View File

@@ -12,18 +12,18 @@ import (
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
"github.com/LoveLosita/smartflow/backend/service"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"gorm.io/gorm"
)
type AgentHandler struct {
svc *service.AgentService
svc *agentsv.AgentService
}
// NewAgentHandler 组装 AgentHandler。
func NewAgentHandler(svc *service.AgentService) *AgentHandler {
func NewAgentHandler(svc *agentsv.AgentService) *AgentHandler {
return &AgentHandler{
svc: svc,
}

View File

@@ -148,6 +148,18 @@ func (c *Client) SmartPlanningMulti(ctx context.Context, req schedulecontracts.S
return jsonFromResponse(resp, err)
}
// GetAgentWeekSchedule marshals the contract request into the generic JSON
// envelope, invokes the schedule zrpc endpoint, and unwraps the raw JSON
// response for the caller.
func (c *Client) GetAgentWeekSchedule(ctx context.Context, req schedulecontracts.AgentScheduleWeekRequest) (json.RawMessage, error) {
	// Fail fast when the zrpc client has not been wired up yet.
	if readyErr := c.ensureReady(); readyErr != nil {
		return nil, readyErr
	}
	body, marshalErr := json.Marshal(req)
	if marshalErr != nil {
		return nil, marshalErr
	}
	resp, callErr := c.rpc.GetAgentWeekSchedule(ctx, &schedulepb.JSONRequest{PayloadJson: body})
	return jsonFromResponse(resp, callErr)
}
func (c *Client) ensureReady() error {
if c == nil || c.rpc == nil {
return errors.New("schedule zrpc client is not initialized")

View File

@@ -81,6 +81,11 @@ func (c *Client) UpdateTaskClass(ctx context.Context, req taskclasscontracts.Ups
return jsonFromResponse(resp, err)
}
// GetAgentTaskClasses delegates to the shared JSON-call helper for the
// task-class zrpc endpoint and unwraps the response envelope.
func (c *Client) GetAgentTaskClasses(ctx context.Context, req taskclasscontracts.AgentTaskClassesRequest) (json.RawMessage, error) {
	result, callErr := c.callJSON(ctx, c.rpc.GetAgentTaskClasses, req)
	return jsonFromResponse(result, callErr)
}
func (c *Client) InsertTaskClassItemIntoSchedule(ctx context.Context, req taskclasscontracts.InsertTaskClassItemIntoScheduleRequest) (json.RawMessage, error) {
resp, err := c.callJSON(ctx, c.rpc.InsertTaskClassItemIntoSchedule, req)
return jsonFromResponse(resp, err)

View File

@@ -26,7 +26,7 @@ const (
// 职责边界:
// 1. 负责把 memory_items 读出来并做用户设置过滤;
// 2. 负责最小可用的排序与截断,为后续 prompt 注入提供稳定入口;
// 3. 不直接依赖 newAgent不负责真正把记忆拼进 prompt。
// 3. 不直接依赖 agent不负责真正把记忆拼进 prompt。
type ReadService struct {
itemRepo *memoryrepo.ItemRepo
settingsRepo *memoryrepo.SettingsRepo

View File

@@ -47,7 +47,7 @@ const (
// }
// }
//
// TODO(newagent/api): 进入聊天主流程前,优先调用 req.ResumeRequest();若命中恢复协议,则不要把本轮请求按普通聊天处理。
// TODO(agent/api): 进入聊天主流程前,优先调用 req.ResumeRequest();若命中恢复协议,则不要把本轮请求按普通聊天处理。
type AgentResumeRequest struct {
InteractionID string `json:"interaction_id"`
Type AgentResumeType `json:"type,omitempty"`

View File

@@ -82,6 +82,7 @@ type UserAddTaskRequest struct {
PriorityGroup int `json:"priority_group"`
EstimatedSections int `json:"estimated_sections"`
DeadlineAt *time.Time `json:"deadline_at"`
UrgencyThresholdAt *time.Time `json:"urgency_threshold_at"`
}
// UserCompleteTaskRequest 是"标记任务完成"接口的请求体。

View File

@@ -1,14 +0,0 @@
// Package newagentnode is a thin compatibility shim: it re-exports the
// execute-node types and entry point from newAgent/node/execute so callers
// that still import the old package path keep compiling during the migration.
package newagentnode

import (
	"context"

	newagentexecute "github.com/LoveLosita/smartflow/backend/newAgent/node/execute"
)

// ExecuteNodeInput aliases the real execute-node input type.
type ExecuteNodeInput = newagentexecute.ExecuteNodeInput

// ExecuteRoundObservation aliases the per-round execute observation type.
type ExecuteRoundObservation = newagentexecute.ExecuteRoundObservation

// RunExecuteNode delegates directly to the underlying implementation.
func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
	return newagentexecute.RunExecuteNode(ctx, input)
}

View File

@@ -1,68 +0,0 @@
package service
import (
"github.com/LoveLosita/smartflow/backend/dao"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/service/agentsvc"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
)
// AgentService is the service-layer compatibility alias for agentsvc.AgentService.
// Migration intent:
// 1) gather the Agent business implementation under service/agentsvc for a tidier layout;
// 2) avoid breaking existing callers: api/cmd can keep referring to service.AgentService.
type AgentService = agentsvc.AgentService

// NewAgentService is the migration-period compatibility constructor.
//
// Notes:
// 1) keeps the service-layer entry form so api/cmd never need to know the agentsvc package path;
// 2) the active-schedule session DAO is passed through explicitly here, so the chat entry does not reach back to a global singleton;
// 3) the real construction logic now lives in the service/agentsvc package.
func NewAgentService(
	llmService *llmservice.Service,
	repo *dao.AgentDAO,
	taskRepo *dao.TaskDAO,
	cacheDAO *dao.CacheDAO,
	agentRedis *dao.AgentCache,
	activeScheduleDAO *dao.ActiveScheduleDAO,
	activeSessionDAO *dao.ActiveScheduleSessionDAO,
	eventPublisher outboxinfra.EventPublisher,
) *AgentService {
	return agentsvc.NewAgentService(llmService, repo, taskRepo, cacheDAO, agentRedis, activeScheduleDAO, activeSessionDAO, eventPublisher)
}
// NewAgentServiceWithSchedule builds on the base AgentService and injects the
// scheduling dependencies.
//
// Design intent:
// 1) function injection keeps the agentsvc package from depending directly on the service-layer ScheduleService;
// 2) the scheduling dependencies are optional: when absent, the scheduling route falls back to plain chat;
// 3) the active-schedule session DAO still flows through the unified constructor, so the scheduling branch never assembles its own repositories.
func NewAgentServiceWithSchedule(
	llmService *llmservice.Service,
	repo *dao.AgentDAO,
	taskRepo *dao.TaskDAO,
	cacheDAO *dao.CacheDAO,
	agentRedis *dao.AgentCache,
	activeScheduleDAO *dao.ActiveScheduleDAO,
	activeSessionDAO *dao.ActiveScheduleSessionDAO,
	eventPublisher outboxinfra.EventPublisher,
	scheduleSvc *ScheduleService,
	taskSvc *TaskService,
) *AgentService {
	svc := agentsvc.NewAgentService(llmService, repo, taskRepo, cacheDAO, agentRedis, activeScheduleDAO, activeSessionDAO, eventPublisher)
	// Inject scheduling dependencies: wrap service-layer methods as closures to avoid an import cycle.
	if scheduleSvc != nil {
		svc.SmartPlanningMultiRawFunc = scheduleSvc.SmartPlanningMultiRaw
		svc.HybridScheduleWithPlanMultiFunc = scheduleSvc.HybridScheduleWithPlanMulti
		svc.ResolvePlanningWindowFunc = scheduleSvc.ResolvePlanningWindowByTaskClasses
	}
	// Inject the task urgency-promotion dependency: reuse TaskService's unified promotion + outbox delivery chain.
	if taskSvc != nil {
		svc.GetTasksWithUrgencyPromotionFunc = taskSvc.GetTasksWithUrgencyPromotion
	}
	return svc
}

View File

@@ -10,7 +10,7 @@ import (
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/model"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
@@ -86,7 +86,7 @@ func RegisterAgentStateSnapshotHandler(
func PublishAgentStateSnapshot(
ctx context.Context,
publisher outboxinfra.EventPublisher,
snapshot *newagentmodel.AgentStateSnapshot,
snapshot *agentmodel.AgentStateSnapshot,
conversationID string,
userID int,
) {

View File

@@ -211,7 +211,7 @@ func loadConversationTimelineMaxSeq(
// 说明:
// 1. 这里只在缓存存在时执行;未接 Redis 的环境直接跳过即可;
// 2. 需要整表重建而不是只 append 一条,因为旧缓存里已经存在错误 seq 的事件;
// 3. 这里不抽到 agentsvc 复用,是因为 events 不能反向依赖 service否则会形成循环依赖。
// 3. 这里不抽到 agent/sv 复用,是因为 events 不能反向依赖 service否则会形成循环依赖。
func rebuildConversationTimelineCache(
ctx context.Context,
agentRepo *dao.AgentDAO,

View File

@@ -29,7 +29,7 @@ const (
//
// 职责边界:
// 1. 只推进主动调度 trigger 的后台状态机,不负责启动 outbox worker
// 2. dry-run 与选择器都复用 active_scheduler 独立模块,不再往 newAgent 里塞主动调度逻辑;
// 2. dry-run 与选择器都复用 active_scheduler 独立模块,不再往 agent 里塞主动调度逻辑;
// 3. notification 只发布 requested 事件,不直接接真实飞书 provider。
type TriggerWorkflowService struct {
activeDAO *dao.ActiveScheduleDAO

View File

@@ -1,14 +1,14 @@
package newagentconv
package agentconv
import (
"fmt"
"time"
"github.com/LoveLosita/smartflow/backend/model"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// ScheduleStateToPreview 将 newAgent 的 ScheduleState 转换为前端预览缓存格式。
// ScheduleStateToPreview 将 agent 的 ScheduleState 转换为前端预览缓存格式。
//
// 职责边界:
// 1. 只做数据格式转换,不做业务逻辑;

View File

@@ -1,4 +1,4 @@
package newagentconv
package agentconv
import (
"context"
@@ -9,7 +9,7 @@ import (
baseconv "github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/dao"
"github.com/LoveLosita/smartflow/backend/model"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// ScheduleProvider 实现 model.ScheduleStateProvider 接口。
@@ -181,6 +181,16 @@ func buildWindowFromTaskClasses(taskClasses []model.TaskClass) (windowDays []Win
return windowDays, weeks
}
// BuildWindowFromTaskClasses exposes the task-class window computation for RPC provider reuse.
//
// Responsibility boundary:
// 1. only reuses the window-derivation algorithm of the old DAO provider, so day_mapping semantics stay identical before and after migration;
// 2. no database reads and no RPC calls;
// 3. returns empty slices when no valid dates exist; the caller decides whether to fall back to the current week.
func BuildWindowFromTaskClasses(taskClasses []model.TaskClass) (windowDays []WindowDay, weeks []int) {
	return buildWindowFromTaskClasses(taskClasses)
}
// buildCurrentWeekWindow 构造“当前周 7 天”的兜底窗口。
func buildCurrentWeekWindow() (windowDays []WindowDay, weeks []int, err error) {
now := time.Now()
@@ -195,6 +205,11 @@ func buildCurrentWeekWindow() (windowDays []WindowDay, weeks []int, err error) {
return windowDays, []int{currentWeek}, nil
}
// BuildCurrentWeekWindow exposes the current-week fallback window for RPC provider reuse.
func BuildCurrentWeekWindow() (windowDays []WindowDay, weeks []int, err error) {
	return buildCurrentWeekWindow()
}
// isRelativeDateBefore 比较两个“相对周/天”坐标的先后关系。
func isRelativeDateBefore(leftWeek, leftDay, rightWeek, rightDay int) bool {
if leftWeek != rightWeek {
@@ -251,8 +266,53 @@ func (p *ScheduleProvider) LoadTaskClassMetas(ctx context.Context, userID int, t
if err != nil {
return nil, fmt.Errorf("加载任务类元数据失败: %w", err)
}
metas := make([]schedule.TaskClassMeta, 0, len(complete))
for _, tc := range complete {
return TaskClassesToScheduleMetas(complete), nil
}
// derefString unwraps an optional string pointer, mapping nil to "".
func derefString(s *string) string {
	if s != nil {
		return *s
	}
	return ""
}
// buildExtraItemCategories collects a category mapping for task events in the
// existing schedules whose item does not belong to any of the given
// taskClasses. When all task classes were loaded, this is normally empty.
func buildExtraItemCategories(schedules []model.Schedule, taskClasses []model.TaskClass) map[int]string {
	// Index every item ID owned by the supplied task classes.
	known := make(map[int]bool)
	for _, class := range taskClasses {
		for _, item := range class.Items {
			known[item.ID] = true
		}
	}
	// Any task-typed event pointing at an unknown item gets the fallback label.
	extras := make(map[int]string)
	for _, sched := range schedules {
		event := sched.Event
		if event == nil || event.Type != "task" || event.RelID == nil {
			continue
		}
		if id := *event.RelID; !known[id] {
			extras[id] = "任务"
		}
	}
	return extras
}
// BuildExtraItemCategories exposes the extra-item category fallback mapping for RPC provider reuse.
func BuildExtraItemCategories(schedules []model.Schedule, taskClasses []model.TaskClass) map[int]string {
	return buildExtraItemCategories(schedules, taskClasses)
}
// TaskClassesToScheduleMetas 把完整任务类转换成工具层约束元数据。
//
// 职责边界:
// 1. 只做字段映射,不筛选 pending item
// 2. DAO provider 与 RPC provider 共用,避免迁移后 Plan 阶段元数据口径分裂;
// 3. nil 指针字段按工具层零值处理。
func TaskClassesToScheduleMetas(taskClasses []model.TaskClass) []schedule.TaskClassMeta {
metas := make([]schedule.TaskClassMeta, 0, len(taskClasses))
for _, tc := range taskClasses {
meta := schedule.TaskClassMeta{
ID: tc.ID,
Name: derefString(tc.Name),
@@ -289,35 +349,5 @@ func (p *ScheduleProvider) LoadTaskClassMetas(ctx context.Context, userID int, t
}
metas = append(metas, meta)
}
return metas, nil
}
func derefString(s *string) string {
if s == nil {
return ""
}
return *s
}
// buildExtraItemCategories 从已有日程中提取不属于给定 taskClasses 的 task event 的 category 映射。
// 当加载全部 taskClass 时,通常返回空 map。
func buildExtraItemCategories(schedules []model.Schedule, taskClasses []model.TaskClass) map[int]string {
knownItemIDs := make(map[int]bool)
for _, tc := range taskClasses {
for _, item := range tc.Items {
knownItemIDs[item.ID] = true
}
}
categories := make(map[int]string)
for _, s := range schedules {
if s.Event == nil || s.Event.Type != "task" || s.Event.RelID == nil {
continue
}
itemID := *s.Event.RelID
if !knownItemIDs[itemID] {
categories[itemID] = "任务"
}
}
return categories
return metas
}

View File

@@ -1,10 +1,10 @@
package newagentconv
package agentconv
import (
"sort"
"github.com/LoveLosita/smartflow/backend/model"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// WindowDay 表示排课窗口中的一天(相对周 + 周几)。
@@ -13,7 +13,7 @@ type WindowDay struct {
DayOfWeek int
}
// LoadScheduleState 将数据库层的 schedules + taskClasses 聚合为 newAgent 工具层可直接操作的 ScheduleState。
// LoadScheduleState 将数据库层的 schedules + taskClasses 聚合为 agent 工具层可直接操作的 ScheduleState。
//
// 职责边界:
// 1. 只负责数据映射与状态归一,不做数据库读写;

View File

@@ -1,9 +1,9 @@
package newagentconv
package agentconv
import (
"github.com/LoveLosita/smartflow/backend/model"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
"github.com/LoveLosita/smartflow/backend/respond"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// ApplyPlacedItems 将前端提交的绝对时间放置项应用到 ScheduleState。

View File

@@ -4,8 +4,8 @@ import (
"context"
"errors"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentnode "github.com/LoveLosita/smartflow/backend/newAgent/node"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentnode "github.com/LoveLosita/smartflow/backend/services/agent/node"
"github.com/cloudwego/eino/compose"
)
@@ -22,8 +22,8 @@ const (
NodeQuickTask = "quick_task"
)
func RunAgentGraph(ctx context.Context, input newagentmodel.AgentGraphRunInput) (*newagentmodel.AgentGraphState, error) {
state := newagentmodel.NewAgentGraphState(input)
func RunAgentGraph(ctx context.Context, input agentmodel.AgentGraphRunInput) (*agentmodel.AgentGraphState, error) {
state := agentmodel.NewAgentGraphState(input)
if state == nil {
return nil, errors.New("agent graph: graph state is nil")
}
@@ -33,8 +33,8 @@ func RunAgentGraph(ctx context.Context, input newagentmodel.AgentGraphRunInput)
return nil, errors.New("agent graph: flow state is nil")
}
nodes := newagentnode.NewAgentNodes()
g := compose.NewGraph[*newagentmodel.AgentGraphState, *newagentmodel.AgentGraphState]()
nodes := agentnode.NewAgentNodes()
g := compose.NewGraph[*agentmodel.AgentGraphState, *agentmodel.AgentGraphState]()
// --- 注册节点 ---
if err := g.AddLambdaNode(NodeChat, compose.InvokableLambda(nodes.Chat)); err != nil {
@@ -164,7 +164,7 @@ func RunAgentGraph(ctx context.Context, input newagentmodel.AgentGraphRunInput)
// --- 分支函数 ---
func branchAfterChat(_ context.Context, st *newagentmodel.AgentGraphState) (string, error) {
func branchAfterChat(_ context.Context, st *agentmodel.AgentGraphState) (string, error) {
if st == nil {
return compose.END, nil
}
@@ -177,28 +177,28 @@ func branchAfterChat(_ context.Context, st *newagentmodel.AgentGraphState) (stri
return compose.END, nil
}
switch flowState.Phase {
case newagentmodel.PhaseChatting:
case agentmodel.PhaseChatting:
// 简单任务直接回复 / 深度回答完成,回复已在 Chat 节点生成。
return compose.END, nil
case newagentmodel.PhasePlanning:
case agentmodel.PhasePlanning:
return NodePlan, nil
case newagentmodel.PhaseWaitingConfirm:
case agentmodel.PhaseWaitingConfirm:
return NodeConfirm, nil
case newagentmodel.PhaseQuickTask:
case agentmodel.PhaseQuickTask:
return NodeQuickTask, nil
case newagentmodel.PhaseExecuting:
case agentmodel.PhaseExecuting:
if flowState.NeedsRoughBuild && st.Deps.RoughBuildFunc != nil {
return NodeRoughBuild, nil
}
return NodeExecute, nil
case newagentmodel.PhaseDone:
case agentmodel.PhaseDone:
return NodeDeliver, nil
default:
return compose.END, nil
}
}
func branchAfterPlan(_ context.Context, st *newagentmodel.AgentGraphState) (string, error) {
func branchAfterPlan(_ context.Context, st *agentmodel.AgentGraphState) (string, error) {
if st == nil {
return NodePlan, nil
}
@@ -210,22 +210,22 @@ func branchAfterPlan(_ context.Context, st *newagentmodel.AgentGraphState) (stri
if flowState == nil {
return NodePlan, nil
}
if flowState.Phase == newagentmodel.PhaseWaitingConfirm {
if flowState.Phase == agentmodel.PhaseWaitingConfirm {
return NodeConfirm, nil
}
if flowState.Phase == newagentmodel.PhaseExecuting {
if flowState.Phase == agentmodel.PhaseExecuting {
if flowState.NeedsRoughBuild && st.Deps.RoughBuildFunc != nil {
return NodeRoughBuild, nil
}
return NodeExecute, nil
}
if flowState.Phase == newagentmodel.PhaseDone {
if flowState.Phase == agentmodel.PhaseDone {
return NodeDeliver, nil
}
return NodePlan, nil
}
func branchAfterConfirm(_ context.Context, st *newagentmodel.AgentGraphState) (string, error) {
func branchAfterConfirm(_ context.Context, st *agentmodel.AgentGraphState) (string, error) {
if st == nil {
return NodePlan, nil
}
@@ -238,24 +238,24 @@ func branchAfterConfirm(_ context.Context, st *newagentmodel.AgentGraphState) (s
return NodePlan, nil
}
switch flowState.Phase {
case newagentmodel.PhaseExecuting:
case agentmodel.PhaseExecuting:
// 若 Plan 节点标记了需要粗排且 RoughBuildFunc 已注入,走粗排节点。
if flowState.NeedsRoughBuild && st.Deps.RoughBuildFunc != nil {
return NodeRoughBuild, nil
}
return NodeExecute, nil
case newagentmodel.PhaseWaitingConfirm:
case agentmodel.PhaseWaitingConfirm:
// confirm 节点产出确认请求后,当前连接必须进入 interrupt 收口。
// 真正的用户确认结果应由外部回调写回状态,再重新进入 graph。
return NodeInterrupt, nil
case newagentmodel.PhaseDone:
case agentmodel.PhaseDone:
return NodeDeliver, nil
default:
return NodePlan, nil
}
}
func branchAfterRoughBuild(_ context.Context, st *newagentmodel.AgentGraphState) (string, error) {
func branchAfterRoughBuild(_ context.Context, st *agentmodel.AgentGraphState) (string, error) {
if st == nil {
return NodeExecute, nil
}
@@ -267,13 +267,13 @@ func branchAfterRoughBuild(_ context.Context, st *newagentmodel.AgentGraphState)
if flowState == nil {
return NodeExecute, nil
}
if flowState.Phase == newagentmodel.PhaseDone {
if flowState.Phase == agentmodel.PhaseDone {
return NodeDeliver, nil
}
return NodeExecute, nil
}
func branchAfterExecute(_ context.Context, st *newagentmodel.AgentGraphState) (string, error) {
func branchAfterExecute(_ context.Context, st *agentmodel.AgentGraphState) (string, error) {
if st == nil {
return NodeExecute, nil
}
@@ -285,7 +285,7 @@ func branchAfterExecute(_ context.Context, st *newagentmodel.AgentGraphState) (s
if flowState == nil {
return NodeExecute, nil
}
if flowState.Phase == newagentmodel.PhaseWaitingConfirm {
if flowState.Phase == agentmodel.PhaseWaitingConfirm {
return NodeConfirm, nil
}
// 1. 这里只围绕“是否已经写入正式终止结果”做路由,避免把“刚好用完最后一轮预算”
@@ -294,13 +294,13 @@ func branchAfterExecute(_ context.Context, st *newagentmodel.AgentGraphState) (s
// 这样 rough_build / execute / deliver 才都围绕同一份 terminal outcome 工作;
// 3. 若此处直接按 RoundUsed>=MaxRounds 跳 Deliver会绕过 Execute 内的 Exhaust 写入,
// 导致 deliver 收口和后续预览落盘语义不一致。
if flowState.Phase == newagentmodel.PhaseDone {
if flowState.Phase == agentmodel.PhaseDone {
return NodeDeliver, nil
}
return NodeExecute, nil
}
func branchIfInterrupted(st *newagentmodel.AgentGraphState) (string, bool) {
func branchIfInterrupted(st *agentmodel.AgentGraphState) (string, bool) {
if st == nil {
return "", false
}

View File

@@ -3,7 +3,7 @@ package model
import (
"strings"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// Phase 表示 agent 主循环当前所处的大阶段。

View File

@@ -5,9 +5,9 @@ import (
"strings"
"time"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
@@ -47,7 +47,7 @@ type RoughBuildPlacement struct {
}
// RoughBuildFunc 是粗排算法的依赖注入签名。
// 由 service 层封装 HybridScheduleWithPlanMulti 后注入,newAgent 层不直接依赖外层 model。
// 由 service 层封装 HybridScheduleWithPlanMulti 后注入,agent 层不直接依赖外层 model。
type RoughBuildFunc func(ctx context.Context, userID int, taskClassIDs []int) ([]RoughBuildPlacement, error)
// WriteSchedulePreviewFunc 是排程预览写入的依赖注入签名。
@@ -56,7 +56,7 @@ type RoughBuildFunc func(ctx context.Context, userID int, taskClassIDs []int) ([
// 2. deliver 结束时再做最终覆盖写,保障收口状态一致。
type WriteSchedulePreviewFunc func(ctx context.Context, state *schedule.ScheduleState, userID int, conversationID string, taskClassIDs []int) error
// PersistVisibleMessageFunc 是 newAgent 主循环逐条持久化可见消息的回调签名。
// PersistVisibleMessageFunc 是 agent 主循环逐条持久化可见消息的回调签名。
//
// 职责边界:
// 1. 只处理真正对用户可见的 assistant speak不处理工具结果或内部纠错提示
@@ -75,9 +75,9 @@ type AgentGraphDeps struct {
PlanClient *llmservice.Client
ExecuteClient *llmservice.Client
DeliverClient *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
ChunkEmitter *agentstream.ChunkEmitter
StateStore AgentStateStore
ToolRegistry *newagenttools.ToolRegistry
ToolRegistry *agenttools.ToolRegistry
ScheduleProvider ScheduleStateProvider // 按 DAO 注入Execute 节点按需加载 ScheduleState
CompactionStore CompactionStore // 按 DAO 注入,用于 Execute 上下文压缩持久化
RoughBuildFunc RoughBuildFunc // 按 Service 注入,粗排算法入口
@@ -93,7 +93,7 @@ type AgentGraphDeps struct {
MemoryFuture chan string // buffered(1),携带 renderMemoryPinnedContentByMode 的输出
MemoryConsumed bool // 保证 channel 只读一次,后续 Execute ReAct 循环跳过等待
// PersistVisibleMessage 按 Service 注入,newAgent 每个节点产出的可见 speak
// PersistVisibleMessage 按 Service 注入,agent 每个节点产出的可见 speak
// 都会在 AppendHistory 之后立刻调用这个回调,把消息同步落到 Redis + MySQL。
PersistVisibleMessage PersistVisibleMessageFunc
@@ -113,7 +113,7 @@ type QuickTaskDeps struct {
QueryTasks func(ctx context.Context, userID int, params TaskQueryParams) ([]TaskQueryResult, error)
}
// --- 记忆 pinned block 常量(供 agentsvc 和 node 层共享) ---
// --- 记忆 pinned block 常量(供 agent/sv 和 node 层共享) ---
const (
// MemoryContextBlockKey 记忆上下文在 ConversationContext PinnedBlock 中的唯一 key。
@@ -130,12 +130,12 @@ const (
// 1. 依赖为空时回退到 Noop emitter避免骨架期因为没接前端而到处判空
// 2. 这里只兜底"能安全调用",不负责填充真实 request_id / model_name
// 3. 后续 service 层一旦接上真实 emitter会自然覆盖这里的空实现。
func (d *AgentGraphDeps) EnsureChunkEmitter() *newagentstream.ChunkEmitter {
func (d *AgentGraphDeps) EnsureChunkEmitter() *agentstream.ChunkEmitter {
if d == nil {
return newagentstream.NewChunkEmitter(newagentstream.NoopPayloadEmitter(), "", "", 0)
return agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", 0)
}
if d.ChunkEmitter == nil {
d.ChunkEmitter = newagentstream.NewChunkEmitter(newagentstream.NoopPayloadEmitter(), "", "", 0)
d.ChunkEmitter = agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", 0)
}
return d.ChunkEmitter
}
@@ -195,7 +195,7 @@ func (d *AgentGraphDeps) ResolveDeliverClient() *llmservice.Client {
return d.ChatClient
}
// AgentGraphRunInput 是执行 newAgent 通用 graph 所需的完整入口参数。
// AgentGraphRunInput 是执行 agent 通用 graph 所需的完整入口参数。
//
// 字段说明:
// 1. RuntimeState可持久化流程状态与 pending interaction
@@ -276,15 +276,15 @@ func (s *AgentGraphState) EnsureConversationContext() *ConversationContext {
}
// EnsureChunkEmitter 返回 graph 可安全调用的 chunk 发射器。
func (s *AgentGraphState) EnsureChunkEmitter() *newagentstream.ChunkEmitter {
func (s *AgentGraphState) EnsureChunkEmitter() *agentstream.ChunkEmitter {
if s == nil {
return newagentstream.NewChunkEmitter(newagentstream.NoopPayloadEmitter(), "", "", 0)
return agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", 0)
}
return s.Deps.EnsureChunkEmitter()
}
// ResolveToolRegistry 返回可用的工具注册表。
func (s *AgentGraphState) ResolveToolRegistry() *newagenttools.ToolRegistry {
func (s *AgentGraphState) ResolveToolRegistry() *agenttools.ToolRegistry {
if s == nil {
return nil
}

View File

@@ -58,7 +58,7 @@ type PendingToolCallSnapshot struct {
// 2. ResumeNode / ResumePhase / ResumeStep 用来记录恢复点,避免用户回答后整条链路从头乱跑;
// 3. 该结构设计成可被 Redis + MySQL 直接存储的快照骨架,后续只需要补序列化与持久化接线。
//
// TODO(newagent/api): 后续由"用户追问回复接口 / 确认回调接口"读取这份快照并恢复运行。
// TODO(agent/api): 后续由"用户追问回复接口 / 确认回调接口"读取这份快照并恢复运行。
type PendingInteraction struct {
Version int `json:"version"`
InteractionID string `json:"interaction_id"`

View File

@@ -3,7 +3,7 @@ package model
import (
"context"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// AgentStateSnapshot 是需要持久化的 agent 运行态最小快照。
@@ -29,7 +29,7 @@ type AgentStateSnapshot struct {
//
// 实现层:
// 1. dao/cache.go 上的 CacheDAO 隐式实现该接口Go duck typing
// 2. newAgent 包不直接 import dao由 Service 层在组装 Deps 时注入。
// 2. agent 包不直接 import dao由 Service 层在组装 Deps 时注入。
type AgentStateStore interface {
// Save 序列化并保存一份 agent 状态快照。
//

View File

@@ -22,7 +22,7 @@ type TaskQueryParams struct {
// TaskQueryResult 描述快捷任务查询返回给上层的轻量任务视图。
//
// 职责边界:
// 1. 这里只保留展示所需字段,避免把底层任务模型直接暴露给 newAgent 节点;
// 1. 这里只保留展示所需字段,避免把底层任务模型直接暴露给 agent 节点;
// 2. 结果既可用于 quick_task 节点文本回复,也可供 service 装配其他轻量输出;
// 3. 不负责序列化策略和文案渲染。
type TaskQueryResult struct {

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -8,9 +8,9 @@ import (
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
// AgentNodes 负责把 graph 层的节点调用统一转成 node 层真正的执行入口。
@@ -27,7 +27,7 @@ func NewAgentNodes() *AgentNodes {
}
// Chat 负责把 graph 的 chat 节点请求转给 RunChatNode。
func (n *AgentNodes) Chat(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Chat(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("chat node: state is nil")
}
@@ -54,7 +54,7 @@ func (n *AgentNodes) Chat(ctx context.Context, st *newagentmodel.AgentGraphState
}
// Confirm 负责把 graph 的 confirm 节点请求转给 RunConfirmNode。
func (n *AgentNodes) Confirm(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Confirm(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("confirm node: state is nil")
}
@@ -72,7 +72,7 @@ func (n *AgentNodes) Confirm(ctx context.Context, st *newagentmodel.AgentGraphSt
}
// Plan 负责把 graph 的 plan 节点请求转给 RunPlanNode。
func (n *AgentNodes) Plan(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Plan(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("plan node: state is nil")
}
@@ -100,7 +100,7 @@ func (n *AgentNodes) Plan(ctx context.Context, st *newagentmodel.AgentGraphState
}
// RoughBuild 负责把 graph 的 rough_build 节点请求转给 RunRoughBuildNode。
func (n *AgentNodes) RoughBuild(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) RoughBuild(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("rough_build node: state is nil")
}
@@ -114,7 +114,7 @@ func (n *AgentNodes) RoughBuild(ctx context.Context, st *newagentmodel.AgentGrap
}
// Interrupt 负责把 graph 的 interrupt 节点请求转给 RunInterruptNode。
func (n *AgentNodes) Interrupt(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Interrupt(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("interrupt node: state is nil")
}
@@ -132,7 +132,7 @@ func (n *AgentNodes) Interrupt(ctx context.Context, st *newagentmodel.AgentGraph
}
// Execute 负责把 graph 的 execute 节点请求转给 RunExecuteNode。
func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Execute(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("execute node: state is nil")
}
@@ -154,11 +154,11 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
}
schemas := st.Deps.ToolRegistry.SchemasForActiveDomain(activeDomain, activePacks)
if flowState := st.EnsureFlowState(); flowState != nil && flowState.ActiveOptimizeOnly {
schemas = newagenttools.FilterSchemasForActiveOptimize(schemas)
schemas = agenttools.FilterSchemasForActiveOptimize(schemas)
}
toolSchemas := make([]newagentmodel.ToolSchemaContext, len(schemas))
toolSchemas := make([]agentmodel.ToolSchemaContext, len(schemas))
for i, s := range schemas {
toolSchemas[i] = newagentmodel.ToolSchemaContext{
toolSchemas[i] = agentmodel.ToolSchemaContext{
Name: s.Name,
Desc: s.Desc,
SchemaText: s.SchemaText,
@@ -194,7 +194,7 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
}
// QuickTask 负责把 graph 的 quick_task 节点请求转给 RunQuickTaskNode。
func (n *AgentNodes) QuickTask(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) QuickTask(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("quick_task node: state is nil")
}
@@ -219,7 +219,7 @@ func (n *AgentNodes) QuickTask(ctx context.Context, st *newagentmodel.AgentGraph
}
// Deliver 负责把 graph 的 deliver 节点请求转给 RunDeliverNode。
func (n *AgentNodes) Deliver(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
func (n *AgentNodes) Deliver(ctx context.Context, st *agentmodel.AgentGraphState) (*agentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("deliver node: state is nil")
}
@@ -260,7 +260,7 @@ func (n *AgentNodes) Deliver(ctx context.Context, st *newagentmodel.AgentGraphSt
// 1. 只在首次调用时等待 channel后续调用直接跳过。
// 2. 超时后保留原有上下文,不额外覆盖。
// 3. 记忆为空时也不做额外写入,避免污染 prompt。
func ensureFreshMemory(st *newagentmodel.AgentGraphState) {
func ensureFreshMemory(st *agentmodel.AgentGraphState) {
if st == nil || st.Deps.MemoryConsumed || st.Deps.MemoryFuture == nil {
return
}
@@ -269,19 +269,19 @@ func ensureFreshMemory(st *newagentmodel.AgentGraphState) {
select {
case content := <-st.Deps.MemoryFuture:
if strings.TrimSpace(content) != "" {
st.EnsureConversationContext().UpsertPinnedBlock(newagentmodel.ContextBlock{
Key: newagentmodel.MemoryContextBlockKey,
Title: newagentmodel.MemoryContextBlockTitle,
st.EnsureConversationContext().UpsertPinnedBlock(agentmodel.ContextBlock{
Key: agentmodel.MemoryContextBlockKey,
Title: agentmodel.MemoryContextBlockTitle,
Content: content,
})
}
case <-time.After(newagentmodel.MemoryFreshTimeout):
case <-time.After(agentmodel.MemoryFreshTimeout):
// 超时后保留原有上下文即可。
}
}
// saveAgentState 在节点成功执行后保存运行快照。
func saveAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
func saveAgentState(ctx context.Context, st *agentmodel.AgentGraphState) {
if st == nil {
return
}
@@ -300,7 +300,7 @@ func saveAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
return
}
snapshot := &newagentmodel.AgentStateSnapshot{
snapshot := &agentmodel.AgentStateSnapshot{
RuntimeState: runtimeState,
ConversationContext: st.EnsureConversationContext(),
ScheduleState: st.ScheduleState.Clone(),
@@ -311,7 +311,7 @@ func saveAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
}
// deleteAgentState 在任务完成后删除运行快照。
func deleteAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
func deleteAgentState(ctx context.Context, st *agentmodel.AgentGraphState) {
if st == nil {
return
}
@@ -339,7 +339,7 @@ func deleteAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
// 1. 优先读取 PendingContextHook让首轮 execute 的 schema 注入与即将生效的规则包保持一致;
// 2. 只做只读推导,不消费 PendingContextHook真正的状态更新仍由 RunExecuteNode 统一处理;
// 3. hook 非法或为空时,回退到已持久化的 ActiveToolDomain/ActiveToolPacks保持历史链路兼容。
func resolveEffectiveExecuteToolDomain(flowState *newagentmodel.CommonState) (string, []string) {
func resolveEffectiveExecuteToolDomain(flowState *agentmodel.CommonState) (string, []string) {
if flowState == nil {
return "", nil
}
@@ -347,16 +347,16 @@ func resolveEffectiveExecuteToolDomain(flowState *newagentmodel.CommonState) (st
// 1. 若 plan / rough_build 已写入待生效 hook则首轮 execute 必须优先按它推导工具域,
// 否则 prompt 里的规则包和注入的工具 schema 会错位,模型第一轮看不到该用的工具。
if hook := flowState.PendingContextHook; hook != nil {
domain := newagenttools.NormalizeToolDomain(hook.Domain)
domain := agenttools.NormalizeToolDomain(hook.Domain)
if domain != "" {
return domain, newagenttools.ResolveEffectiveToolPacks(domain, hook.Packs)
return domain, agenttools.ResolveEffectiveToolPacks(domain, hook.Packs)
}
}
// 2. hook 不可用时回退到当前已激活域,保持老链路与恢复链路的行为不变。
domain := newagenttools.NormalizeToolDomain(flowState.ActiveToolDomain)
domain := agenttools.NormalizeToolDomain(flowState.ActiveToolDomain)
if domain == "" {
return "", nil
}
return domain, newagenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
return domain, agenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
}

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -11,10 +11,10 @@ import (
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentrouter "github.com/LoveLosita/smartflow/backend/newAgent/router"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
)
@@ -45,15 +45,15 @@ const (
// 3. ConversationContext 提供历史对话;
// 4. ConfirmAction 仅在 confirm 恢复场景下由前端传入 "accept" / "reject"。
type ChatNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
UserInput string
ConfirmAction string
ResumeInteractionID string
Client *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
ChunkEmitter *agentstream.ChunkEmitter
CompactionStore agentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// RunChatNode 执行一轮聊天节点逻辑。
@@ -78,13 +78,13 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
// 2. 无 pending → 路由决策(一次快速 LLM 调用,不开 thinking
flowState := runtimeState.EnsureCommonState()
if !runtimeState.HasPendingInteraction() && flowState.Phase == newagentmodel.PhaseDone {
if !runtimeState.HasPendingInteraction() && flowState.Phase == agentmodel.PhaseDone {
terminalBefore := flowState.TerminalStatus()
roundBefore := flowState.RoundUsed
// 1. 只有"正常完成(completed)"才打 loop 收口标记:
// 1.1 这样下一轮进入 execute 时msg2 会只保留"当前活跃循环"窗口;
// 1.2 异常收口exhausted/aborted不打标记允许后续"继续"时沿用上一轮 loop 轨迹。
if terminalBefore == newagentmodel.FlowTerminalStatusCompleted {
if terminalBefore == agentmodel.FlowTerminalStatusCompleted {
appendExecuteLoopClosedMarker(conversationContext)
}
flowState.ResetForNextRun()
@@ -96,7 +96,7 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
)
}
nonce := uuid.NewString()
messages := newagentprompt.BuildChatRoutingMessages(conversationContext, input.UserInput, flowState, nonce)
messages := agentprompt.BuildChatRoutingMessages(conversationContext, input.UserInput, flowState, nonce)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
@@ -117,11 +117,11 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
})
if err != nil {
log.Printf("[WARN] chat routing stream failed chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhasePlanning
flowState.Phase = agentmodel.PhasePlanning
return nil
}
parser := newagentrouter.NewStreamRouteParser(nonce)
parser := agentrouter.NewStreamRouteParser(nonce)
return streamAndDispatch(ctx, reader, parser, input, emitter, flowState, conversationContext)
}
@@ -131,7 +131,7 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
// 1. 只负责写一个轻量 marker供 prompt 分层;
// 2. 不负责历史裁剪,不负责消息摘要;
// 3. 若末尾已经是同类 marker则幂等跳过避免重复写入。
func appendExecuteLoopClosedMarker(conversationContext *newagentmodel.ConversationContext) {
func appendExecuteLoopClosedMarker(conversationContext *agentmodel.ConversationContext) {
if conversationContext == nil {
return
}
@@ -173,25 +173,25 @@ func isExecuteLoopClosedMarker(msg *schema.Message) bool {
func streamAndDispatch(
ctx context.Context,
reader llmservice.StreamReader,
parser *newagentrouter.StreamRouteParser,
parser *agentrouter.StreamRouteParser,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
emitter *agentstream.ChunkEmitter,
flowState *agentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
) error {
for {
chunk, err := reader.Recv()
if err == io.EOF {
if !parser.RouteReady() {
log.Printf("[WARN] chat stream ended before route resolved chat=%s", flowState.ConversationID)
flowState.Phase = newagentmodel.PhasePlanning
flowState.Phase = agentmodel.PhasePlanning
return nil
}
break
}
if err != nil {
log.Printf("[WARN] chat stream recv error chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhasePlanning
flowState.Phase = agentmodel.PhasePlanning
return nil
}
@@ -236,26 +236,26 @@ func streamAndDispatch(
effectiveThinking := resolveEffectiveThinking(flowState.ThinkingMode, decision.Route, decision.Thinking)
switch decision.Route {
case newagentmodel.ChatRouteDirectReply:
case agentmodel.ChatRouteDirectReply:
return handleDirectReplyStream(ctx, reader, input, emitter, conversationContext, flowState, effectiveThinking, visible)
case newagentmodel.ChatRouteExecute:
case agentmodel.ChatRouteExecute:
return handleRouteExecuteStream(reader, emitter, flowState, decision, input.UserInput, effectiveThinking, visible)
case newagentmodel.ChatRouteDeepAnswer:
case agentmodel.ChatRouteDeepAnswer:
return handleDeepAnswerStream(ctx, reader, input, emitter, conversationContext, flowState, effectiveThinking)
case newagentmodel.ChatRoutePlan:
case agentmodel.ChatRoutePlan:
return handleRoutePlanStream(reader, emitter, flowState, effectiveThinking, visible)
case newagentmodel.ChatRouteQuickTask:
case agentmodel.ChatRouteQuickTask:
// 关闭路由流,后续由 QuickTask 节点自行处理。
_ = reader.Close()
flowState.Phase = newagentmodel.PhaseQuickTask
flowState.Phase = agentmodel.PhaseQuickTask
return nil
default:
flowState.Phase = newagentmodel.PhasePlanning
flowState.Phase = agentmodel.PhasePlanning
return nil
}
}
@@ -271,14 +271,14 @@ func streamAndDispatch(
// 3.1 deep_answer 的语义本身就是"复杂问答 + 原地深度思考",因此默认开启;
// 3.2 execute 继续沿用路由模型给出的 decisionThinking
// 3.3 其余路由默认关闭,避免把轻量闲聊误升成高成本推理。
func resolveEffectiveThinking(mode string, route newagentmodel.ChatRoute, decisionThinking bool) bool {
func resolveEffectiveThinking(mode string, route agentmodel.ChatRoute, decisionThinking bool) bool {
switch strings.TrimSpace(strings.ToLower(mode)) {
case "true":
return true
case "false":
return false
default:
if route == newagentmodel.ChatRouteDeepAnswer {
if route == agentmodel.ChatRouteDeepAnswer {
return true
}
return decisionThinking
@@ -294,9 +294,9 @@ func handleDirectReplyStream(
ctx context.Context,
reader llmservice.StreamReader,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
conversationContext *agentmodel.ConversationContext,
flowState *agentmodel.CommonState,
effectiveThinking bool,
firstVisible string,
) error {
@@ -311,13 +311,13 @@ func handleThinkingReplyStream(
ctx context.Context,
reader llmservice.StreamReader,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
conversationContext *agentmodel.ConversationContext,
flowState *agentmodel.CommonState,
) error {
_ = reader.Close()
deepMessages := newagentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages := agentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages = compactUnifiedMessagesIfNeeded(ctx, deepMessages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
@@ -338,7 +338,7 @@ func handleThinkingReplyStream(
})
if err != nil {
log.Printf("[WARN] thinking reply stream failed chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -346,7 +346,7 @@ func handleThinkingReplyStream(
_ = deepReader.Close()
if err != nil {
log.Printf("[WARN] thinking reply emit error chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -356,7 +356,7 @@ func handleThinkingReplyStream(
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, schema.AssistantMessage(deepText, nil))
}
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -365,9 +365,9 @@ func handleDirectReplyContinueStream(
ctx context.Context,
reader llmservice.StreamReader,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
conversationContext *agentmodel.ConversationContext,
flowState *agentmodel.CommonState,
firstVisible string,
) error {
var fullText strings.Builder
@@ -408,7 +408,7 @@ func handleDirectReplyContinueStream(
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -420,9 +420,9 @@ func handleDirectReplyContinueStream(
// 3. 设置流程状态,进入 Execute 或 RoughBuild。
func handleRouteExecuteStream(
reader llmservice.StreamReader,
emitter *newagentstream.ChunkEmitter,
flowState *newagentmodel.CommonState,
decision *newagentmodel.ChatRoutingDecision,
emitter *agentstream.ChunkEmitter,
flowState *agentmodel.CommonState,
decision *agentmodel.ChatRoutingDecision,
userInput string,
effectiveThinking bool,
speak string,
@@ -518,8 +518,8 @@ func detectReorderPreference(userInput string) reorderPreference {
// resolveOptimizationMode 统一确定当前 execute 的优化模式。
func resolveOptimizationMode(
userInput string,
decision *newagentmodel.ChatRoutingDecision,
flowState *newagentmodel.CommonState,
decision *agentmodel.ChatRoutingDecision,
flowState *agentmodel.CommonState,
) string {
if decision != nil && decision.NeedsRoughBuild && flowState != nil && len(flowState.TaskClassIDs) > 0 {
return "first_full"
@@ -570,9 +570,9 @@ func containsAnyPhrase(text string, phrases []string) bool {
// 2. 上下文不存在 rough_build_done 时,不干预(首次粗排仍可走);
// 3. 若用户未明确要求"重新粗排/从头重排",则关闭粗排开关,避免误触发。
func shouldDisableRoughBuildForRefine(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
userInput string,
decision *newagentmodel.ChatRoutingDecision,
decision *agentmodel.ChatRoutingDecision,
) bool {
if decision == nil || !decision.NeedsRoughBuild {
return false
@@ -591,9 +591,9 @@ func shouldDisableRoughBuildForRefine(
// 3. 若用户明确表达"只要初稿/先不优化",则不强制开启;
// 4. 其余首次粗排场景一律开启,确保符合 PRD 的默认主动优化策略。
func shouldForceRefineAfterFirstRoughBuild(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
userInput string,
decision *newagentmodel.ChatRoutingDecision,
decision *agentmodel.ChatRoutingDecision,
) bool {
if decision == nil || !decision.NeedsRoughBuild {
return false
@@ -604,7 +604,7 @@ func shouldForceRefineAfterFirstRoughBuild(
return !isExplicitNoRefineAfterRoughBuildRequest(userInput)
}
func hasRoughBuildDoneMarker(conversationContext *newagentmodel.ConversationContext) bool {
func hasRoughBuildDoneMarker(conversationContext *agentmodel.ConversationContext) bool {
if conversationContext == nil {
return false
}
@@ -676,9 +676,9 @@ func handleDeepAnswerStream(
ctx context.Context,
reader llmservice.StreamReader,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
conversationContext *agentmodel.ConversationContext,
flowState *agentmodel.CommonState,
effectiveThinking bool,
) error {
// 1. 关闭第一个路由流。
@@ -689,7 +689,7 @@ func handleDeepAnswerStream(
if effectiveThinking {
thinkingOpt = llmservice.ThinkingModeEnabled
}
deepMessages := newagentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages := agentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages = compactUnifiedMessagesIfNeeded(ctx, deepMessages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
@@ -711,7 +711,7 @@ func handleDeepAnswerStream(
if err != nil {
// 深度回答失败 → 降级返回。
log.Printf("[WARN] deep answer stream failed chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -720,13 +720,13 @@ func handleDeepAnswerStream(
_ = deepReader.Close()
if err != nil {
log.Printf("[WARN] deep answer stream emit error chat=%s err=%v", flowState.ConversationID, err)
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
deepText = strings.TrimSpace(deepText)
if deepText == "" {
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
@@ -735,15 +735,15 @@ func handleDeepAnswerStream(
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
flowState.Phase = newagentmodel.PhaseChatting
flowState.Phase = agentmodel.PhaseChatting
return nil
}
// handleRoutePlanStream 处理规划路由:推送状态确认 → 设 PhasePlanning。
func handleRoutePlanStream(
reader llmservice.StreamReader,
emitter *newagentstream.ChunkEmitter,
flowState *newagentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
flowState *agentmodel.CommonState,
effectiveThinking bool,
speak string,
) error {
@@ -756,7 +756,7 @@ func handleRoutePlanStream(
_ = emitter.EmitStatus(chatStatusBlockID, chatStageName, "planning", speak, false)
flowState.Phase = newagentmodel.PhasePlanning
flowState.Phase = agentmodel.PhasePlanning
return nil
}
@@ -770,8 +770,8 @@ func handleRoutePlanStream(
// 3. 只推送轻量 status 通知前端"已收到回复,正在继续"。
func handleChatResume(
input ChatNodeInput,
runtimeState *newagentmodel.AgentRuntimeState,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
emitter *agentstream.ChunkEmitter,
) error {
pending := runtimeState.PendingInteraction
flowState := runtimeState.EnsureCommonState()
@@ -788,7 +788,7 @@ func handleChatResume(
// 这里不再二次写入,避免 pending 恢复路径把同一轮 user message 追加两次。
switch pending.Type {
case newagentmodel.PendingInteractionTypeAskUser:
case agentmodel.PendingInteractionTypeAskUser:
// 用户回答了问题 → 恢复 phase交给下游节点继续。
runtimeState.ResumeFromPending()
_ = emitter.EmitStatus(
@@ -797,7 +797,7 @@ func handleChatResume(
)
return nil
case newagentmodel.PendingInteractionTypeConfirm:
case agentmodel.PendingInteractionTypeConfirm:
return handleConfirmResume(input, runtimeState, flowState, pending, emitter)
default:
@@ -815,10 +815,10 @@ func handleChatResume(
// 3. reject + 无 PendingTool计划确认→ 清空计划,回到 planning 重新规划。
func handleConfirmResume(
input ChatNodeInput,
runtimeState *newagentmodel.AgentRuntimeState,
flowState *newagentmodel.CommonState,
pending *newagentmodel.PendingInteraction,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
flowState *agentmodel.CommonState,
pending *agentmodel.PendingInteraction,
emitter *agentstream.ChunkEmitter,
) error {
if isMismatchedResumeInteraction(input.ResumeInteractionID, pending) {
_ = emitter.EmitStatus(
@@ -840,7 +840,7 @@ func handleConfirmResume(
copied := *pendingTool
runtimeState.PendingConfirmTool = &copied
}
flowState.Phase = newagentmodel.PhaseExecuting
flowState.Phase = agentmodel.PhaseExecuting
_ = emitter.EmitStatus(
chatStatusBlockID, chatStageName,
"confirmed", "已确认,开始执行。", false,
@@ -850,7 +850,7 @@ func handleConfirmResume(
runtimeState.ResumeFromPending()
if pending.PendingTool != nil {
// 工具确认被拒 → 回到 executing 换策略。
flowState.Phase = newagentmodel.PhaseExecuting
flowState.Phase = agentmodel.PhaseExecuting
} else {
// 计划确认被拒 → 清空计划,回到 planning。
flowState.RejectPlan()
@@ -869,7 +869,7 @@ func handleConfirmResume(
return nil
}
func isMismatchedResumeInteraction(resumeInteractionID string, pending *newagentmodel.PendingInteraction) bool {
func isMismatchedResumeInteraction(resumeInteractionID string, pending *agentmodel.PendingInteraction) bool {
if pending == nil {
return false
}
@@ -883,9 +883,9 @@ func isMismatchedResumeInteraction(resumeInteractionID string, pending *newagent
// prepareChatNodeInput 校验并准备聊天节点的运行态依赖。
func prepareChatNodeInput(input ChatNodeInput) (
*newagentmodel.AgentRuntimeState,
*newagentmodel.ConversationContext,
*newagentstream.ChunkEmitter,
*agentmodel.AgentRuntimeState,
*agentmodel.ConversationContext,
*agentstream.ChunkEmitter,
error,
) {
if input.RuntimeState == nil {
@@ -897,11 +897,11 @@ func prepareChatNodeInput(input ChatNodeInput) (
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(
newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
input.ChunkEmitter = agentstream.NewChunkEmitter(
agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
)
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -7,8 +7,8 @@ import (
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
)
const (
@@ -23,9 +23,9 @@ const (
// 2. RuntimeState 提供计划步骤和待确认工具快照;
// 3. ChunkEmitter 负责推送确认事件到前端。
type ConfirmNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
ChunkEmitter *newagentstream.ChunkEmitter
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
ChunkEmitter *agentstream.ChunkEmitter
}
// RunConfirmNode 执行一轮确认节点逻辑。
@@ -69,9 +69,9 @@ func RunConfirmNode(ctx context.Context, input ConfirmNodeInput) error {
// 3. 调用 OpenConfirmInteraction 固化快照(无 PendingTool
func handlePlanConfirm(
ctx context.Context,
runtimeState *newagentmodel.AgentRuntimeState,
flowState *newagentmodel.CommonState,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
flowState *agentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
) error {
summary := buildPlanSummary(flowState.PlanSteps)
interactionID := generateConfirmInteractionID(flowState)
@@ -81,7 +81,7 @@ func handlePlanConfirm(
interactionID,
"计划确认",
summary,
newagentstream.DefaultPseudoStreamOptions(),
agentstream.DefaultPseudoStreamOptions(),
); err != nil {
return fmt.Errorf("计划确认事件推送失败: %w", err)
}
@@ -109,9 +109,9 @@ func handlePlanConfirm(
// 4. 清空 PendingConfirmTool 临时邮箱。
func handleToolConfirm(
ctx context.Context,
runtimeState *newagentmodel.AgentRuntimeState,
flowState *newagentmodel.CommonState,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
flowState *agentmodel.CommonState,
emitter *agentstream.ChunkEmitter,
) error {
pendingTool := runtimeState.PendingConfirmTool
summary := buildToolConfirmSummary(pendingTool)
@@ -122,7 +122,7 @@ func handleToolConfirm(
interactionID,
"操作确认",
summary,
newagentstream.DefaultPseudoStreamOptions(),
agentstream.DefaultPseudoStreamOptions(),
); err != nil {
return fmt.Errorf("工具确认事件推送失败: %w", err)
}
@@ -145,7 +145,7 @@ func handleToolConfirm(
}
// buildPlanSummary 把 PlanSteps 格式化成人类可读的确认摘要。
func buildPlanSummary(steps []newagentmodel.PlanStep) string {
func buildPlanSummary(steps []agentmodel.PlanStep) string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("共 %d 步:\n", len(steps)))
for i, step := range steps {
@@ -159,7 +159,7 @@ func buildPlanSummary(steps []newagentmodel.PlanStep) string {
}
// buildToolConfirmSummary 从工具快照构建确认摘要。
func buildToolConfirmSummary(tool *newagentmodel.PendingToolCallSnapshot) string {
func buildToolConfirmSummary(tool *agentmodel.PendingToolCallSnapshot) string {
if tool == nil {
return "待确认操作"
}
@@ -177,7 +177,7 @@ func buildToolConfirmSummary(tool *newagentmodel.PendingToolCallSnapshot) string
}
// generateConfirmInteractionID 生成确认交互的唯一标识。
func generateConfirmInteractionID(flowState *newagentmodel.CommonState) string {
func generateConfirmInteractionID(flowState *agentmodel.CommonState) string {
prefix := flowState.TraceID
if prefix == "" {
prefix = "confirm"
@@ -187,9 +187,9 @@ func generateConfirmInteractionID(flowState *newagentmodel.CommonState) string {
// prepareConfirmNodeInput 校验并准备确认节点的运行态依赖。
func prepareConfirmNodeInput(input ConfirmNodeInput) (
*newagentmodel.AgentRuntimeState,
*newagentmodel.ConversationContext,
*newagentstream.ChunkEmitter,
*agentmodel.AgentRuntimeState,
*agentmodel.ConversationContext,
*agentstream.ChunkEmitter,
error,
) {
if input.RuntimeState == nil {
@@ -197,11 +197,11 @@ func prepareConfirmNodeInput(input ConfirmNodeInput) (
}
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(
newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
input.ChunkEmitter = agentstream.NewChunkEmitter(
agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
)
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil

View File

@@ -1,10 +1,10 @@
package newagentnode
package agentnode
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -33,7 +33,7 @@ const (
// - 返回 nil 表示修正流程完成,调用方应继续 Graph 循环;
// - 该函数不会返回 error因为追加历史失败不影响主流程。
func AppendLLMCorrection(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
llmOutput string,
validOptionsDesc string,
) {
@@ -73,7 +73,7 @@ func AppendLLMCorrection(
// - errorDesc: 具体的错误描述,如 "action \"invalid\" 不是合法的执行动作"
// - validOptionsDesc: 合法选项的描述。
func AppendLLMCorrectionWithHint(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
llmOutput string,
errorDesc string,
validOptionsDesc string,
@@ -105,7 +105,7 @@ func AppendLLMCorrectionWithHint(
// 2. 若与“最近一条 assistant 文本”完全一致则跳过,避免同句反复回灌;
// 3. 仅负责“是否回灌”判定,不负责生成纠错 user 提示。
func appendCorrectionAssistantIfNeeded(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
assistantContent string,
) {
if conversationContext == nil {

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -9,9 +9,9 @@ import (
"github.com/cloudwego/eino/schema"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
)
@@ -29,13 +29,13 @@ const (
// 3. ConversationContext 提供执行阶段的对话历史;
// 4. 交付完成后标记流程结束。
type DeliverNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
Client *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
ChunkEmitter *agentstream.ChunkEmitter
ThinkingEnabled bool // 是否开启 thinking由 config.yaml 的 agent.thinking.deliver 注入
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
CompactionStore agentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// RunDeliverNode 执行一轮交付节点逻辑。
@@ -96,7 +96,7 @@ func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
deliverSpeakBlockID,
deliverStageName,
summary,
newagentstream.DefaultPseudoStreamOptions(),
agentstream.DefaultPseudoStreamOptions(),
); err != nil {
return fmt.Errorf("交付总结推送失败: %w", err)
}
@@ -129,11 +129,11 @@ func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
func generateDeliverSummary(
ctx context.Context,
client *llmservice.Client,
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
flowState *agentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
thinkingEnabled bool,
compactionStore newagentmodel.CompactionStore,
emitter *newagentstream.ChunkEmitter,
compactionStore agentmodel.CompactionStore,
emitter *agentstream.ChunkEmitter,
) (string, bool) {
if flowState != nil {
switch {
@@ -148,7 +148,7 @@ func generateDeliverSummary(
return buildMechanicalSummary(flowState), false
}
messages := newagentprompt.BuildDeliverMessages(flowState, conversationContext)
messages := agentprompt.BuildDeliverMessages(flowState, conversationContext)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: client,
CompactionStore: compactionStore,
@@ -191,7 +191,7 @@ func generateDeliverSummary(
// 1. 第二轮开始abort 的用户可见文案由终止方提前写入 CommonState
// 2. deliver 不再重新猜测或改写业务异常,只做最终收口;
// 3. 若历史快照缺失 user_message则回退到一份通用说明避免前端收到空白结果。
func buildAbortSummary(state *newagentmodel.CommonState) string {
func buildAbortSummary(state *agentmodel.CommonState) string {
if state == nil || state.TerminalOutcome == nil {
return "本轮流程已终止。"
}
@@ -202,7 +202,7 @@ func buildAbortSummary(state *newagentmodel.CommonState) string {
}
// buildExhaustedSummary 生成“轮次耗尽”的统一收口文案。
func buildExhaustedSummary(state *newagentmodel.CommonState) string {
func buildExhaustedSummary(state *agentmodel.CommonState) string {
if state == nil {
return "本轮执行已达到安全轮次上限,当前先停止继续操作。"
}
@@ -218,7 +218,7 @@ func buildExhaustedSummary(state *newagentmodel.CommonState) string {
}
// buildMechanicalSummary 在 LLM 不可用时,机械拼接一份最小可用总结。
func buildMechanicalSummary(state *newagentmodel.CommonState) string {
func buildMechanicalSummary(state *agentmodel.CommonState) string {
if state == nil {
return "任务流程已结束。"
}
@@ -254,9 +254,9 @@ func buildMechanicalSummary(state *newagentmodel.CommonState) string {
// prepareDeliverNodeInput 校验并准备交付节点的运行态依赖。
func prepareDeliverNodeInput(input DeliverNodeInput) (
*newagentmodel.AgentRuntimeState,
*newagentmodel.ConversationContext,
*newagentstream.ChunkEmitter,
*agentmodel.AgentRuntimeState,
*agentmodel.ConversationContext,
*agentstream.ChunkEmitter,
error,
) {
if input.RuntimeState == nil {
@@ -265,11 +265,11 @@ func prepareDeliverNodeInput(input DeliverNodeInput) (
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(
newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
input.ChunkEmitter = agentstream.NewChunkEmitter(
agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
)
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil

View File

@@ -0,0 +1,14 @@
package agentnode
import (
"context"
agentexecute "github.com/LoveLosita/smartflow/backend/services/agent/node/execute"
)
type ExecuteNodeInput = agentexecute.ExecuteNodeInput
type ExecuteRoundObservation = agentexecute.ExecuteRoundObservation
func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
return agentexecute.RunExecuteNode(ctx, input)
}

View File

@@ -1,24 +1,24 @@
package newagentexecute
package agentexecute
import (
"context"
"fmt"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
"io"
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentrouter "github.com/LoveLosita/smartflow/backend/newAgent/router"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)
type executeDecisionStreamOutput struct {
decision *newagentmodel.ExecuteDecision
decision *agentmodel.ExecuteDecision
rawText string
parsedBeforeText string
parsedAfterText string
@@ -30,9 +30,9 @@ type executeDecisionStreamOutput struct {
func collectExecuteDecisionFromLLM(
ctx context.Context,
input ExecuteNodeInput,
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
emitter *newagentstream.ChunkEmitter,
flowState *agentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
emitter *agentstream.ChunkEmitter,
messages []*schema.Message,
) (*executeDecisionStreamOutput, error) {
reader, err := input.Client.Stream(
@@ -41,7 +41,7 @@ func collectExecuteDecisionFromLLM(
llmservice.GenerateOptions{
Temperature: 1.0,
MaxTokens: 131072,
Thinking: newagentshared.ResolveThinkingMode(input.ThinkingEnabled),
Thinking: agentshared.ResolveThinkingMode(input.ThinkingEnabled),
Metadata: map[string]any{
"stage": executeStageName,
"step_index": flowState.CurrentStep,
@@ -53,7 +53,7 @@ func collectExecuteDecisionFromLLM(
return nil, fmt.Errorf("执行阶段 Stream 请求失败: %w", err)
}
parser := newagentrouter.NewStreamDecisionParser()
parser := agentrouter.NewStreamDecisionParser()
output := &executeDecisionStreamOutput{firstChunk: true}
var fullText strings.Builder
reasoningDigestor, digestorErr := emitter.NewReasoningDigestor(ctx, executeSpeakBlockID, executeStageName)
@@ -119,11 +119,11 @@ func collectExecuteDecisionFromLLM(
errorDesc = "检测到 tool_call 字段被错误写成数组;每次只允许调用一个工具,不支持数组形式。"
optionHint = "请把多次工具调用拆开,每次只保留一个 tool_call然后再继续下一轮。"
}
newagentshared.AppendLLMCorrectionWithHint(conversationContext, output.rawText, errorDesc, optionHint)
agentshared.AppendLLMCorrectionWithHint(conversationContext, output.rawText, errorDesc, optionHint)
return nil, nil
}
decision, parseErr := llmservice.ParseJSONObject[newagentmodel.ExecuteDecision](result.DecisionJSON)
decision, parseErr := llmservice.ParseJSONObject[agentmodel.ExecuteDecision](result.DecisionJSON)
if parseErr != nil {
log.Printf(
"[DEBUG] execute LLM JSON 解析失败 chat=%s round=%d json=%s raw=%s",
@@ -140,7 +140,7 @@ func collectExecuteDecisionFromLLM(
output.rawText,
)
}
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
"决策标签内的 JSON 格式不合法。",
@@ -217,7 +217,7 @@ func collectExecuteDecisionFromLLM(
if flowState.ConsecutiveCorrections >= maxConsecutiveCorrections {
return nil, fmt.Errorf("连续 %d 次模型返回空文本,终止执行", flowState.ConsecutiveCorrections)
}
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
"模型没有返回任何内容。",
@@ -250,10 +250,10 @@ func collectExecuteDecisionFromLLM(
func handleExecuteDecision(
ctx context.Context,
input ExecuteNodeInput,
runtimeState *newagentmodel.AgentRuntimeState,
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
flowState *agentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
emitter *agentstream.ChunkEmitter,
output *executeDecisionStreamOutput,
) error {
if output == nil || output.decision == nil {
@@ -261,9 +261,9 @@ func handleExecuteDecision(
}
decision := output.decision
if decision.Action == newagentmodel.ExecuteActionDone &&
if decision.Action == agentmodel.ExecuteActionDone &&
decision.ToolCall != nil &&
strings.EqualFold(strings.TrimSpace(decision.ToolCall.Name), newagenttools.ToolNameContextToolsRemove) {
strings.EqualFold(strings.TrimSpace(decision.ToolCall.Name), agenttools.ToolNameContextToolsRemove) {
decision.ToolCall = nil
}
@@ -292,7 +292,7 @@ func handleExecuteDecision(
fmt.Sprintf("执行校验:决策不合法:%s已请求模型重试。", err.Error()),
false,
)
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
fmt.Sprintf("本次执行决策不合法:%s", err.Error()),
@@ -310,16 +310,16 @@ func handleExecuteDecision(
)
decision.Speak = normalizeSpeak(decision.Speak)
if decision.Action == newagentmodel.ExecuteActionConfirm &&
if decision.Action == agentmodel.ExecuteActionConfirm &&
decision.ToolCall != nil &&
input.ToolRegistry != nil &&
!input.ToolRegistry.IsWriteTool(decision.ToolCall.Name) {
decision.Action = newagentmodel.ExecuteActionContinue
decision.Action = agentmodel.ExecuteActionContinue
}
if decision.Action == newagentmodel.ExecuteActionContinue &&
if decision.Action == agentmodel.ExecuteActionContinue &&
decision.ToolCall != nil &&
newagenttools.IsContextManagementTool(decision.ToolCall.Name) {
agenttools.IsContextManagementTool(decision.ToolCall.Name) {
decision.Speak = ""
}
@@ -351,8 +351,8 @@ func handleExecuteDecision(
}
if flowState.HasPlan() &&
(decision.Action == newagentmodel.ExecuteActionNextPlan ||
decision.Action == newagentmodel.ExecuteActionDone) {
(decision.Action == agentmodel.ExecuteActionNextPlan ||
decision.Action == agentmodel.ExecuteActionDone) {
if strings.TrimSpace(decision.GoalCheck) == "" {
flowState.ConsecutiveCorrections++
if flowState.ConsecutiveCorrections >= maxConsecutiveCorrections {
@@ -365,7 +365,7 @@ func handleExecuteDecision(
fmt.Sprintf("执行校验action=%s 缺少 goal_check已请求模型重试。", decision.Action),
false,
)
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
fmt.Sprintf("你输出了 action=%s但 goal_check 为空。", decision.Action),
@@ -377,13 +377,13 @@ func handleExecuteDecision(
askUserHistoryAppended := false
if strings.TrimSpace(decision.Speak) != "" {
isConfirmWithCard := decision.Action == newagentmodel.ExecuteActionConfirm && !input.AlwaysExecute
isAskUser := decision.Action == newagentmodel.ExecuteActionAskUser
isAbort := decision.Action == newagentmodel.ExecuteActionAbort
isConfirmWithCard := decision.Action == agentmodel.ExecuteActionConfirm && !input.AlwaysExecute
isAskUser := decision.Action == agentmodel.ExecuteActionAskUser
isAbort := decision.Action == agentmodel.ExecuteActionAbort
if !isConfirmWithCard && !isAskUser && !isAbort {
msg := schema.AssistantMessage(decision.Speak, nil)
newagentshared.PersistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
agentshared.PersistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
if !isAbort {
conversationContext.AppendHistory(&schema.Message{
@@ -397,7 +397,7 @@ func handleExecuteDecision(
}
switch decision.Action {
case newagentmodel.ExecuteActionContinue:
case agentmodel.ExecuteActionContinue:
if decision.ToolCall != nil {
if input.ToolRegistry != nil && input.ToolRegistry.IsWriteTool(decision.ToolCall.Name) {
flowState.ConsecutiveCorrections++
@@ -426,7 +426,7 @@ func handleExecuteDecision(
if strings.TrimSpace(llmOutput) == "" {
llmOutput = decision.Reason
}
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
llmOutput,
fmt.Sprintf("你输出了 action=continue但同时提供了 %q 这个写工具。", decision.ToolCall.Name),
@@ -461,14 +461,14 @@ func handleExecuteDecision(
}
return nil
case newagentmodel.ExecuteActionAskUser:
case agentmodel.ExecuteActionAskUser:
question := resolveExecuteAskUserText(decision)
runtimeState.OpenAskUserInteraction(uuid.NewString(), question, strings.TrimSpace(input.ResumeNode))
runtimeState.SetPendingInteractionMetadata(newagentmodel.PendingMetaAskUserSpeakStreamed, output.speakStreamed)
runtimeState.SetPendingInteractionMetadata(newagentmodel.PendingMetaAskUserHistoryAppended, askUserHistoryAppended)
runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserSpeakStreamed, output.speakStreamed)
runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserHistoryAppended, askUserHistoryAppended)
return nil
case newagentmodel.ExecuteActionConfirm:
case agentmodel.ExecuteActionConfirm:
if decision.ToolCall != nil && shouldForceFeasibilityNegotiation(flowState, input.ToolRegistry, decision.ToolCall.Name) {
runtimeState.OpenAskUserInteraction(
uuid.NewString(),
@@ -491,7 +491,7 @@ func handleExecuteDecision(
}
return handleExecuteActionConfirm(decision, runtimeState, flowState)
case newagentmodel.ExecuteActionNextPlan:
case agentmodel.ExecuteActionNextPlan:
if !flowState.AdvanceStep() {
flowState.Done()
}
@@ -499,11 +499,11 @@ func handleExecuteDecision(
syncExecutePinnedContext(conversationContext, flowState)
return nil
case newagentmodel.ExecuteActionDone:
case agentmodel.ExecuteActionDone:
flowState.Done()
return nil
case newagentmodel.ExecuteActionAbort:
case agentmodel.ExecuteActionAbort:
return handleExecuteActionAbort(decision, flowState)
default:
@@ -511,7 +511,7 @@ func handleExecuteDecision(
if strings.TrimSpace(llmOutput) == "" {
llmOutput = decision.Reason
}
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
llmOutput,
fmt.Sprintf("你输出的 action %q 不是合法的执行动作。", decision.Action),

View File

@@ -1,14 +1,14 @@
package newagentexecute
package agentexecute
import (
"encoding/json"
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
func resolveExecuteAskUserText(decision *newagentmodel.ExecuteDecision) string {
func resolveExecuteAskUserText(decision *agentmodel.ExecuteDecision) string {
if decision == nil {
return "执行过程中遇到不确定的情况,需要向你确认。"
}
@@ -25,7 +25,7 @@ func pickExecuteVisibleSpeak(
streamed string,
afterText string,
beforeText string,
decision *newagentmodel.ExecuteDecision,
decision *agentmodel.ExecuteDecision,
) string {
if text := strings.TrimSpace(streamed); text != "" {
return text
@@ -39,7 +39,7 @@ func pickExecuteVisibleSpeak(
return buildExecuteSpeakWithFallback(decision)
}
func buildExecuteSpeakWithFallback(decision *newagentmodel.ExecuteDecision) string {
func buildExecuteSpeakWithFallback(decision *agentmodel.ExecuteDecision) string {
if decision == nil {
return ""
}
@@ -50,16 +50,16 @@ func buildExecuteSpeakWithFallback(decision *newagentmodel.ExecuteDecision) stri
}
switch decision.Action {
case newagentmodel.ExecuteActionContinue,
newagentmodel.ExecuteActionAskUser,
newagentmodel.ExecuteActionConfirm:
case agentmodel.ExecuteActionContinue,
agentmodel.ExecuteActionAskUser,
agentmodel.ExecuteActionConfirm:
if reason := strings.TrimSpace(decision.Reason); reason != "" {
return reason
}
switch decision.Action {
case newagentmodel.ExecuteActionAskUser:
case agentmodel.ExecuteActionAskUser:
return "我还缺少一条关键信息,想先向你确认。"
case newagentmodel.ExecuteActionConfirm:
case agentmodel.ExecuteActionConfirm:
return "我先整理好这一步操作,等待你的确认。"
default:
return "我先继续这一步处理,马上给你结果。"
@@ -70,9 +70,9 @@ func buildExecuteSpeakWithFallback(decision *newagentmodel.ExecuteDecision) stri
}
func handleExecuteActionConfirm(
decision *newagentmodel.ExecuteDecision,
runtimeState *newagentmodel.AgentRuntimeState,
flowState *newagentmodel.CommonState,
decision *agentmodel.ExecuteDecision,
runtimeState *agentmodel.AgentRuntimeState,
flowState *agentmodel.CommonState,
) error {
toolCall := decision.ToolCall
@@ -83,19 +83,19 @@ func handleExecuteActionConfirm(
}
}
runtimeState.PendingConfirmTool = &newagentmodel.PendingToolCallSnapshot{
runtimeState.PendingConfirmTool = &agentmodel.PendingToolCallSnapshot{
ToolName: toolCall.Name,
ArgsJSON: argsJSON,
Summary: strings.TrimSpace(decision.Speak),
}
flowState.Phase = newagentmodel.PhaseWaitingConfirm
flowState.Phase = agentmodel.PhaseWaitingConfirm
return nil
}
func handleExecuteActionAbort(
decision *newagentmodel.ExecuteDecision,
flowState *newagentmodel.CommonState,
decision *agentmodel.ExecuteDecision,
flowState *agentmodel.CommonState,
) error {
if decision == nil || decision.Abort == nil {
return fmt.Errorf("abort 动作缺少终止信息")

View File

@@ -1,4 +1,4 @@
package newagentexecute
package agentexecute
import (
"encoding/json"

View File

@@ -1,12 +1,12 @@
package newagentexecute
package agentexecute
import (
"fmt"
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
"github.com/cloudwego/eino/schema"
)
@@ -15,7 +15,7 @@ const (
planCurrentStepTitle = "当前步骤"
)
func prepareExecuteNodeInput(input ExecuteNodeInput) (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext, *newagentstream.ChunkEmitter, error) {
func prepareExecuteNodeInput(input ExecuteNodeInput) (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *agentstream.ChunkEmitter, error) {
if input.RuntimeState == nil {
return nil, nil, nil, fmt.Errorf("execute node: runtime state 不能为空")
}
@@ -25,17 +25,17 @@ func prepareExecuteNodeInput(input ExecuteNodeInput) (*newagentmodel.AgentRuntim
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix())
input.ChunkEmitter = agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix())
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil
}
func syncExecutePinnedContext(
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
flowState *agentmodel.CommonState,
) {
if conversationContext == nil || flowState == nil {
return
@@ -43,7 +43,7 @@ func syncExecutePinnedContext(
execContent := buildExecuteContextPinnedMarkdown(flowState)
if strings.TrimSpace(execContent) != "" {
conversationContext.UpsertPinnedBlock(newagentmodel.ContextBlock{
conversationContext.UpsertPinnedBlock(agentmodel.ContextBlock{
Key: executePinnedKey,
Title: "执行上下文",
Content: execContent,
@@ -66,14 +66,14 @@ func syncExecutePinnedContext(
if title == "" {
title = "当前步骤"
}
conversationContext.UpsertPinnedBlock(newagentmodel.ContextBlock{
conversationContext.UpsertPinnedBlock(agentmodel.ContextBlock{
Key: planCurrentStepKey,
Title: title,
Content: buildCurrentPlanStepPinnedMarkdown(step, current, total),
})
}
func appendExecuteStepAdvancedMarker(conversationContext *newagentmodel.ConversationContext) {
func appendExecuteStepAdvancedMarker(conversationContext *agentmodel.ConversationContext) {
if conversationContext == nil {
return
}
@@ -97,7 +97,7 @@ func appendExecuteStepAdvancedMarker(conversationContext *newagentmodel.Conversa
})
}
func buildExecuteContextPinnedMarkdown(flowState *newagentmodel.CommonState) string {
func buildExecuteContextPinnedMarkdown(flowState *agentmodel.CommonState) string {
if flowState == nil {
return ""
}
@@ -128,7 +128,7 @@ func buildExecuteContextPinnedMarkdown(flowState *newagentmodel.CommonState) str
return strings.TrimSpace(strings.Join(lines, "\n"))
}
func buildCurrentPlanStepPinnedMarkdown(step newagentmodel.PlanStep, current, total int) string {
func buildCurrentPlanStepPinnedMarkdown(step agentmodel.PlanStep, current, total int) string {
lines := make([]string, 0, 4)
lines = append(lines, fmt.Sprintf("步骤进度:第 %d/%d 步", current, total))

View File

@@ -1,15 +1,15 @@
package newagentexecute
package agentexecute
import (
"context"
"fmt"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
)
@@ -26,20 +26,20 @@ const (
)
type ExecuteNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
UserInput string
Client *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
ChunkEmitter *agentstream.ChunkEmitter
ResumeNode string
ToolRegistry *newagenttools.ToolRegistry
ToolRegistry *agenttools.ToolRegistry
ScheduleState *schedule.ScheduleState
CompactionStore newagentmodel.CompactionStore
WriteSchedulePreview newagentmodel.WriteSchedulePreviewFunc
CompactionStore agentmodel.CompactionStore
WriteSchedulePreview agentmodel.WriteSchedulePreviewFunc
OriginalScheduleState *schedule.ScheduleState
AlwaysExecute bool
ThinkingEnabled bool
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
type ExecuteRoundObservation struct {
@@ -114,8 +114,8 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
return nil
}
messages := newagentprompt.BuildExecuteMessages(flowState, conversationContext)
messages = newagentshared.CompactUnifiedMessagesIfNeeded(ctx, messages, newagentshared.UnifiedCompactInput{
messages := agentprompt.BuildExecuteMessages(flowState, conversationContext)
messages = agentshared.CompactUnifiedMessagesIfNeeded(ctx, messages, agentshared.UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
@@ -124,7 +124,7 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
StatusBlockID: executeStatusBlockID,
})
newagentshared.LogNodeLLMContext(executeStageName, "decision", flowState, messages)
agentshared.LogNodeLLMContext(executeStageName, "decision", flowState, messages)
decisionOutput, err := collectExecuteDecisionFromLLM(
ctx,

View File

@@ -1,17 +1,17 @@
package newagentexecute
package agentexecute
import (
"encoding/json"
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
)
func shouldForceFeasibilityNegotiation(
flowState *newagentmodel.CommonState,
registry *newagenttools.ToolRegistry,
flowState *agentmodel.CommonState,
registry *agenttools.ToolRegistry,
toolName string,
) bool {
if flowState == nil || registry == nil {
@@ -26,7 +26,7 @@ func shouldForceFeasibilityNegotiation(
return true
}
func buildInfeasibleNegotiationQuestion(flowState *newagentmodel.CommonState) string {
func buildInfeasibleNegotiationQuestion(flowState *agentmodel.CommonState) string {
capacityGap := 0
reasonCode := "capacity_insufficient"
if flowState != nil {
@@ -42,7 +42,7 @@ func buildInfeasibleNegotiationQuestion(flowState *newagentmodel.CommonState) st
)
}
func buildInfeasibleBlockedResult(flowState *newagentmodel.CommonState) string {
func buildInfeasibleBlockedResult(flowState *agentmodel.CommonState) string {
capacityGap := 0
reasonCode := "capacity_insufficient"
if flowState != nil {
@@ -101,8 +101,8 @@ type upsertTaskClassValidationPart struct {
Issues []string `json:"issues"`
}
func updateActiveToolDomainSnapshot(flowState *newagentmodel.CommonState, toolName string, result string) {
if flowState == nil || !newagenttools.IsContextManagementTool(toolName) {
func updateActiveToolDomainSnapshot(flowState *agentmodel.CommonState, toolName string, result string) {
if flowState == nil || !agenttools.IsContextManagementTool(toolName) {
return
}
@@ -115,17 +115,17 @@ func updateActiveToolDomainSnapshot(flowState *newagentmodel.CommonState, toolNa
}
switch strings.TrimSpace(toolName) {
case newagenttools.ToolNameContextToolsAdd:
domain := newagenttools.NormalizeToolDomain(envelope.Domain)
case agenttools.ToolNameContextToolsAdd:
domain := agenttools.NormalizeToolDomain(envelope.Domain)
if domain == "" {
return
}
nextPacks := newagenttools.ResolveEffectiveToolPacks(domain, envelope.Packs)
nextPacks := agenttools.ResolveEffectiveToolPacks(domain, envelope.Packs)
mode := strings.ToLower(strings.TrimSpace(envelope.Mode))
if mode == "merge" && newagenttools.NormalizeToolDomain(flowState.ActiveToolDomain) == domain {
if mode == "merge" && agenttools.NormalizeToolDomain(flowState.ActiveToolDomain) == domain {
merged := make([]string, 0, len(flowState.ActiveToolPacks)+len(nextPacks))
seen := make(map[string]struct{}, len(flowState.ActiveToolPacks)+len(nextPacks))
current := newagenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
current := agenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
for _, pack := range current {
if _, exists := seen[pack]; exists {
continue
@@ -144,29 +144,29 @@ func updateActiveToolDomainSnapshot(flowState *newagentmodel.CommonState, toolNa
}
flowState.ActiveToolDomain = domain
flowState.ActiveToolPacks = nextPacks
case newagenttools.ToolNameContextToolsRemove:
case agenttools.ToolNameContextToolsRemove:
if envelope.All {
flowState.ActiveToolDomain = ""
flowState.ActiveToolPacks = nil
return
}
domain := newagenttools.NormalizeToolDomain(envelope.Domain)
domain := agenttools.NormalizeToolDomain(envelope.Domain)
if domain == "" {
return
}
currentDomain := newagenttools.NormalizeToolDomain(flowState.ActiveToolDomain)
currentDomain := agenttools.NormalizeToolDomain(flowState.ActiveToolDomain)
if currentDomain != domain {
return
}
removedPacks := newagenttools.NormalizeToolPacks(domain, envelope.Packs)
removedPacks := agenttools.NormalizeToolPacks(domain, envelope.Packs)
if len(removedPacks) == 0 {
flowState.ActiveToolDomain = ""
flowState.ActiveToolPacks = nil
return
}
currentEffective := newagenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
currentEffective := agenttools.ResolveEffectiveToolPacks(domain, flowState.ActiveToolPacks)
if len(currentEffective) == 0 {
flowState.ActiveToolDomain = ""
flowState.ActiveToolPacks = nil
@@ -193,7 +193,7 @@ func updateActiveToolDomainSnapshot(flowState *newagentmodel.CommonState, toolNa
}
}
func updateHealthFeasibilitySnapshot(flowState *newagentmodel.CommonState, toolName string, result string) {
func updateHealthFeasibilitySnapshot(flowState *agentmodel.CommonState, toolName string, result string) {
if flowState == nil || !strings.EqualFold(strings.TrimSpace(toolName), toolAnalyzeHealth) {
return
}
@@ -217,7 +217,7 @@ func updateHealthFeasibilitySnapshot(flowState *newagentmodel.CommonState, toolN
flowState.HealthReasonCode = strings.TrimSpace(envelope.Feasibility.ReasonCode)
}
func updateTaskClassUpsertSnapshot(flowState *newagentmodel.CommonState, toolName string, result string) {
func updateTaskClassUpsertSnapshot(flowState *agentmodel.CommonState, toolName string, result string) {
if flowState == nil || !strings.EqualFold(strings.TrimSpace(toolName), "upsert_task_class") {
return
}
@@ -274,7 +274,7 @@ func uniqueNonEmptyStrings(values []string) []string {
return result
}
func updateHealthSnapshotV2(flowState *newagentmodel.CommonState, toolName string, result string) {
func updateHealthSnapshotV2(flowState *agentmodel.CommonState, toolName string, result string) {
if flowState == nil || !strings.EqualFold(strings.TrimSpace(toolName), toolAnalyzeHealth) {
return
}

View File

@@ -1,27 +1,27 @@
package newagentexecute
package agentexecute
import (
"context"
"encoding/json"
"fmt"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
"log"
"regexp"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)
func appendToolCallResultHistory(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
toolName string,
args map[string]any,
result newagenttools.ToolExecutionResult,
result agenttools.ToolExecutionResult,
) {
if conversationContext == nil {
return
@@ -58,13 +58,13 @@ func appendToolCallResultHistory(
func executeToolCall(
ctx context.Context,
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
toolCall *newagentmodel.ToolCallIntent,
emitter *newagentstream.ChunkEmitter,
registry *newagenttools.ToolRegistry,
flowState *agentmodel.CommonState,
conversationContext *agentmodel.ConversationContext,
toolCall *agentmodel.ToolCallIntent,
emitter *agentstream.ChunkEmitter,
registry *agenttools.ToolRegistry,
scheduleState *schedule.ScheduleState,
writePreview newagentmodel.WriteSchedulePreviewFunc,
writePreview agentmodel.WriteSchedulePreviewFunc,
) error {
if toolCall == nil {
return nil
@@ -99,10 +99,10 @@ func executeToolCall(
flowState.ConsecutiveCorrections, toolName)
}
blockedText := buildTemporarilyDisabledToolResult(toolName)
blockedResult := newagenttools.BlockedResult(toolName, toolCall.Arguments, blockedText, "tool_temporarily_disabled", blockedText)
blockedResult := agenttools.BlockedResult(toolName, toolCall.Arguments, blockedText, "tool_temporarily_disabled", blockedText)
emitToolCallResultEvent(emitter, executeStatusBlockID, executeStageName, blockedResult, toolCall.Arguments)
appendToolCallResultHistory(conversationContext, toolName, toolCall.Arguments, blockedResult)
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
fmt.Sprintf("工具 %q 当前暂时禁用。", toolName),
@@ -119,7 +119,7 @@ func executeToolCall(
log.Printf("[WARN] execute 工具名不合法 chat=%s round=%d tool=%s consecutive=%d/%d available=%v",
flowState.ConversationID, flowState.RoundUsed, toolName,
flowState.ConsecutiveCorrections, maxConsecutiveCorrections, registry.ToolNames())
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
fmt.Sprintf("你调用的工具 %q 不存在。", toolName),
@@ -134,21 +134,21 @@ func executeToolCall(
flowState.ConsecutiveCorrections,
toolName,
flowState.ActiveToolDomain,
newagenttools.ResolveEffectiveToolPacks(flowState.ActiveToolDomain, flowState.ActiveToolPacks))
agenttools.ResolveEffectiveToolPacks(flowState.ActiveToolDomain, flowState.ActiveToolPacks))
}
addHint := `请先调用 context_tools_add 激活目标工具域后再继续。`
if flowState != nil && flowState.ActiveOptimizeOnly {
addHint = `当前处于“粗排后主动优化专用模式”,只允许使用 analyze_health、move、swap不要再尝试 query_target_tasks / query_available_slots 等全窗搜索工具。`
} else if domain, pack, ok := newagenttools.ResolveToolDomainPack(toolName); ok {
if newagenttools.IsFixedToolPack(domain, pack) {
} else if domain, pack, ok := agenttools.ResolveToolDomainPack(toolName); ok {
if agenttools.IsFixedToolPack(domain, pack) {
addHint = fmt.Sprintf(`请先调用 context_tools_add参数 domain="%s"。`, domain)
} else {
addHint = fmt.Sprintf(`请先调用 context_tools_add参数 domain="%s", packs=["%s"]。`, domain, pack)
}
}
newagentshared.AppendLLMCorrectionWithHint(
agentshared.AppendLLMCorrectionWithHint(
conversationContext,
"",
fmt.Sprintf("你调用的工具 %q 当前不在已激活工具域内。", toolName),
@@ -159,7 +159,7 @@ func executeToolCall(
if shouldForceFeasibilityNegotiation(flowState, registry, toolName) {
blockedText := buildInfeasibleBlockedResult(flowState)
blockedResult := newagenttools.BlockedResult(toolName, toolCall.Arguments, blockedText, "health_negotiation_required", blockedText)
blockedResult := agenttools.BlockedResult(toolName, toolCall.Arguments, blockedText, "health_negotiation_required", blockedText)
emitToolCallResultEvent(emitter, executeStatusBlockID, executeStageName, blockedResult, toolCall.Arguments)
appendToolCallResultHistory(conversationContext, toolName, toolCall.Arguments, blockedResult)
return nil
@@ -173,7 +173,7 @@ func executeToolCall(
toolCall.Arguments["_user_id"] = flowState.UserID
}
result := registry.Execute(scheduleState, toolName, toolCall.Arguments)
result = newagenttools.EnsureToolResultDefaults(result, toolCall.Arguments)
result = agenttools.EnsureToolResultDefaults(result, toolCall.Arguments)
updateHealthSnapshotV2(flowState, toolName, result.ObservationText)
updateTaskClassUpsertSnapshot(flowState, toolName, result.ObservationText)
updateActiveToolDomainSnapshot(flowState, toolName, result.ObservationText)
@@ -202,24 +202,24 @@ func executeToolCall(
return nil
}
func applyPendingContextHook(flowState *newagentmodel.CommonState) {
func applyPendingContextHook(flowState *agentmodel.CommonState) {
if flowState == nil || flowState.PendingContextHook == nil {
return
}
hook := flowState.PendingContextHook
domain := newagenttools.NormalizeToolDomain(hook.Domain)
domain := agenttools.NormalizeToolDomain(hook.Domain)
if domain == "" {
flowState.PendingContextHook = nil
return
}
flowState.ActiveToolDomain = domain
flowState.ActiveToolPacks = newagenttools.ResolveEffectiveToolPacks(domain, hook.Packs)
flowState.ActiveToolPacks = agenttools.ResolveEffectiveToolPacks(domain, hook.Packs)
flowState.PendingContextHook = nil
}
func isToolVisibleForCurrentExecuteMode(
flowState *newagentmodel.CommonState,
registry *newagenttools.ToolRegistry,
flowState *agentmodel.CommonState,
registry *agenttools.ToolRegistry,
toolName string,
) bool {
if registry == nil {
@@ -234,7 +234,7 @@ func isToolVisibleForCurrentExecuteMode(
if !registry.IsToolVisibleInDomain(activeDomain, activePacks, toolName) {
return false
}
if flowState != nil && flowState.ActiveOptimizeOnly && !newagenttools.IsToolAllowedInActiveOptimize(toolName) {
if flowState != nil && flowState.ActiveOptimizeOnly && !agenttools.IsToolAllowedInActiveOptimize(toolName) {
return false
}
return true
@@ -246,13 +246,13 @@ func buildTemporarilyDisabledToolResult(toolName string) string {
func executePendingTool(
ctx context.Context,
runtimeState *newagentmodel.AgentRuntimeState,
conversationContext *newagentmodel.ConversationContext,
registry *newagenttools.ToolRegistry,
runtimeState *agentmodel.AgentRuntimeState,
conversationContext *agentmodel.ConversationContext,
registry *agenttools.ToolRegistry,
scheduleState *schedule.ScheduleState,
originalState *schedule.ScheduleState,
writePreview newagentmodel.WriteSchedulePreviewFunc,
emitter *newagentstream.ChunkEmitter,
writePreview agentmodel.WriteSchedulePreviewFunc,
emitter *agentstream.ChunkEmitter,
) error {
pending := runtimeState.PendingConfirmTool
if pending == nil {
@@ -281,7 +281,7 @@ func executePendingTool(
flowState := runtimeState.EnsureCommonState()
if registry.IsToolTemporarilyDisabled(pending.ToolName) {
blockedText := buildTemporarilyDisabledToolResult(pending.ToolName)
blockedResult := newagenttools.BlockedResult(pending.ToolName, args, blockedText, "tool_temporarily_disabled", blockedText)
blockedResult := agenttools.BlockedResult(pending.ToolName, args, blockedText, "tool_temporarily_disabled", blockedText)
emitToolCallResultEvent(emitter, executeStatusBlockID, executeStageName, blockedResult, args)
appendToolCallResultHistory(conversationContext, pending.ToolName, args, blockedResult)
runtimeState.PendingConfirmTool = nil
@@ -290,7 +290,7 @@ func executePendingTool(
if shouldForceFeasibilityNegotiation(flowState, registry, pending.ToolName) {
blockedText := buildInfeasibleBlockedResult(flowState)
blockedResult := newagenttools.BlockedResult(pending.ToolName, args, blockedText, "health_negotiation_required", blockedText)
blockedResult := agenttools.BlockedResult(pending.ToolName, args, blockedText, "health_negotiation_required", blockedText)
emitToolCallResultEvent(emitter, executeStatusBlockID, executeStageName, blockedResult, args)
appendToolCallResultHistory(conversationContext, pending.ToolName, args, blockedResult)
runtimeState.PendingConfirmTool = nil
@@ -305,7 +305,7 @@ func executePendingTool(
args["_user_id"] = flowState.UserID
}
result := registry.Execute(scheduleState, pending.ToolName, args)
result = newagenttools.EnsureToolResultDefaults(result, args)
result = agenttools.EnsureToolResultDefaults(result, args)
updateHealthSnapshotV2(flowState, pending.ToolName, result.ObservationText)
updateTaskClassUpsertSnapshot(flowState, pending.ToolName, result.ObservationText)
updateActiveToolDomainSnapshot(flowState, pending.ToolName, result.ObservationText)
@@ -338,11 +338,11 @@ func executePendingTool(
func tryWritePreviewAfterWriteTool(
ctx context.Context,
flowState *newagentmodel.CommonState,
flowState *agentmodel.CommonState,
scheduleState *schedule.ScheduleState,
registry *newagenttools.ToolRegistry,
registry *agenttools.ToolRegistry,
toolName string,
writePreview newagentmodel.WriteSchedulePreviewFunc,
writePreview agentmodel.WriteSchedulePreviewFunc,
) {
if flowState == nil || scheduleState == nil || registry == nil || writePreview == nil {
return
@@ -394,16 +394,16 @@ func buildExecuteNormalizedSpeakTail(streamed, normalized string) string {
}
func emitToolCallResultEvent(
emitter *newagentstream.ChunkEmitter,
emitter *agentstream.ChunkEmitter,
blockID string,
stage string,
result newagenttools.ToolExecutionResult,
result agenttools.ToolExecutionResult,
args map[string]any,
) {
if emitter == nil {
return
}
result = newagenttools.EnsureToolResultDefaults(result, args)
result = agenttools.EnsureToolResultDefaults(result, args)
_ = emitter.EmitToolCallResult(
blockID,
stage,
@@ -411,8 +411,8 @@ func emitToolCallResultEvent(
result.Status,
result.Summary,
result.ArgumentsPreview,
newagenttools.ToolArgumentViewToMap(result.ArgumentView),
newagenttools.ToolDisplayViewToMap(result.ResultView),
agenttools.ToolArgumentViewToMap(result.ArgumentView),
agenttools.ToolDisplayViewToMap(result.ResultView),
false,
)
}

View File

@@ -1,4 +1,4 @@
package newagentexecute
package agentexecute
import (
"encoding/json"
@@ -6,7 +6,7 @@ import (
"strconv"
"strings"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
func summarizeScheduleStateForDebug(state *schedule.ScheduleState) string {

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -7,8 +7,8 @@ import (
"github.com/cloudwego/eino/schema"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
)
const (
@@ -24,10 +24,10 @@ const (
// 2. RuntimeState 提供 PendingInteraction
// 3. ChunkEmitter 负责推送收尾消息。
type InterruptNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
ChunkEmitter *newagentstream.ChunkEmitter
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
ChunkEmitter *agentstream.ChunkEmitter
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// RunInterruptNode 执行一轮中断节点逻辑。
@@ -55,9 +55,9 @@ func RunInterruptNode(ctx context.Context, input InterruptNodeInput) error {
}
switch pending.Type {
case newagentmodel.PendingInteractionTypeAskUser:
case agentmodel.PendingInteractionTypeAskUser:
return handleInterruptAskUser(ctx, runtimeState, input.PersistVisibleMessage, pending, conversationContext, emitter)
case newagentmodel.PendingInteractionTypeConfirm:
case agentmodel.PendingInteractionTypeConfirm:
return handleInterruptConfirm(pending, emitter)
default:
// connection_lost 等其他类型 → 仅持久化,不输出。
@@ -71,19 +71,19 @@ func RunInterruptNode(ctx context.Context, input InterruptNodeInput) error {
// 写入历史,然后结束。用户体验和正常对话一样 — 助手问了问题,停下来等回复。
func handleInterruptAskUser(
ctx context.Context,
runtimeState *newagentmodel.AgentRuntimeState,
persist newagentmodel.PersistVisibleMessageFunc,
pending *newagentmodel.PendingInteraction,
conversationContext *newagentmodel.ConversationContext,
emitter *newagentstream.ChunkEmitter,
runtimeState *agentmodel.AgentRuntimeState,
persist agentmodel.PersistVisibleMessageFunc,
pending *agentmodel.PendingInteraction,
conversationContext *agentmodel.ConversationContext,
emitter *agentstream.ChunkEmitter,
) error {
text := pending.DisplayText
if text == "" {
text = "请补充更多信息。"
}
speakStreamed := readPendingMetadataBool(pending, newagentmodel.PendingMetaAskUserSpeakStreamed)
historyAppended := readPendingMetadataBool(pending, newagentmodel.PendingMetaAskUserHistoryAppended)
speakStreamed := readPendingMetadataBool(pending, agentmodel.PendingMetaAskUserSpeakStreamed)
historyAppended := readPendingMetadataBool(pending, agentmodel.PendingMetaAskUserHistoryAppended)
// 1. 若上游节点已流式推送过 ask_user 文本,则这里跳过二次正文推送;
// 2. 这样既保留 interrupt 的统一收口状态,又避免前端出现重复气泡。
@@ -92,7 +92,7 @@ func handleInterruptAskUser(
if err := emitter.EmitPseudoAssistantText(
ctx, interruptSpeakBlockID, interruptStageName,
text,
newagentstream.DefaultPseudoStreamOptions(),
agentstream.DefaultPseudoStreamOptions(),
); err != nil {
return fmt.Errorf("追问消息推送失败: %w", err)
}
@@ -114,7 +114,7 @@ func handleInterruptAskUser(
return nil
}
func readPendingMetadataBool(pending *newagentmodel.PendingInteraction, key string) bool {
func readPendingMetadataBool(pending *agentmodel.PendingInteraction, key string) bool {
if pending == nil || pending.Metadata == nil {
return false
}
@@ -133,8 +133,8 @@ func readPendingMetadataBool(pending *newagentmodel.PendingInteraction, key stri
//
// 确认卡片已由 confirm 节点推送,这里只需推送状态通知并持久化。
func handleInterruptConfirm(
pending *newagentmodel.PendingInteraction,
emitter *newagentstream.ChunkEmitter,
pending *agentmodel.PendingInteraction,
emitter *agentstream.ChunkEmitter,
) error {
// 状态持久化已由 agent_nodes 层统一处理,此处不再需要自行存快照。
@@ -147,8 +147,8 @@ func handleInterruptConfirm(
// handleInterruptDefault 处理其他类型的中断(如 connection_lost
func handleInterruptDefault(
pending *newagentmodel.PendingInteraction,
emitter *newagentstream.ChunkEmitter,
pending *agentmodel.PendingInteraction,
emitter *agentstream.ChunkEmitter,
) error {
// 状态持久化已由 agent_nodes 层统一处理,此处不再需要自行存快照。
@@ -161,9 +161,9 @@ func handleInterruptDefault(
// prepareInterruptNodeInput 校验并准备中断节点的运行态依赖。
func prepareInterruptNodeInput(input InterruptNodeInput) (
*newagentmodel.AgentRuntimeState,
*newagentmodel.ConversationContext,
*newagentstream.ChunkEmitter,
*agentmodel.AgentRuntimeState,
*agentmodel.ConversationContext,
*agentstream.ChunkEmitter,
error,
) {
if input.RuntimeState == nil {
@@ -171,11 +171,11 @@ func prepareInterruptNodeInput(input InterruptNodeInput) (
}
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(
newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
input.ChunkEmitter = agentstream.NewChunkEmitter(
agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix(),
)
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"encoding/json"
@@ -6,7 +6,7 @@ import (
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -19,7 +19,7 @@ import (
func logNodeLLMContext(
stage string,
phase string,
flowState *newagentmodel.CommonState,
flowState *agentmodel.CommonState,
messages []*schema.Message,
) {
chatID := ""

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -10,10 +10,10 @@ import (
"github.com/google/uuid"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentrouter "github.com/LoveLosita/smartflow/backend/newAgent/router"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
@@ -31,16 +31,16 @@ const (
// PlanNodeInput 描述单轮规划节点执行所需的最小依赖。
type PlanNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
UserInput string
Client *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
ChunkEmitter *agentstream.ChunkEmitter
ResumeNode string
AlwaysExecute bool // true 时计划生成后自动确认,不进入 confirm 节点
ThinkingEnabled bool // 是否开启 thinking由 config.yaml 的 agent.thinking.plan 注入
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
CompactionStore agentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// RunPlanNode 执行一轮规划节点逻辑。
@@ -72,7 +72,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
}
// 2. 构造本轮规划输入。
messages := newagentprompt.BuildPlanMessages(flowState, conversationContext, input.UserInput)
messages := agentprompt.BuildPlanMessages(flowState, conversationContext, input.UserInput)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
@@ -103,7 +103,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
return fmt.Errorf("规划阶段 Stream 调用失败: %w", err)
}
parser := newagentrouter.NewStreamDecisionParser()
parser := agentrouter.NewStreamDecisionParser()
firstChunk := true
speakStreamed := false
reasoningDigestor, digestorErr := emitter.NewReasoningDigestor(ctx, planSpeakBlockID, planStageName)
@@ -149,7 +149,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
return fmt.Errorf("规划解析失败,原始输出=%s", result.RawBuffer)
}
decision, parseErr := llmservice.ParseJSONObject[newagentmodel.PlanDecision](result.DecisionJSON)
decision, parseErr := llmservice.ParseJSONObject[agentmodel.PlanDecision](result.DecisionJSON)
if parseErr != nil {
return fmt.Errorf("规划决策 JSON 解析失败: %w (raw=%s)", parseErr, result.RawBuffer)
}
@@ -202,7 +202,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
decision.Speak = fullText.String()
// 4. 若有 speak 且不是 ask_userask_user 交给 interrupt 收口),写入历史。
if strings.TrimSpace(decision.Speak) != "" && decision.Action != newagentmodel.PlanActionAskUser {
if strings.TrimSpace(decision.Speak) != "" && decision.Action != agentmodel.PlanActionAskUser {
msg := schema.AssistantMessage(decision.Speak, nil)
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
@@ -220,26 +220,26 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
func handlePlanAction(
ctx context.Context,
input PlanNodeInput,
runtimeState *newagentmodel.AgentRuntimeState,
conversationContext *newagentmodel.ConversationContext,
emitter *newagentstream.ChunkEmitter,
flowState *newagentmodel.CommonState,
decision *newagentmodel.PlanDecision,
runtimeState *agentmodel.AgentRuntimeState,
conversationContext *agentmodel.ConversationContext,
emitter *agentstream.ChunkEmitter,
flowState *agentmodel.CommonState,
decision *agentmodel.PlanDecision,
askUserSpeakStreamed bool,
) error {
switch decision.Action {
case newagentmodel.PlanActionContinue:
flowState.Phase = newagentmodel.PhasePlanning
case agentmodel.PlanActionContinue:
flowState.Phase = agentmodel.PhasePlanning
return nil
case newagentmodel.PlanActionAskUser:
case agentmodel.PlanActionAskUser:
question := resolvePlanAskUserText(decision)
runtimeState.OpenAskUserInteraction(uuid.NewString(), question, strings.TrimSpace(input.ResumeNode))
// 1. plan 阶段若已流式推送过 ask_user 文本interrupt 侧应避免重复正文输出;
// 2. plan 阶段 ask_user 不会提前写入 history这里显式标记为 false。
runtimeState.SetPendingInteractionMetadata(newagentmodel.PendingMetaAskUserSpeakStreamed, askUserSpeakStreamed)
runtimeState.SetPendingInteractionMetadata(newagentmodel.PendingMetaAskUserHistoryAppended, false)
runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserSpeakStreamed, askUserSpeakStreamed)
runtimeState.SetPendingInteractionMetadata(agentmodel.PendingMetaAskUserHistoryAppended, false)
return nil
case newagentmodel.PlanActionDone:
case agentmodel.PlanActionDone:
flowState.FinishPlan(decision.PlanSteps)
flowState.PendingContextHook = clonePlanContextHook(decision.ContextHook)
writePlanPinnedBlocks(conversationContext, decision.PlanSteps)
@@ -259,7 +259,7 @@ func handlePlanAction(
planSummaryBlockID,
planStageName,
summary,
newagentstream.DefaultPseudoStreamOptions(),
agentstream.DefaultPseudoStreamOptions(),
); err != nil {
return fmt.Errorf("自动执行前计划摘要推送失败: %w", err)
}
@@ -292,7 +292,7 @@ func handlePlanAction(
}
}
func preparePlanNodeInput(input PlanNodeInput) (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext, *newagentstream.ChunkEmitter, error) {
func preparePlanNodeInput(input PlanNodeInput) (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *agentstream.ChunkEmitter, error) {
if input.RuntimeState == nil {
return nil, nil, nil, fmt.Errorf("plan node: runtime state 不能为空")
}
@@ -302,15 +302,15 @@ func preparePlanNodeInput(input PlanNodeInput) (*newagentmodel.AgentRuntimeState
input.RuntimeState.EnsureCommonState()
if input.ConversationContext == nil {
input.ConversationContext = newagentmodel.NewConversationContext("")
input.ConversationContext = agentmodel.NewConversationContext("")
}
if input.ChunkEmitter == nil {
input.ChunkEmitter = newagentstream.NewChunkEmitter(newagentstream.NoopPayloadEmitter(), "", "", time.Now().Unix())
input.ChunkEmitter = agentstream.NewChunkEmitter(agentstream.NoopPayloadEmitter(), "", "", time.Now().Unix())
}
return input.RuntimeState, input.ConversationContext, input.ChunkEmitter, nil
}
func resolvePlanAskUserText(decision *newagentmodel.PlanDecision) string {
func resolvePlanAskUserText(decision *agentmodel.PlanDecision) string {
if decision == nil {
return "我还缺一点关键信息,想先向你确认一下。"
}
@@ -323,7 +323,7 @@ func resolvePlanAskUserText(decision *newagentmodel.PlanDecision) string {
return "我还缺一点关键信息,想先向你确认一下。"
}
func clonePlanContextHook(hook *newagentmodel.ContextHook) *newagentmodel.ContextHook {
func clonePlanContextHook(hook *agentmodel.ContextHook) *agentmodel.ContextHook {
if hook == nil {
return nil
}
@@ -338,14 +338,14 @@ func clonePlanContextHook(hook *newagentmodel.ContextHook) *newagentmodel.Contex
return &cloned
}
func writePlanPinnedBlocks(ctx *newagentmodel.ConversationContext, steps []newagentmodel.PlanStep) {
func writePlanPinnedBlocks(ctx *agentmodel.ConversationContext, steps []agentmodel.PlanStep) {
if ctx == nil {
return
}
fullPlanText := buildPinnedPlanText(steps)
if strings.TrimSpace(fullPlanText) != "" {
ctx.UpsertPinnedBlock(newagentmodel.ContextBlock{
ctx.UpsertPinnedBlock(agentmodel.ContextBlock{
Key: planPinnedKey,
Title: planFullPlanTitle,
Content: fullPlanText,
@@ -360,14 +360,14 @@ func writePlanPinnedBlocks(ctx *newagentmodel.ConversationContext, steps []newag
if strings.TrimSpace(steps[0].DoneWhen) != "" {
firstStep = fmt.Sprintf("%s\n完成判定%s", firstStep, strings.TrimSpace(steps[0].DoneWhen))
}
ctx.UpsertPinnedBlock(newagentmodel.ContextBlock{
ctx.UpsertPinnedBlock(agentmodel.ContextBlock{
Key: planCurrentStepKey,
Title: planCurrentStepTitle,
Content: firstStep,
})
}
func buildPinnedPlanText(steps []newagentmodel.PlanStep) string {
func buildPinnedPlanText(steps []agentmodel.PlanStep) string {
if len(steps) == 0 {
return ""
}

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -9,11 +9,11 @@ import (
"time"
taskmodel "github.com/LoveLosita/smartflow/backend/model"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentrouter "github.com/LoveLosita/smartflow/backend/newAgent/router"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
@@ -27,13 +27,13 @@ const (
// QuickTaskNodeInput 描述快捷任务节点的输入。
type QuickTaskNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
RuntimeState *agentmodel.AgentRuntimeState
ConversationContext *agentmodel.ConversationContext
UserInput string
Client *llmservice.Client
ChunkEmitter *newagentstream.ChunkEmitter
QuickTaskDeps newagentmodel.QuickTaskDeps
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
ChunkEmitter *agentstream.ChunkEmitter
QuickTaskDeps agentmodel.QuickTaskDeps
PersistVisibleMessage agentmodel.PersistVisibleMessageFunc
}
// quickTaskDecision 是从 LLM 输出中解析的结构化意图。
@@ -65,7 +65,7 @@ type quickTaskDecision struct {
// 3. 不负责直接发射,发射时机由 RunQuickTaskNode 统一控制。
type quickTaskActionResult struct {
AssistantText string
BusinessCard *newagentstream.StreamBusinessCardExtra
BusinessCard *agentstream.StreamBusinessCardExtra
}
// RunQuickTaskNode 执行快捷任务节点:流式 LLM 提取意图 → 直接调 service → 追加结果。
@@ -74,7 +74,7 @@ func RunQuickTaskNode(ctx context.Context, input QuickTaskNodeInput) error {
emitter := input.ChunkEmitter
// 1. 构造 messages。
messages := newagentprompt.BuildQuickTaskMessagesSimple(input.UserInput)
messages := agentprompt.BuildQuickTaskMessagesSimple(input.UserInput)
// 2. 真流式调用 LLM。
reader, err := input.Client.Stream(ctx, messages, llmservice.GenerateOptions{
@@ -84,12 +84,12 @@ func RunQuickTaskNode(ctx context.Context, input QuickTaskNodeInput) error {
if err != nil {
log.Printf("[WARN] quick_task: Stream 调用失败 chat=%s err=%v", flowState.ConversationID, err)
_ = emitter.EmitAssistantText(quickTaskBlockID, quickTaskStageName, "抱歉,处理任务时出了点问题,请重试。", true)
flowState.Phase = newagentmodel.PhaseDone
flowState.Phase = agentmodel.PhaseDone
return nil
}
// 3. 两阶段流式解析。
parser := newagentrouter.NewStreamDecisionParser()
parser := agentrouter.NewStreamDecisionParser()
firstChunk := true
var decision *quickTaskDecision
var fullText strings.Builder
@@ -181,7 +181,7 @@ func RunQuickTaskNode(ctx context.Context, input QuickTaskNodeInput) error {
msg := schema.AssistantMessage(finalText, nil)
input.ConversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
flowState.Phase = newagentmodel.PhaseDone
flowState.Phase = agentmodel.PhaseDone
return nil
}
@@ -235,7 +235,7 @@ func RunQuickTaskNode(ctx context.Context, input QuickTaskNodeInput) error {
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
flowState.Phase = newagentmodel.PhaseDone
flowState.Phase = agentmodel.PhaseDone
return nil
}
@@ -244,7 +244,7 @@ func handleQuickTaskCreate(
ctx context.Context,
input QuickTaskNodeInput,
decision *quickTaskDecision,
flowState *newagentmodel.CommonState,
flowState *agentmodel.CommonState,
) quickTaskActionResult {
_ = ctx
title := strings.TrimSpace(decision.Title)
@@ -254,7 +254,7 @@ func handleQuickTaskCreate(
var deadline *time.Time
if raw := strings.TrimSpace(decision.DeadlineAt); raw != "" {
parsed, err := newagentshared.ParseOptionalDeadline(raw)
parsed, err := agentshared.ParseOptionalDeadline(raw)
if err != nil {
return quickTaskActionResult{AssistantText: fmt.Sprintf("截止时间格式不太对(%s不过我先把任务记下来啦。", err)}
}
@@ -262,7 +262,7 @@ func handleQuickTaskCreate(
}
priorityGroup := 0
if decision.PriorityGroup != nil && newagentshared.IsValidTaskPriority(*decision.PriorityGroup) {
if decision.PriorityGroup != nil && agentshared.IsValidTaskPriority(*decision.PriorityGroup) {
priorityGroup = *decision.PriorityGroup
}
if priorityGroup == 0 {
@@ -272,7 +272,7 @@ func handleQuickTaskCreate(
var urgencyThreshold *time.Time
if raw := strings.TrimSpace(decision.UrgencyThresholdAt); raw != "" {
parsed, err := newagentshared.ParseOptionalDeadline(raw)
parsed, err := agentshared.ParseOptionalDeadline(raw)
if err == nil {
urgencyThreshold = parsed
}
@@ -302,9 +302,9 @@ func handleQuickTaskQuery(
ctx context.Context,
input QuickTaskNodeInput,
decision *quickTaskDecision,
flowState *newagentmodel.CommonState,
flowState *agentmodel.CommonState,
) quickTaskActionResult {
params := newagentmodel.TaskQueryParams{
params := agentmodel.TaskQueryParams{
SortBy: "deadline",
Order: "asc",
Limit: 5,
@@ -354,13 +354,13 @@ func handleQuickTaskQuery(
}
}
func buildTaskRecordBusinessCard(taskID int, title string, priorityGroup int, estimatedSections int, deadline *time.Time, urgencyThreshold *time.Time) *newagentstream.StreamBusinessCardExtra {
func buildTaskRecordBusinessCard(taskID int, title string, priorityGroup int, estimatedSections int, deadline *time.Time, urgencyThreshold *time.Time) *agentstream.StreamBusinessCardExtra {
data := map[string]any{
"id": taskID,
"title": strings.TrimSpace(title),
"priority_group": priorityGroup,
"estimated_sections": estimatedSections,
"priority_label": newagentshared.PriorityLabelCN(priorityGroup),
"priority_label": agentshared.PriorityLabelCN(priorityGroup),
"status": "todo",
}
if formatted := formatQuickTaskTime(deadline); formatted != "" {
@@ -374,7 +374,7 @@ func buildTaskRecordBusinessCard(taskID int, title string, priorityGroup int, es
// 1. quick_task 当前只有 action=create未显式区分“随口记 / 正式创建任务”;
// 2. 仅凭当前 prompt 决策无法稳定判断 source=create_task会引入误判
// 3. 本轮按最小安全口径固定为 quick_note等后续补稳定判别字段再切分。
return &newagentstream.StreamBusinessCardExtra{
return &agentstream.StreamBusinessCardExtra{
CardType: "task_record",
Title: "已帮你记下",
Summary: "一条轻量提醒已写入任务系统",
@@ -383,7 +383,7 @@ func buildTaskRecordBusinessCard(taskID int, title string, priorityGroup int, es
}
}
func buildTaskQueryBusinessCard(params newagentmodel.TaskQueryParams, results []newagentmodel.TaskQueryResult) *newagentstream.StreamBusinessCardExtra {
func buildTaskQueryBusinessCard(params agentmodel.TaskQueryParams, results []agentmodel.TaskQueryResult) *agentstream.StreamBusinessCardExtra {
taskItems := make([]map[string]any, 0, len(results))
for _, task := range results {
item := map[string]any{
@@ -391,7 +391,7 @@ func buildTaskQueryBusinessCard(params newagentmodel.TaskQueryParams, results []
"title": strings.TrimSpace(task.Title),
"priority_group": task.PriorityGroup,
"estimated_sections": task.EstimatedSections,
"priority_label": newagentshared.PriorityLabelCN(task.PriorityGroup),
"priority_label": agentshared.PriorityLabelCN(task.PriorityGroup),
"is_completed": task.IsCompleted,
}
if deadline := strings.TrimSpace(task.DeadlineAt); deadline != "" {
@@ -419,7 +419,7 @@ func buildTaskQueryBusinessCard(params newagentmodel.TaskQueryParams, results []
data["query_summary"] = querySummary
}
return &newagentstream.StreamBusinessCardExtra{
return &agentstream.StreamBusinessCardExtra{
CardType: "task_query",
Title: title,
Summary: querySummary,
@@ -446,10 +446,10 @@ func buildTaskQueryFilter(key string, label string, value any, operator string,
return filter
}
func buildTaskQueryFilters(params newagentmodel.TaskQueryParams) []map[string]any {
func buildTaskQueryFilters(params agentmodel.TaskQueryParams) []map[string]any {
filters := make([]map[string]any, 0, 6)
if params.Quadrant != nil && *params.Quadrant >= 1 && *params.Quadrant <= 4 {
label := newagentshared.PriorityLabelCN(*params.Quadrant)
label := agentshared.PriorityLabelCN(*params.Quadrant)
filters = append(filters, buildTaskQueryFilter(
"quadrant",
"象限",
@@ -548,12 +548,12 @@ func buildTaskQuerySummary(filters []map[string]any) string {
// 1. 只负责把 query 的 deadline_after/deadline_before 文本解析成时间;
// 2. 解析失败时仅记录日志并返回 nil不中断查询主链路
// 3. 不负责时间窗合法性校验(如 before<=after该校验由调用方统一处理。
func parseQuickTaskQueryDeadlineBoundary(raw string, field string, flowState *newagentmodel.CommonState) *time.Time {
func parseQuickTaskQueryDeadlineBoundary(raw string, field string, flowState *agentmodel.CommonState) *time.Time {
value := strings.TrimSpace(raw)
if value == "" {
return nil
}
parsed, err := newagentshared.ParseOptionalDeadline(value)
parsed, err := agentshared.ParseOptionalDeadline(value)
if err != nil {
chatID := ""
if flowState != nil {
@@ -569,16 +569,16 @@ func formatQuickTaskTime(t *time.Time) string {
if t == nil {
return ""
}
return t.In(newagentshared.ShanghaiLocation()).Format("2006-01-02 15:04")
return t.In(agentshared.ShanghaiLocation()).Format("2006-01-02 15:04")
}
// quickNoteFallbackPriority 根据截止时间推断默认优先级。
func quickNoteFallbackPriority(deadline *time.Time) int {
if deadline != nil {
if time.Until(*deadline) <= 48*time.Hour {
return newagentshared.QuickNotePriorityImportantUrgent
return agentshared.QuickNotePriorityImportantUrgent
}
return newagentshared.QuickNotePriorityImportantNotUrgent
return agentshared.QuickNotePriorityImportantNotUrgent
}
return newagentshared.QuickNotePrioritySimpleNotImportant
return agentshared.QuickNotePrioritySimpleNotImportant
}

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -7,9 +7,9 @@ import (
"strconv"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
)
const (
@@ -38,7 +38,7 @@ type roughBuildApplyStats struct {
// 7. 否则按“是否需要粗排后立即微调”分流:
// - 无明确微调诉求:直接 Done -> Deliver
// - 有明确微调诉求:进入 Execute。
func RunRoughBuildNode(ctx context.Context, st *newagentmodel.AgentGraphState) error {
func RunRoughBuildNode(ctx context.Context, st *agentmodel.AgentGraphState) error {
if st == nil {
return fmt.Errorf("rough build node: state is nil")
}
@@ -64,7 +64,7 @@ func RunRoughBuildNode(ctx context.Context, st *newagentmodel.AgentGraphState) e
taskClassIDs := flowState.TaskClassIDs
if len(taskClassIDs) == 0 {
// 没有任务类 ID 时静默跳过粗排,直接进入执行阶段。
flowState.Phase = newagentmodel.PhaseExecuting
flowState.Phase = agentmodel.PhaseExecuting
flowState.NeedsRoughBuild = false
flowState.NeedsRefineAfterRoughBuild = false
return nil
@@ -211,7 +211,7 @@ func RunRoughBuildNode(ctx context.Context, st *newagentmodel.AgentGraphState) e
} else {
pinnedContent += "\n当前未收到明确微调偏好流程将先收口如需进一步优化请基于本次结果提出调整要求。"
}
st.EnsureConversationContext().UpsertPinnedBlock(newagentmodel.ContextBlock{
st.EnsureConversationContext().UpsertPinnedBlock(agentmodel.ContextBlock{
Key: "rough_build_done",
Title: "粗排已完成",
Content: pinnedContent,
@@ -242,15 +242,15 @@ func RunRoughBuildNode(ctx context.Context, st *newagentmodel.AgentGraphState) e
// 1. 目的:即使这条链路不回 plan也能在 execute 首轮拿到建议工具面analyze + mutation
// 2. 边界:这里只写“建议激活域/包”,不直接执行 context_tools_add仍由 execute 按统一入口消费。
// 3. 回退hook 无效时 execute 会自动忽略并清空,不影响主流程。
flowState.PendingContextHook = &newagentmodel.ContextHook{
Domain: newagenttools.ToolDomainSchedule,
flowState.PendingContextHook = &agentmodel.ContextHook{
Domain: agenttools.ToolDomainSchedule,
Packs: []string{
newagenttools.ToolPackAnalyze,
newagenttools.ToolPackMutation,
agenttools.ToolPackAnalyze,
agenttools.ToolPackMutation,
},
Reason: "rough_build_post_refine",
}
flowState.Phase = newagentmodel.PhaseExecuting
flowState.Phase = agentmodel.PhaseExecuting
return nil
}
@@ -290,7 +290,7 @@ func countPendingTasks(state *schedule.ScheduleState, taskClassIDs []int) int {
// 5. 转换失败的条目静默跳过,不中断整体流程。
func applyRoughBuildPlacements(
state *schedule.ScheduleState,
placements []newagentmodel.RoughBuildPlacement,
placements []agentmodel.RoughBuildPlacement,
) roughBuildApplyStats {
stats := roughBuildApplyStats{}
if state == nil {
@@ -334,7 +334,7 @@ func applyRoughBuildPlacements(
}
// appendPlacementSample 记录有限数量的 miss 样本,避免 debug 日志爆量。
func appendPlacementSample(samples []string, placement newagentmodel.RoughBuildPlacement) []string {
func appendPlacementSample(samples []string, placement agentmodel.RoughBuildPlacement) []string {
if len(samples) >= roughBuildSampleLimit {
return samples
}

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"regexp"

View File

@@ -1,4 +1,4 @@
package newagentnode
package agentnode
import (
"context"
@@ -6,10 +6,10 @@ import (
"fmt"
"log"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
"github.com/LoveLosita/smartflow/backend/pkg"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
@@ -24,11 +24,11 @@ type UnifiedCompactInput struct {
// Client 用于调用 LLM 压缩 msg1/msg2。
Client *llmservice.Client
// CompactionStore 用于持久化压缩摘要和 token 统计,为 nil 时跳过持久化。
CompactionStore newagentmodel.CompactionStore
CompactionStore agentmodel.CompactionStore
// FlowState 提供 userID / chatID / roundUsed 等定位信息。
FlowState *newagentmodel.CommonState
FlowState *agentmodel.CommonState
// Emitter 用于推送压缩进度 SSE 事件。
Emitter *newagentstream.ChunkEmitter
Emitter *agentstream.ChunkEmitter
// StageName 标识当前阶段(如 "execute"/"plan"/"chat"/"deliver"),用于日志和缓存 key。
StageName string
// StatusBlockID 是 SSE 状态推送的 block ID各节点使用自己的 block ID。
@@ -207,7 +207,7 @@ func compactUnifiedMsg1(
)
// 4. 调用 LLM 压缩:将 msg1 全文 + 已有摘要合并为一份紧凑摘要。
newSummary, err := newagentprompt.CompactMsg1(ctx, input.Client, msg1, existingSummary)
newSummary, err := agentprompt.CompactMsg1(ctx, input.Client, msg1, existingSummary)
if err != nil {
log.Printf("[COMPACT:%s] compact msg1 failed: %v", input.StageName, err)
_ = input.Emitter.EmitStatus(
@@ -254,7 +254,7 @@ func compactUnifiedMsg2(
)
// 2. 调用 LLM 压缩。
compressed, err := newagentprompt.CompactMsg2(ctx, input.Client, msg2)
compressed, err := agentprompt.CompactMsg2(ctx, input.Client, msg2)
if err != nil {
log.Printf("[COMPACT:%s] compact msg2 failed: %v", input.StageName, err)
_ = input.Emitter.EmitStatus(

View File

@@ -1,11 +1,11 @@
package newagentnode
package agentnode
import (
"context"
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -17,8 +17,8 @@ import (
// 3. 具体的 Redis / MySQL / 乐观缓存写入由 service 回调统一完成。
func persistVisibleAssistantMessage(
ctx context.Context,
persist newagentmodel.PersistVisibleMessageFunc,
state *newagentmodel.CommonState,
persist agentmodel.PersistVisibleMessageFunc,
state *agentmodel.CommonState,
msg *schema.Message,
) {
if persist == nil || state == nil || msg == nil {

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -17,7 +17,7 @@ import (
// 4. pinned blocks当前计划、当前步骤、粗排结果等最新约束— 紧贴 user prompt
// 利用近因效应让 LLM 优先关注本轮最相关的约束,而非被历史消息分散注意力;
// 5. user prompt阶段性指令— 始终在末尾,是本轮回答的核心触发。
func buildStageMessages(stageSystemPrompt string, ctx *newagentmodel.ConversationContext, runtimeUserPrompt string) []*schema.Message {
func buildStageMessages(stageSystemPrompt string, ctx *agentmodel.ConversationContext, runtimeUserPrompt string) []*schema.Message {
messages := make([]*schema.Message, 0, 4)
// 1. 合并 system prompt基础角色约束 + 阶段规则,始终在最顶部。
@@ -66,7 +66,7 @@ func buildStageMessages(stageSystemPrompt string, ctx *newagentmodel.Conversatio
}
// renderStateSummary 把当前流程状态渲染成简洁文本。
func renderStateSummary(state *newagentmodel.CommonState) string {
func renderStateSummary(state *agentmodel.CommonState) string {
if state == nil {
return "当前状态state 缺失,请先做兜底处理。"
}
@@ -155,7 +155,7 @@ func defaultSemanticValue(value string) string {
}
// renderPinnedBlocks 把 ConversationContext 中的置顶块渲染成独立的 system 文本。
func renderPinnedBlocks(ctx *newagentmodel.ConversationContext) string {
func renderPinnedBlocks(ctx *agentmodel.ConversationContext) string {
if ctx == nil {
return ""
}
@@ -184,7 +184,7 @@ func renderPinnedBlocks(ctx *newagentmodel.ConversationContext) string {
}
// renderToolSchemas 把工具摘要渲染成独立文本块。
func renderToolSchemas(ctx *newagentmodel.ConversationContext) string {
func renderToolSchemas(ctx *agentmodel.ConversationContext) string {
if ctx == nil {
return ""
}
@@ -221,7 +221,7 @@ func renderToolSchemas(ctx *newagentmodel.ConversationContext) string {
return strings.TrimSpace(sb.String())
}
func mergeSystemPrompts(ctx *newagentmodel.ConversationContext, stageSystemPrompt string) string {
func mergeSystemPrompts(ctx *agentmodel.ConversationContext, stageSystemPrompt string) string {
base := ""
if ctx != nil {
base = strings.TrimSpace(ctx.SystemPrompt)

View File

@@ -1,11 +1,11 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -98,7 +98,7 @@ func BuildChatRoutingSystemPrompt() string {
}
// BuildChatRoutingMessages 组装路由阶段的 messages。
func BuildChatRoutingMessages(ctx *newagentmodel.ConversationContext, userInput string, state *newagentmodel.CommonState, nonce string) []*schema.Message {
func BuildChatRoutingMessages(ctx *agentmodel.ConversationContext, userInput string, state *agentmodel.CommonState, nonce string) []*schema.Message {
return buildUnifiedStageMessages(
ctx,
StageMessagesConfig{
@@ -147,7 +147,7 @@ func BuildDeepAnswerSystemPrompt() string {
}
// BuildDeepAnswerMessages 组装深度回答阶段的 messages。
func BuildDeepAnswerMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext, userInput string) []*schema.Message {
func BuildDeepAnswerMessages(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext, userInput string) []*schema.Message {
return buildUnifiedStageMessages(
ctx,
StageMessagesConfig{

View File

@@ -1,13 +1,13 @@
package newagentprompt
package agentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// buildChatConversationMessage 生成 chat / deep_answer 共用的真实对话视图。
func buildChatConversationMessage(ctx *newagentmodel.ConversationContext) string {
func buildChatConversationMessage(ctx *agentmodel.ConversationContext) string {
return buildConversationHistoryMessage(ctx, "真实对话记录")
}
@@ -17,7 +17,7 @@ func buildChatConversationMessage(ctx *newagentmodel.ConversationContext) string
// 1. chat 只保留与路由判断直接相关的最小流程标记;
// 2. rough_build_done 仍需显式暴露,否则路由层会丢掉“不要重复粗排”的关键信号;
// 3. 不再展示轮次、阶段锚点、ReAct 摘要等 execute 专属信息。
func buildChatRoutingWorkspace(ctx *newagentmodel.ConversationContext) string {
func buildChatRoutingWorkspace(ctx *agentmodel.ConversationContext) string {
lines := []string{"路由补充:"}
if hasExecuteRoughBuildDone(ctx) {
lines = append(lines, "- 已存在 rough_build_done除非用户明确要求重新粗排否则不要再次触发 rough_build。")

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"context"

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"context"

View File

@@ -1,9 +1,9 @@
package newagentprompt
package agentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// buildConversationHistoryMessage 将“真实对话流”渲染成节点可直接复用的 msg1。
@@ -12,7 +12,7 @@ import (
// 1. 只负责把 user + assistant speak 组织成稳定文本;
// 2. 不拼接 tool_call / tool observation这些不属于“真实对话”
// 3. 不做长度裁剪,长度预算交给统一压缩层处理。
func buildConversationHistoryMessage(ctx *newagentmodel.ConversationContext, title string) string {
func buildConversationHistoryMessage(ctx *agentmodel.ConversationContext, title string) string {
title = strings.TrimSpace(title)
if title == "" {
title = "真实对话记录"

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -28,7 +28,7 @@ func BuildDeliverSystemPrompt() string {
}
// BuildDeliverMessages 组装交付阶段 messages。
func BuildDeliverMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) []*schema.Message {
func BuildDeliverMessages(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) []*schema.Message {
roughBuildPrefix := buildDeliverRoughBuildPrefix(ctx, state)
return buildUnifiedStageMessages(
ctx,
@@ -44,7 +44,7 @@ func BuildDeliverMessages(state *newagentmodel.CommonState, ctx *newagentmodel.C
}
// BuildDeliverUserPrompt 构造交付阶段的用户提示词。
func BuildDeliverUserPrompt(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) string {
func BuildDeliverUserPrompt(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) string {
var sb strings.Builder
sb.WriteString("请基于最近对话和交付工作区,生成一段自然、诚实的完成总结。\n")

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// buildDeliverConversationMessage 生成 deliver 节点看到的轻量历史提示。
@@ -13,7 +13,7 @@ import (
// 1. 这里不再承载完整历史,也不再把旧轮次对话重新灌回 deliver
// 2. 真正可供收口的本轮 execute 窗口放到 msg2由工作区统一呈现
// 3. 这里只给模型一个明确提示:历史已经折叠,请不要主动回顾旧轮次。
func buildDeliverConversationMessage(ctx *newagentmodel.ConversationContext) string {
func buildDeliverConversationMessage(ctx *agentmodel.ConversationContext) string {
return "历史视图:已折叠到交付工作区的本轮 execute 窗口,请仅依据 msg2 收口,不要回顾旧轮次。"
}
@@ -23,7 +23,7 @@ func buildDeliverConversationMessage(ctx *newagentmodel.ConversationContext) str
// 1. 这里只负责把粗排相关的任务类信息补进 msg3 前缀,不改写交付总结本身;
// 2. 只有在上下文里明确存在 rough_build_done 时才注入,避免普通交付场景被额外信息污染;
// 3. 这段前缀用于补齐第一次粗排没有正式计划时的任务类详情,优先让 deliver 看到 task_class_ids 和任务类约束。
func buildDeliverRoughBuildPrefix(ctx *newagentmodel.ConversationContext, state *newagentmodel.CommonState) string {
func buildDeliverRoughBuildPrefix(ctx *agentmodel.ConversationContext, state *agentmodel.CommonState) string {
if !hasExecuteRoughBuildDone(ctx) {
return ""
}
@@ -54,7 +54,7 @@ func buildDeliverRoughBuildPrefix(ctx *newagentmodel.ConversationContext, state
// 1. 先保留 deliver 原本依赖的结果态信息terminal outcome、计划进度、步骤简表
// 2. 再把基于 execute_loop_closed 切出来的“本轮 execute 窗口”拼到 msg2作为唯一的本轮事实视图
// 3. 没有正式计划时也保留 execute 窗口,保证 deliver 仍能基于当前轮活跃上下文诚实收口。
func buildDeliverWorkspace(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) string {
func buildDeliverWorkspace(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) string {
lines := []string{"交付工作区:"}
if state == nil {
lines = append(lines, "- 当前缺少流程状态,请仅基于可见结果态与本轮 execute 窗口诚实收口。")
@@ -80,7 +80,7 @@ func buildDeliverWorkspace(state *newagentmodel.CommonState, ctx *newagentmodel.
}
// renderDeliverTerminalSummary 返回 deliver 节点需要知道的收口状态。
func renderDeliverTerminalSummary(state *newagentmodel.CommonState) string {
func renderDeliverTerminalSummary(state *agentmodel.CommonState) string {
if state == nil || !state.HasTerminalOutcome() || state.TerminalOutcome == nil {
return "- 当前没有正式终止结果,请按最近对话和计划进度自然总结。"
}
@@ -97,7 +97,7 @@ func renderDeliverTerminalSummary(state *newagentmodel.CommonState) string {
}
// renderDeliverStepOutline 生成 deliver 节点使用的步骤简表。
func renderDeliverStepOutline(state *newagentmodel.CommonState, completed int) string {
func renderDeliverStepOutline(state *agentmodel.CommonState, completed int) string {
if state == nil || len(state.PlanSteps) == 0 {
return "- 暂无。"
}
@@ -123,7 +123,7 @@ func renderDeliverStepOutline(state *newagentmodel.CommonState, completed int) s
}
// countCompletedPlanSteps 统计当前已经完成的计划步骤数。
func countCompletedPlanSteps(state *newagentmodel.CommonState) int {
func countCompletedPlanSteps(state *agentmodel.CommonState) int {
if state == nil {
return 0
}

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -17,7 +17,7 @@ const deliverHistoryKindExecuteLoopClosed = "execute_loop_closed"
// 2. 从后往前找最后一个 execute_loop_closed确保拿到的是“最近一次已正常收口”的边界
// 3. 命中边界后只返回边界之后的消息,这样 deliver 看到的就是当前活跃轮次;
// 4. 若完全没有边界,说明会话尚未形成稳定闭环,此时退回全量 history避免误丢当前活跃上下文。
func sliceHistoryAfterLastExecuteLoopClosed(ctx *newagentmodel.ConversationContext) []*schema.Message {
func sliceHistoryAfterLastExecuteLoopClosed(ctx *agentmodel.ConversationContext) []*schema.Message {
if ctx == nil {
return nil
}
@@ -67,7 +67,7 @@ func isDeliverExecuteLoopClosedMarker(msg *schema.Message) bool {
// 2. 再分别抽取“本轮真实对话流”和“本轮 ReAct 工具事实链”,避免 deliver 回看旧 deliver 总结;
// 3. 若本轮还没有工具调用,也要明确告诉模型“当前无工具事实”,避免它擅自脑补;
// 4. 整段文本只服务 deliver.msg2不改变四段式骨架也不回写任何状态。
func buildDeliverExecuteWindow(ctx *newagentmodel.ConversationContext) string {
func buildDeliverExecuteWindow(ctx *agentmodel.ConversationContext) string {
lines := []string{"本轮 execute 窗口:"}
historyWindow := sliceHistoryAfterLastExecuteLoopClosed(ctx)

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -19,7 +19,7 @@ func BuildExecuteReActSystemPrompt() string {
}
// BuildExecuteMessages 组装执行阶段消息。
func BuildExecuteMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) []*schema.Message {
func BuildExecuteMessages(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) []*schema.Message {
if state != nil && state.HasPlan() {
return buildExecuteStageMessages(
BuildExecuteSystemPrompt(),
@@ -43,7 +43,7 @@ func BuildExecuteMessages(state *newagentmodel.CommonState, ctx *newagentmodel.C
// 1. 负责把"当前是第几步、当前步骤内容、done_when 判定"明确写进用户指令;
// 2. 不负责替代系统提示词中的工具规则和安全边界;
// 3. 当 state 无法提供有效当前步骤时,仅追加兜底提示,不在此处推进流程状态。
func buildExecuteStrictJSONUserPromptWithPlan(state *newagentmodel.CommonState) string {
func buildExecuteStrictJSONUserPromptWithPlan(state *agentmodel.CommonState) string {
base := buildExecuteStrictJSONUserPrompt()
if state == nil || !state.HasPlan() {
return base

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"encoding/json"
@@ -7,7 +7,7 @@ import (
"strconv"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -48,8 +48,8 @@ type executeLatestToolRecord struct {
// 4. msg3执行状态、阶段约束、记忆和本轮指令。
func buildExecuteStageMessages(
stageSystemPrompt string,
state *newagentmodel.CommonState,
ctx *newagentmodel.ConversationContext,
state *agentmodel.CommonState,
ctx *agentmodel.ConversationContext,
runtimeUserPrompt string,
) []*schema.Message {
msg0 := buildExecuteMessage0(stageSystemPrompt, state, ctx)
@@ -70,7 +70,7 @@ func buildExecuteStageMessages(
// 1. 先拼基础 system prompt保证身份和输出协议稳定。
// 2. 再按当前 domain / packs 注入动态规则包,让模型先读到边界。
// 3. 最后再附工具简表,避免模型只看到工具不看到纪律。
func buildExecuteMessage0(stageSystemPrompt string, state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) string {
func buildExecuteMessage0(stageSystemPrompt string, state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) string {
base := strings.TrimSpace(mergeSystemPrompts(ctx, stageSystemPrompt))
if base == "" {
base = "你是 SmartMate 执行器,请继续当前执行阶段。"
@@ -89,7 +89,7 @@ func buildExecuteMessage0(stageSystemPrompt string, state *newagentmodel.CommonS
}
// buildExecuteMessage1V3 只渲染真实对话流,不混入 tool observation。
func buildExecuteMessage1V3(ctx *newagentmodel.ConversationContext) string {
func buildExecuteMessage1V3(ctx *agentmodel.ConversationContext) string {
lines := []string{"历史上下文:"}
if ctx == nil {
lines = append(lines,
@@ -123,7 +123,7 @@ func buildExecuteMessage1V3(ctx *newagentmodel.ConversationContext) string {
//
// 1. 每条记录固定展示 thought / tool_call / observation方便模型做局部闭环。
// 2. 如果当前还没有任何 tool loop明确给“新一轮”占位避免模型误判缺上下文。
func buildExecuteMessage2V3(ctx *newagentmodel.ConversationContext) string {
func buildExecuteMessage2V3(ctx *agentmodel.ConversationContext) string {
lines := []string{"当轮 ReAct Loop 记录:"}
if ctx == nil {
lines = append(lines, "- 暂无可用 ReAct 记录。")
@@ -149,11 +149,11 @@ func buildExecuteMessage2V3(ctx *newagentmodel.ConversationContext) string {
// 1. 这里只放“当前轮真正会影响决策”的状态,避免 msg3 继续膨胀。
// 2. 读工具最近结果只给最新一条摘要,避免旧 observation 重复占上下文。
// 3. 最后一行固定落到“本轮指令”,保证模型收尾时注意力还在执行目标上。
func buildExecuteMessage3(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext, runtimeUserPrompt string) string {
func buildExecuteMessage3(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext, runtimeUserPrompt string) string {
lines := []string{"当前执行状态:"}
roughBuildDone := hasExecuteRoughBuildDone(ctx)
roundUsed, maxRounds := 0, newagentmodel.DefaultMaxRounds
roundUsed, maxRounds := 0, agentmodel.DefaultMaxRounds
modeText := "自由执行(无预定义步骤)"
activeDomain := ""
activePacks := []string{}
@@ -270,7 +270,7 @@ func buildExecuteMessage3(state *newagentmodel.CommonState, ctx *newagentmodel.C
// 1. 这里只给模型最低必要的参数和返回值感知,不重复塞完整 schema JSON。
// 2. 对复杂工具额外给一条调用示例,降低“参数字段写错”的概率。
// 3. 这里只展示当前真实可用工具,避免历史残留能力继续污染工具面。
func renderExecuteToolCatalogCompact(ctx *newagentmodel.ConversationContext, state *newagentmodel.CommonState) string {
func renderExecuteToolCatalogCompact(ctx *agentmodel.ConversationContext, state *agentmodel.CommonState) string {
if ctx == nil {
return ""
}
@@ -539,7 +539,7 @@ func renderExecuteToolCallText(toolName, toolArgs string) string {
return toolName + "(" + toolArgs + ")"
}
func hasExecuteRoughBuildDone(ctx *newagentmodel.ConversationContext) bool {
func hasExecuteRoughBuildDone(ctx *agentmodel.ConversationContext) bool {
if ctx == nil {
return false
}
@@ -551,7 +551,7 @@ func hasExecuteRoughBuildDone(ctx *newagentmodel.ConversationContext) bool {
return false
}
func renderExecuteLatestAnalyzeSummary(ctx *newagentmodel.ConversationContext) string {
func renderExecuteLatestAnalyzeSummary(ctx *agentmodel.ConversationContext) string {
record, ok := findExecuteLatestToolRecord(ctx, map[string]struct{}{
"analyze_health": {},
"analyze_rhythm": {},
@@ -562,7 +562,7 @@ func renderExecuteLatestAnalyzeSummary(ctx *newagentmodel.ConversationContext) s
return fmt.Sprintf("%s -> %s", record.ToolName, record.Observation)
}
func renderExecuteLatestMutationSummary(ctx *newagentmodel.ConversationContext) string {
func renderExecuteLatestMutationSummary(ctx *agentmodel.ConversationContext) string {
record, ok := findExecuteLatestToolRecord(ctx, map[string]struct{}{
"place": {},
"move": {},
@@ -577,7 +577,7 @@ func renderExecuteLatestMutationSummary(ctx *newagentmodel.ConversationContext)
return fmt.Sprintf("%s -> %s", record.ToolName, record.Observation)
}
func findExecuteLatestToolRecord(ctx *newagentmodel.ConversationContext, allowSet map[string]struct{}) (executeLatestToolRecord, bool) {
func findExecuteLatestToolRecord(ctx *agentmodel.ConversationContext, allowSet map[string]struct{}) (executeLatestToolRecord, bool) {
if ctx == nil || len(allowSet) == 0 {
return executeLatestToolRecord{}, false
}
@@ -734,7 +734,7 @@ func asExecuteString(value any) string {
return ""
}
func renderExecuteTaskClassIDs(state *newagentmodel.CommonState) string {
func renderExecuteTaskClassIDs(state *agentmodel.CommonState) string {
if state == nil || len(state.TaskClassIDs) == 0 {
return ""
}
@@ -747,11 +747,11 @@ func renderExecuteTaskClassIDs(state *newagentmodel.CommonState) string {
}
// renderExecuteMemoryContext 复用统一记忆入口,避免 execute 私自拼接其他 pinned block。
func renderExecuteMemoryContext(ctx *newagentmodel.ConversationContext) string {
func renderExecuteMemoryContext(ctx *agentmodel.ConversationContext) string {
return renderUnifiedMemoryContext(ctx)
}
func renderTaskClassUpsertRuntime(state *newagentmodel.CommonState) string {
func renderTaskClassUpsertRuntime(state *agentmodel.CommonState) string {
if state == nil || !state.TaskClassUpsertLastTried {
return ""
}

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"fmt"

View File

@@ -1,11 +1,11 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
)
// renderExecuteNextStepHintV2 生成 execute.msg3 的轻量方向提示。
@@ -15,7 +15,7 @@ import (
// 2. 普通链路仍保留必要的业务引导,避免误伤用户明确提出的普通调整请求。
// 3. 提示只给方向,不替模型代填最终写参数。
func renderExecuteNextStepHintV2(
state *newagentmodel.CommonState,
state *agentmodel.CommonState,
latestAnalyze string,
latestMutation string,
roughBuildDone bool,
@@ -25,7 +25,7 @@ func renderExecuteNextStepHintV2(
}
activeDomain := strings.TrimSpace(state.ActiveToolDomain)
activePacks := newagenttools.ResolveEffectiveToolPacks(state.ActiveToolDomain, state.ActiveToolPacks)
activePacks := agenttools.ResolveEffectiveToolPacks(state.ActiveToolDomain, state.ActiveToolPacks)
if state.ActiveOptimizeOnly {
switch {
@@ -90,7 +90,7 @@ func renderExecuteNextStepHintV2(
if activeDomain == "schedule" &&
latestAnalyze != "" &&
strings.Contains(latestAnalyze, "metrics") &&
!containsExecutePack(activePacks, newagenttools.ToolPackQueue) {
!containsExecutePack(activePacks, agenttools.ToolPackQueue) {
return `若诊断已经完成,下一步应转入读事实或写操作,不要重复 analyze_health涉及同类批量任务时优先考虑 packs=["queue"]。`
}

View File

@@ -1,12 +1,12 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
)
const (
@@ -48,7 +48,7 @@ type executeRulePack struct {
// 1. 这里负责“选哪些包 + 以什么顺序展示”,不负责工具目录本身。
// 2. 固定先放通用硬约束,再放 mode/domain/micro 包,保证模型先读边界后读特例。
// 3. 如果没有任何可展示规则包,则直接返回空串,避免无意义占位。
func renderExecuteRulePackSection(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) (string, []string) {
func renderExecuteRulePackSection(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) (string, []string) {
packs := selectExecuteRulePacks(state, ctx)
if len(packs) == 0 {
return "", nil
@@ -71,7 +71,7 @@ func renderExecuteRulePackSection(state *newagentmodel.CommonState, ctx *newagen
return strings.Join(lines, "\n"), names
}
func selectExecuteRulePacks(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) []executeRulePack {
func selectExecuteRulePacks(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) []executeRulePack {
selected := make([]executeRulePack, 0, 8)
seen := map[string]bool{}
@@ -98,16 +98,16 @@ func selectExecuteRulePacks(state *newagentmodel.CommonState, ctx *newagentmodel
case "schedule":
activePacks := readExecuteActiveToolPacks(state)
appendPack(buildExecuteSchedulePack())
if hasExecutePack(activePacks, newagenttools.ToolPackQueue) {
if hasExecutePack(activePacks, agenttools.ToolPackQueue) {
appendPack(buildExecuteQueueMicroPack())
}
if hasExecutePack(activePacks, newagenttools.ToolPackMutation) {
if hasExecutePack(activePacks, agenttools.ToolPackMutation) {
appendPack(buildExecuteScheduleMutationPack())
}
if hasExecutePack(activePacks, newagenttools.ToolPackAnalyze) {
if hasExecutePack(activePacks, agenttools.ToolPackAnalyze) {
appendPack(buildExecuteScheduleAnalyzePackV2())
}
if hasExecutePack(activePacks, newagenttools.ToolPackWeb) {
if hasExecutePack(activePacks, agenttools.ToolPackWeb) {
appendPack(buildExecuteScheduleWebPack())
}
case "taskclass":
@@ -127,18 +127,18 @@ func selectExecuteRulePacks(state *newagentmodel.CommonState, ctx *newagentmodel
return selected
}
func readExecuteActiveToolDomain(state *newagentmodel.CommonState) string {
func readExecuteActiveToolDomain(state *agentmodel.CommonState) string {
if state == nil {
return ""
}
return strings.TrimSpace(state.ActiveToolDomain)
}
func readExecuteActiveToolPacks(state *newagentmodel.CommonState) []string {
func readExecuteActiveToolPacks(state *agentmodel.CommonState) []string {
if state == nil {
return nil
}
return newagenttools.ResolveEffectiveToolPacks(state.ActiveToolDomain, state.ActiveToolPacks)
return agenttools.ResolveEffectiveToolPacks(state.ActiveToolDomain, state.ActiveToolPacks)
}
func hasExecutePack(packs []string, target string) bool {
@@ -337,7 +337,7 @@ func buildExecuteTaskClassRetryMicroPack() executeRulePack {
}
}
func shouldInjectExecuteDiagLoopPack(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) bool {
func shouldInjectExecuteDiagLoopPack(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext) bool {
if state == nil || !hasExecuteRoughBuildDone(ctx) {
return false
}
@@ -345,6 +345,6 @@ func shouldInjectExecuteDiagLoopPack(state *newagentmodel.CommonState, ctx *newa
return false
}
activePacks := readExecuteActiveToolPacks(state)
return hasExecutePack(activePacks, newagenttools.ToolPackAnalyze) &&
hasExecutePack(activePacks, newagenttools.ToolPackMutation)
return hasExecutePack(activePacks, agenttools.ToolPackAnalyze) &&
hasExecutePack(activePacks, agenttools.ToolPackMutation)
}

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import "strings"

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -42,7 +42,7 @@ func BuildPlanSystemPrompt() string {
}
// BuildPlanMessages 组装规划阶段的 messages。
func BuildPlanMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext, userInput string) []*schema.Message {
func BuildPlanMessages(state *agentmodel.CommonState, ctx *agentmodel.ConversationContext, userInput string) []*schema.Message {
return buildUnifiedStageMessages(
ctx,
StageMessagesConfig{
@@ -58,7 +58,7 @@ func BuildPlanMessages(state *newagentmodel.CommonState, ctx *newagentmodel.Conv
}
// BuildPlanUserPrompt 构造规划阶段的用户提示词。
func BuildPlanUserPrompt(state *newagentmodel.CommonState, userInput string) string {
func BuildPlanUserPrompt(state *agentmodel.CommonState, userInput string) string {
var sb strings.Builder
sb.WriteString("请继续当前任务规划,只输出一组 SMARTFLOW_DECISION 决策。\n")
@@ -112,9 +112,9 @@ func BuildPlanDecisionContractText() string {
"- step 的 done_when 应优先锚定查询结果已返回、validation 已通过、写工具已成功回执、粗排标记已产生、分析结论已可直接支撑下一步",
"- 例:\"我要复习离散数学,基础较差,大概学 8 节课,不要早上第 1-2 节和晚上第 11-12 节学习,周末也不想学,每节课内容你自己来\"——应规划为 taskclass而不是 schedule也通常不需要 ask_user",
}, "\n"),
newagentmodel.PlanActionContinue,
newagentmodel.PlanActionAskUser,
newagentmodel.PlanActionDone,
newagentmodel.PlanActionDone,
agentmodel.PlanActionContinue,
agentmodel.PlanActionAskUser,
agentmodel.PlanActionDone,
agentmodel.PlanActionDone,
))
}

View File

@@ -1,15 +1,15 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strconv"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// buildPlanConversationMessage 生成 plan 节点看到的真实对话视图。
func buildPlanConversationMessage(ctx *newagentmodel.ConversationContext) string {
func buildPlanConversationMessage(ctx *agentmodel.ConversationContext) string {
return buildConversationHistoryMessage(ctx, "规划参考对话")
}
@@ -19,7 +19,7 @@ func buildPlanConversationMessage(ctx *newagentmodel.ConversationContext) string
// 1. 这里既保留“当前已有计划/任务类约束”,也显式补充“规划视角的工具摘要”;
// 2. planner 需要先理解工具边界,才能把步骤收敛到最小闭环,而不是按抽象语义乱拆;
// 3. 工具摘要不展开全量 schema只提供规划真正需要的负责什么、不负责什么、常见闭环、完成证据、域切换条件。
func buildPlanWorkspace(state *newagentmodel.CommonState) string {
func buildPlanWorkspace(state *agentmodel.CommonState) string {
lines := []string{"规划工作区:"}
if state == nil {
lines = append(lines, "- 当前缺少流程状态,请主要依据最近对话与本轮输入继续规划。")
@@ -49,7 +49,7 @@ func buildPlanWorkspace(state *newagentmodel.CommonState) string {
}
// renderPlanCurrentStepSummary 返回 plan 节点需要知道的当前步骤进度。
func renderPlanCurrentStepSummary(state *newagentmodel.CommonState) string {
func renderPlanCurrentStepSummary(state *agentmodel.CommonState) string {
if state == nil || !state.HasPlan() {
return "- 当前步骤:暂无。"
}
@@ -73,7 +73,7 @@ func renderPlanCurrentStepSummary(state *newagentmodel.CommonState) string {
}
// renderPlanStepOutline 将完整计划压成 plan 节点可读的简表。
func renderPlanStepOutline(steps []newagentmodel.PlanStep) string {
func renderPlanStepOutline(steps []agentmodel.PlanStep) string {
if len(steps) == 0 {
return "- 暂无。"
}
@@ -94,7 +94,7 @@ func renderPlanStepOutline(steps []newagentmodel.PlanStep) string {
}
// renderPlanTaskClassIDs 返回批量排课场景下的 task_class_ids 简表。
func renderPlanTaskClassIDs(state *newagentmodel.CommonState) string {
func renderPlanTaskClassIDs(state *agentmodel.CommonState) string {
if state == nil || len(state.TaskClassIDs) == 0 {
return ""
}
@@ -112,7 +112,7 @@ func renderPlanTaskClassIDs(state *newagentmodel.CommonState) string {
// 1. 这里只保留名称、策略、总时段、日期范围这类规划相关信息;
// 2. 不再把所有字段原样平铺,避免工作区过胖;
// 3. 若某项字段为空,则直接省略,不制造噪声。
func renderPlanTaskClassMeta(state *newagentmodel.CommonState) string {
func renderPlanTaskClassMeta(state *agentmodel.CommonState) string {
if state == nil || len(state.TaskClasses) == 0 {
return ""
}

View File

@@ -1,11 +1,11 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
"time"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -80,9 +80,9 @@ func buildQuickTaskUserPrompt(userInput string) string {
// BuildQuickTaskMessages 组装快捷任务阶段的完整 messages含对话历史
func BuildQuickTaskMessages(
ctx *newagentmodel.ConversationContext,
ctx *agentmodel.ConversationContext,
userInput string,
toolSchemas []newagentmodel.ToolSchemaContext,
toolSchemas []agentmodel.ToolSchemaContext,
) []*schema.Message {
return buildUnifiedStageMessages(
ctx,
@@ -96,7 +96,7 @@ func BuildQuickTaskMessages(
)
}
func buildQuickTaskWorkspace(toolSchemas []newagentmodel.ToolSchemaContext) string {
func buildQuickTaskWorkspace(toolSchemas []agentmodel.ToolSchemaContext) string {
var sb strings.Builder
sb.WriteString("可用工具:\n")
for _, ts := range toolSchemas {

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package newagentprompt
package agentprompt
const (
// SystemPrompt 全局系统人设:定义 SmartMate 的基本调性

View File

@@ -1,10 +1,10 @@
package newagentprompt
package agentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -63,7 +63,7 @@ type StageMessagesConfig struct {
// 3. msg2(assistant):节点自定义的工作区;
// 4. msg3(user/system):节点自定义前后缀 + 统一 memory_context。
func buildUnifiedStageMessages(
ctx *newagentmodel.ConversationContext,
ctx *agentmodel.ConversationContext,
config StageMessagesConfig,
) []*schema.Message {
msg0 := buildUnifiedMsg0(config.SystemPrompt, ctx, config.SkipBaseSystemPrompt, config.UseLiteToolCatalogMsg)
@@ -93,7 +93,7 @@ func buildUnifiedMsg3Message(content string, role schema.RoleType) *schema.Messa
// 1. 先合并基础系统提示与节点系统提示,保证模型身份稳定;
// 2. 若当前节点注入了工具 schema则附加紧凑工具目录
// 3. 若两部分都为空,则回退到最小兜底提示,避免出现空消息。
func buildUnifiedMsg0(stageSystemPrompt string, ctx *newagentmodel.ConversationContext, skipBaseSystemPrompt bool, useLiteToolCatalog bool) string {
func buildUnifiedMsg0(stageSystemPrompt string, ctx *agentmodel.ConversationContext, skipBaseSystemPrompt bool, useLiteToolCatalog bool) string {
base := ""
if skipBaseSystemPrompt {
base = strings.TrimSpace(stageSystemPrompt)
@@ -119,7 +119,7 @@ func buildUnifiedMsg0(stageSystemPrompt string, ctx *newagentmodel.ConversationC
// 1. 只展示工具名和一句话职责,避免把 execute 的参数/返回示例污染到 plan/chat/deliver。
// 2. 目录信息仅用于“能力边界感知”,不承担具体参数指导。
// 3. 当工具数量过多时保留前若干项并给出省略提示,控制 msg0 体积。
func renderUnifiedToolCatalogLite(ctx *newagentmodel.ConversationContext) string {
func renderUnifiedToolCatalogLite(ctx *agentmodel.ConversationContext) string {
if ctx == nil {
return ""
}
@@ -192,7 +192,7 @@ func buildUnifiedMsg2(content string) string {
// 1. 前缀由节点决定,适合放轻量状态或阶段约束;
// 2. memory_context 只在这里注入一次,避免 pinned block 多入口重复出现;
// 3. 后缀由节点决定。对于 user-role 节点,通常把最终用户指令放在这里,保证消息末尾仍是用户输入。
func buildUnifiedMsg3(ctx *newagentmodel.ConversationContext, config StageMessagesConfig) string {
func buildUnifiedMsg3(ctx *agentmodel.ConversationContext, config StageMessagesConfig) string {
var sections []string
if prefix := strings.TrimSpace(config.Msg3Prefix); prefix != "" {
@@ -216,8 +216,8 @@ func buildUnifiedMsg3(ctx *newagentmodel.ConversationContext, config StageMessag
// 步骤化说明:
// 1. 只消费 memory_context避免把 execution_context / current_step 等阶段专属块混回 prompt
// 2. block 不存在或正文为空时直接返回空串;
// 3. 这里只读取 agentsvc 已经产出的最终文本,不在这里重新拼装记忆。
func renderUnifiedMemoryContext(ctx *newagentmodel.ConversationContext) string {
// 3. 这里只读取 agent/sv 已经产出的最终文本,不在这里重新拼装记忆。
func renderUnifiedMemoryContext(ctx *agentmodel.ConversationContext) string {
if ctx == nil {
return ""
}

View File

@@ -1,11 +1,11 @@
package newagentrouter
package agentrouter
import (
"fmt"
"regexp"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
var (
@@ -45,7 +45,7 @@ type StreamRouteParser struct {
buf strings.Builder
nonce string
routeFound bool
decision *newagentmodel.ChatRoutingDecision
decision *agentmodel.ChatRoutingDecision
}
// NewStreamRouteParser 创建流式路由解析器。
@@ -79,8 +79,8 @@ func (p *StreamRouteParser) Feed(content string) (visible string, routeReady boo
if len(text) > 500 {
// 超过 500 字符仍未匹配到控制码 -> fallback 到 plan。
p.routeFound = true
p.decision = &newagentmodel.ChatRoutingDecision{
Route: newagentmodel.ChatRoutePlan,
p.decision = &agentmodel.ChatRoutingDecision{
Route: agentmodel.ChatRoutePlan,
Raw: text,
}
return text, true, fmt.Errorf("控制码解析超时fallback 到 plan")
@@ -101,7 +101,7 @@ func (p *StreamRouteParser) Feed(content string) (visible string, routeReady boo
}
// 解析 route。
route := newagentmodel.ChatRoute(strings.TrimSpace(groups[2]))
route := agentmodel.ChatRoute(strings.TrimSpace(groups[2]))
// 解析可选布尔属性(默认 false
roughBuild := parseOptionalBool(groups, 3)
@@ -109,7 +109,7 @@ func (p *StreamRouteParser) Feed(content string) (visible string, routeReady boo
reorder := parseOptionalBool(groups, 5)
thinking := parseOptionalBool(groups, 6)
p.decision = &newagentmodel.ChatRoutingDecision{
p.decision = &agentmodel.ChatRoutingDecision{
Route: route,
NeedsRoughBuild: roughBuild,
NeedsRefineAfterRoughBuild: refine,
@@ -121,7 +121,7 @@ func (p *StreamRouteParser) Feed(content string) (visible string, routeReady boo
// 归一化与校验。
if validateErr := p.decision.Validate(); validateErr != nil {
// 校验失败 -> fallback 到 plan。
p.decision.Route = newagentmodel.ChatRoutePlan
p.decision.Route = agentmodel.ChatRoutePlan
p.decision.NeedsRoughBuild = false
p.decision.NeedsRefineAfterRoughBuild = false
p.decision.AllowReorder = false
@@ -150,7 +150,7 @@ func (p *StreamRouteParser) RouteReady() bool {
}
// Decision 返回已解析的路由决策RouteReady=true 后可用)。
func (p *StreamRouteParser) Decision() *newagentmodel.ChatRoutingDecision {
func (p *StreamRouteParser) Decision() *agentmodel.ChatRoutingDecision {
return p.decision
}

View File

@@ -1,4 +1,4 @@
package newagentrouter
package agentrouter
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import "github.com/LoveLosita/smartflow/backend/model"

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import (
"fmt"

View File

@@ -1,10 +1,10 @@
package newagentshared
package agentshared
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -25,7 +25,7 @@ const (
// - llmOutput: LLM 的原始输出内容,会作为 assistant 消息追加;
// - validOptionsDesc: 合法选项的描述,用于构造纠正提示。
func AppendLLMCorrection(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
llmOutput string,
validOptionsDesc string,
) {
@@ -54,7 +54,7 @@ func AppendLLMCorrection(
// 相比 AppendLLMCorrection该函数允许调用方提供更详细的错误描述
// 适用于需要明确告知 LLM 具体哪里出错的场景。
func AppendLLMCorrectionWithHint(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
llmOutput string,
errorDesc string,
validOptionsDesc string,
@@ -86,7 +86,7 @@ func AppendLLMCorrectionWithHint(
// 2. 若与“最近一条 assistant 文本”完全一致则跳过,避免同句反复回灌;
// 3. 仅负责“是否回灌”判定,不负责生成纠错 user 提示。
func appendCorrectionAssistantIfNeeded(
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
assistantContent string,
) {
if conversationContext == nil {

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import (
"encoding/json"
@@ -6,7 +6,7 @@ import (
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -19,7 +19,7 @@ import (
func LogNodeLLMContext(
stage string,
phase string,
flowState *newagentmodel.CommonState,
flowState *agentmodel.CommonState,
messages []*schema.Message,
) {
chatID := ""

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import llmservice "github.com/LoveLosita/smartflow/backend/services/llm"

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import (
"context"
@@ -6,10 +6,10 @@ import (
"fmt"
"log"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
"github.com/LoveLosita/smartflow/backend/pkg"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
)
@@ -24,11 +24,11 @@ type UnifiedCompactInput struct {
// Client 用于调用 LLM 压缩 msg1/msg2。
Client *llmservice.Client
// CompactionStore 用于持久化压缩摘要和 token 统计,为 nil 时跳过持久化。
CompactionStore newagentmodel.CompactionStore
CompactionStore agentmodel.CompactionStore
// FlowState 提供 userID / conversationID / roundUsed 等定位信息。
FlowState *newagentmodel.CommonState
FlowState *agentmodel.CommonState
// Emitter 用于推送压缩进度 SSE 事件。
Emitter *newagentstream.ChunkEmitter
Emitter *agentstream.ChunkEmitter
// StageName 标识当前阶段,如 execute / plan / chat / deliver。
StageName string
// StatusBlockID 是 SSE 状态推送的 block ID各节点使用自己的 block ID。
@@ -201,7 +201,7 @@ func compactUnifiedMsg1(
false,
)
newSummary, err := newagentprompt.CompactMsg1(ctx, input.Client, msg1, existingSummary)
newSummary, err := agentprompt.CompactMsg1(ctx, input.Client, msg1, existingSummary)
if err != nil {
log.Printf("[COMPACT:%s] compact msg1 failed: %v", input.StageName, err)
_ = input.Emitter.EmitStatus(
@@ -244,7 +244,7 @@ func compactUnifiedMsg2(
false,
)
compressed, err := newagentprompt.CompactMsg2(ctx, input.Client, msg2)
compressed, err := agentprompt.CompactMsg2(ctx, input.Client, msg2)
if err != nil {
log.Printf("[COMPACT:%s] compact msg2 failed: %v", input.StageName, err)
_ = input.Emitter.EmitStatus(

View File

@@ -1,11 +1,11 @@
package newagentshared
package agentshared
import (
"context"
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/cloudwego/eino/schema"
)
@@ -17,8 +17,8 @@ import (
// 3. 具体的 Redis / MySQL / 乐观缓存写入由 service 回调统一完成。
func PersistVisibleAssistantMessage(
ctx context.Context,
persist newagentmodel.PersistVisibleMessageFunc,
state *newagentmodel.CommonState,
persist agentmodel.PersistVisibleMessageFunc,
state *agentmodel.CommonState,
msg *schema.Message,
) {
if persist == nil || state == nil || msg == nil {

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import (
"context"

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
const (
TaskPriorityImportantUrgent = 1

View File

@@ -1,4 +1,4 @@
package newagentshared
package agentshared
import (
"sync"

View File

@@ -1,4 +1,4 @@
package newagentstream
package agentstream
import (
"context"
@@ -52,7 +52,7 @@ func DefaultPseudoStreamOptions() PseudoStreamOptions {
}
}
// ChunkEmitter 是 newAgent 统一的 SSE chunk 发射器。
// ChunkEmitter 是 agent 统一的 SSE chunk 发射器。
//
// 职责边界:
// 1. 负责把"正文 / 思考 / 工具事件 / 确认请求 / 中断提示"统一转换成 OpenAI 兼容 payload

View File

@@ -1,4 +1,4 @@
package newagentstream
package agentstream
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package newagentstream
package agentstream
import (
"context"

View File

@@ -1,4 +1,4 @@
package newagentstream
package agentstream
import "log"

View File

@@ -1,4 +1,4 @@
package newagentstream
package agentstream
import "github.com/cloudwego/eino/schema"

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -15,11 +15,11 @@ import (
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
"github.com/LoveLosita/smartflow/backend/model"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/pkg"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
@@ -58,12 +58,12 @@ type AgentService struct {
// 未注入时QueryTasksForTool 回退到旧逻辑(纯内存提升,不持久化)。
GetTasksWithUrgencyPromotionFunc func(ctx context.Context, userID int) ([]model.Task, error)
// ── newAgent 依赖(由 cmd/start.go 通过 Set* 方法注入)──
toolRegistry *newagenttools.ToolRegistry
scheduleProvider newagentmodel.ScheduleStateProvider
agentStateStore newagentmodel.AgentStateStore
compactionStore newagentmodel.CompactionStore
quickTaskDeps newagentmodel.QuickTaskDeps
// ── agent 依赖(由 cmd/start.go 通过 Set* 方法注入)──
toolRegistry *agenttools.ToolRegistry
scheduleProvider agentmodel.ScheduleStateProvider
agentStateStore agentmodel.AgentStateStore
compactionStore agentmodel.CompactionStore
quickTaskDeps agentmodel.QuickTaskDeps
memoryReader MemoryReader
memoryCfg memorymodel.Config
memoryObserver memoryobserve.Observer
@@ -121,7 +121,7 @@ func thinkingModeToBool(mode string) bool {
// pickChatModel 根据请求选择模型。
// 当前约定:
// - 旧链路已全面切到 newAgent graph这里仅作为 runNormalChatFlow 回退时的模型选择入口;
// - 旧链路已全面切到 agent graph这里仅作为 runNormalChatFlow 回退时的模型选择入口;
// - 统一返回 Pro 模型,旧 strategist 参数不再生效。
func (s *AgentService) pickChatModel(requestModel string) (*llmservice.Client, string) {
if s == nil || s.llmService == nil {
@@ -343,7 +343,7 @@ func (s *AgentService) runNormalChatFlow(
// 3. 计算本次请求可用的历史 token 预算,并执行历史裁剪。
// 这样可以在上下文增长时稳定控制模型窗口,避免超长上下文引发报错或高延迟。
historyBudget := pkg.HistoryTokenBudgetByModel(resolvedModelName, newagentprompt.SystemPrompt, userMessage)
historyBudget := pkg.HistoryTokenBudgetByModel(resolvedModelName, agentprompt.SystemPrompt, userMessage)
trimmedHistory, totalHistoryTokens, keptHistoryTokens, droppedCount := pkg.TrimHistoryByTokenBudget(chatHistory, historyBudget)
chatHistory = trimmedHistory
@@ -488,7 +488,7 @@ func (s *AgentService) AgentChat(ctx context.Context, userMessage string, thinki
go func() {
defer close(outChan)
s.runNewAgentGraph(ctx, userMessage, thinkingMode, modelName, userID, chatID, extra, traceID, requestStart, outChan, errChan)
s.runAgentGraph(ctx, userMessage, thinkingMode, modelName, userID, chatID, extra, traceID, requestStart, outChan, errChan)
}()
return outChan, errChan

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -8,7 +8,7 @@ import (
"time"
"github.com/LoveLosita/smartflow/backend/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
"github.com/cloudwego/eino/schema"
)
@@ -34,7 +34,7 @@ type ActiveScheduleSessionRerunFunc func(
// 3. AssistantText 为空时,调用方可降级为使用卡片摘要。
type ActiveScheduleSessionRerunResult struct {
AssistantText string
BusinessCard *newagentstream.StreamBusinessCardExtra
BusinessCard *agentstream.StreamBusinessCardExtra
SessionState model.ActiveScheduleSessionState
SessionStatus string
PreviewID string
@@ -142,7 +142,7 @@ func (s *AgentService) persistActiveScheduleTriggerPreviewBestEffort(ctx context
// 2. 占管期间先把用户消息写入历史和时间线,保证会话内容不丢失;
// 3. waiting_user_reply 进入 rerunning并同步调用主动调度 rerun
// 4. rerunning 则只提示“正在重跑”,避免同一 conversation 被并发重复推进;
// 5. 终态或非占管态直接放行普通 newAgent。
// 5. 终态或非占管态直接放行普通 agent。
func (s *AgentService) handleActiveScheduleSessionChat(
ctx context.Context,
userMessage string,
@@ -355,8 +355,8 @@ func isActiveScheduleSessionBlockingStatus(status string) bool {
}
}
func emitActiveScheduleAssistantChunk(outChan chan<- string, traceID string, modelName string, requestStart time.Time, text string, extra *newagentstream.OpenAIChunkExtra) {
payload, err := newagentstream.ToOpenAIAssistantChunkWithExtra(traceID, modelName, requestStart.Unix(), strings.TrimSpace(text), true, extra)
func emitActiveScheduleAssistantChunk(outChan chan<- string, traceID string, modelName string, requestStart time.Time, text string, extra *agentstream.OpenAIChunkExtra) {
payload, err := agentstream.ToOpenAIAssistantChunkWithExtra(traceID, modelName, requestStart.Unix(), strings.TrimSpace(text), true, extra)
if err != nil {
log.Printf("构造主动调度 assistant chunk 失败 trace=%s err=%v", traceID, err)
return
@@ -364,11 +364,11 @@ func emitActiveScheduleAssistantChunk(outChan chan<- string, traceID string, mod
pushChunkNonBlocking(outChan, payload)
}
func emitActiveScheduleBusinessCardChunk(outChan chan<- string, blockID string, traceID string, modelName string, requestStart time.Time, card *newagentstream.StreamBusinessCardExtra) {
func emitActiveScheduleBusinessCardChunk(outChan chan<- string, blockID string, traceID string, modelName string, requestStart time.Time, card *agentstream.StreamBusinessCardExtra) {
if card == nil {
return
}
payload, err := newagentstream.ToOpenAIStreamWithExtra(nil, traceID, modelName, requestStart.Unix(), true, newagentstream.NewBusinessCardExtra(blockID, "active_schedule_session", card))
payload, err := agentstream.ToOpenAIStreamWithExtra(nil, traceID, modelName, requestStart.Unix(), true, agentstream.NewBusinessCardExtra(blockID, "active_schedule_session", card))
if err != nil {
log.Printf("构造主动调度 business card chunk 失败 trace=%s err=%v", traceID, err)
return

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -8,29 +8,29 @@ import (
"strings"
"time"
newagentconv "github.com/LoveLosita/smartflow/backend/newAgent/conv"
newagentgraph "github.com/LoveLosita/smartflow/backend/newAgent/graph"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
agentgraph "github.com/LoveLosita/smartflow/backend/services/agent/graph"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
"github.com/cloudwego/eino/schema"
"github.com/spf13/viper"
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/respond"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
)
const (
newAgentHistoryKindKey = "newagent_history_kind"
newAgentHistoryKindLoopClosed = "execute_loop_closed"
agentHistoryKindKey = "newagent_history_kind"
agentHistoryKindLoopClosed = "execute_loop_closed"
)
// runNewAgentGraph 运行 newAgent 通用 graph直接替换旧 agent 路由逻辑。
// runAgentGraph 运行 agent 通用 graph直接替换旧 agent 路由逻辑。
//
// 职责边界:
// 1. 负责构造 AgentGraphRunInputRuntimeState、ConversationContext、Request、Deps
@@ -39,10 +39,10 @@ const (
// 4. 负责持久化聊天历史(复用现有逻辑)。
//
// 设计原则:
// 1. 直接走 newAgent graph不再经过旧的 agentrouter 路由决策;
// 1. 直接走 agent graph不再经过旧的 agentrouter 路由决策;
// 2. 所有任务类型chat、task、quick_note都由 graph 内部 LLM 决策;
// 3. 状态恢复、工具执行、确认流程全部由 graph 节点处理。
func (s *AgentService) runNewAgentGraph(
func (s *AgentService) runAgentGraph(
ctx context.Context,
userMessage string,
thinkingMode string,
@@ -57,7 +57,7 @@ func (s *AgentService) runNewAgentGraph(
) {
requestCtx, _ := withRequestTokenMeter(ctx)
if s == nil || s.llmService == nil {
// 0. newAgent 主链强依赖 llm-service装配漏传时直接返回错误避免 nil receiver panic。
// 0. agent 主链强依赖 llm-service装配漏传时直接返回错误避免 nil receiver panic。
pushErrNonBlocking(errChan, errors.New("agent llm service is not initialized"))
return
}
@@ -91,7 +91,7 @@ func (s *AgentService) runNewAgentGraph(
// 3. retry 机制已下线,不再构建重试元数据。
// 4. 如果当前 conversation 被主动调度 session 占管,先走 session 分支,不进入普通 newAgent。
// 4. 如果当前 conversation 被主动调度 session 占管,先走 session 分支,不进入普通 agent。
// 这样 waiting_user_reply / rerunning 期间,用户消息会先推动主动调度闭环,而不是误进自由聊天。
if handled, sessionErr := s.handleActiveScheduleSessionChat(requestCtx, userMessage, traceID, requestStart, userID, chatID, resolvedModelName, outChan, errChan); sessionErr != nil {
pushErrNonBlocking(errChan, sessionErr)
@@ -108,7 +108,7 @@ func (s *AgentService) runNewAgentGraph(
// 6. 构造 ConversationContext。
// 优先使用快照中恢复的 ConversationContext含工具调用/结果),
// 无快照时从 Redis LLM 历史缓存加载。
var conversationContext *newagentmodel.ConversationContext
var conversationContext *agentmodel.ConversationContext
if savedConversationContext != nil {
conversationContext = savedConversationContext
// 把用户本轮输入追加到恢复的上下文中(与 loadConversationContext 行为一致)。
@@ -155,7 +155,7 @@ func (s *AgentService) runNewAgentGraph(
return
}
persistVisibleMessage := func(persistCtx context.Context, state *newagentmodel.CommonState, msg *schema.Message) error {
persistVisibleMessage := func(persistCtx context.Context, state *agentmodel.CommonState, msg *schema.Message) error {
targetState := state
if targetState == nil {
targetState = runtimeState.EnsureCommonState()
@@ -180,7 +180,7 @@ func (s *AgentService) runNewAgentGraph(
confirmAction = readAgentExtraString(extra, "confirm_action")
resumeInteractionID = readAgentExtraString(extra, "resume_interaction_id")
}
graphRequest := newagentmodel.AgentGraphRequest{
graphRequest := agentmodel.AgentGraphRequest{
UserInput: userMessage,
ConfirmAction: confirmAction,
ResumeInteractionID: resumeInteractionID,
@@ -188,7 +188,7 @@ func (s *AgentService) runNewAgentGraph(
}
graphRequest.Normalize()
// 8. 适配 LLM clients统一从 llm-service 取出 newAgent 图所需模型,不再直接碰 AIHub
// 8. 适配 LLM clients统一从 llm-service 取出 agent 图所需模型,不再直接碰 AIHub
// 8.1 Chat/Deliver 使用 Pro 模型:路由分流、闲聊、交付总结属于标准复杂度。
// 8.2 Plan/Execute 使用 Max 模型:规划和 ReAct 循环需要深度推理能力。
llmClients := s.llmService.NewAgentModelClients()
@@ -199,16 +199,16 @@ func (s *AgentService) runNewAgentGraph(
summaryClient := llmClients.Summary
// 9. 适配 SSE emitter。
sseEmitter := newagentstream.NewSSEPayloadEmitter(outChan)
chunkEmitter := newagentstream.NewChunkEmitter(sseEmitter, traceID, resolvedModelName, requestStart.Unix())
sseEmitter := agentstream.NewSSEPayloadEmitter(outChan)
chunkEmitter := agentstream.NewChunkEmitter(sseEmitter, traceID, resolvedModelName, requestStart.Unix())
chunkEmitter.SetReasoningSummaryFunc(s.makeReasoningSummaryFunc(summaryClient))
// 关键卡片事件走统一时间线持久化,保证刷新后可重建。
chunkEmitter.SetExtraEventHook(func(extra *newagentstream.OpenAIChunkExtra) {
s.persistNewAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
chunkEmitter.SetExtraEventHook(func(extra *agentstream.OpenAIChunkExtra) {
s.persistAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
})
// 10. 构造 AgentGraphDeps由 cmd/start.go 注入的依赖)。
deps := newagentmodel.AgentGraphDeps{
deps := agentmodel.AgentGraphDeps{
ChatClient: chatClient,
PlanClient: planClient,
ExecuteClient: executeClient,
@@ -229,7 +229,7 @@ func (s *AgentService) runNewAgentGraph(
}
// 11. 构造 AgentGraphRunInput 并运行 graph。
runInput := newagentmodel.AgentGraphRunInput{
runInput := agentmodel.AgentGraphRunInput{
RuntimeState: runtimeState,
ConversationContext: conversationContext,
ScheduleState: savedScheduleState,
@@ -238,15 +238,15 @@ func (s *AgentService) runNewAgentGraph(
Deps: deps,
}
finalState, graphErr := newagentgraph.RunAgentGraph(requestCtx, runInput)
finalState, graphErr := agentgraph.RunAgentGraph(requestCtx, runInput)
if graphErr != nil {
// 1. 客户端断连导致的 context 取消,属于正常场景,不推错误通道也不跑 fallback。
// 否则会刷 "错误通道已满" 日志噪音,且 fallback 在 ctx 已取消时也会失败。
if errors.Is(graphErr, context.Canceled) || requestCtx.Err() != nil {
log.Printf("[WARN] newAgent graph 因客户端断连中止 trace=%s chat=%s", traceID, chatID)
log.Printf("[WARN] agent graph 因客户端断连中止 trace=%s chat=%s", traceID, chatID)
return
}
log.Printf("[ERROR] newAgent graph 执行失败 trace=%s chat=%s: %v", traceID, chatID, graphErr)
log.Printf("[ERROR] agent graph 执行失败 trace=%s chat=%s: %v", traceID, chatID, graphErr)
pushErrNonBlocking(errChan, fmt.Errorf("graph 执行失败: %w", graphErr))
// Graph 出错时回退普通聊天,保证可用性。回退使用 llm-service 的 Pro 模型。
@@ -256,11 +256,11 @@ func (s *AgentService) runNewAgentGraph(
// 12. 持久化聊天历史(用户消息 + 助手回复)。
requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
s.adjustNewAgentRequestTokenUsage(requestCtx, userID, chatID, requestTotalTokens)
s.adjustAgentRequestTokenUsage(requestCtx, userID, chatID, requestTotalTokens)
// 12.5. 将最终状态快照异步写入 MySQL通过 outbox
// Deliver 节点已将快照保存到 Redis2h TTL此处通过 outbox 异步写入 MySQL 做永久存储。
if finalState != nil {
snapshot := &newagentmodel.AgentStateSnapshot{
snapshot := &agentmodel.AgentStateSnapshot{
RuntimeState: finalState.EnsureRuntimeState(),
ConversationContext: finalState.EnsureConversationContext(),
}
@@ -302,9 +302,9 @@ func (s *AgentService) runNewAgentGraph(
// 这些消息不会出现在 Redis LLM 历史缓存中;
// 2. 恢复场景confirm/ask_user必须使用快照中的 ConversationContext否则工具结果丢失
// 导致后续 LLM 调用收到非法的裸 Tool 消息API 拒绝请求、连接断开。
func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID string, userID int) (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
newRT := func() (*newagentmodel.AgentRuntimeState, *newagentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
rt := newagentmodel.NewAgentRuntimeState(nil)
func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID string, userID int) (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
newRT := func() (*agentmodel.AgentRuntimeState, *agentmodel.ConversationContext, *schedule.ScheduleState, *schedule.ScheduleState) {
rt := agentmodel.NewAgentRuntimeState(nil)
cs := rt.EnsureCommonState()
cs.UserID = userID
cs.ConversationID = chatID // saveAgentState 依赖此字段决定是否持久化
@@ -337,13 +337,13 @@ func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID stri
// 1. 冷加载兜底:若上一轮已经收口且当前没有待恢复交互,说明本次是新一轮请求;
// 2. 这里先重置执行期临时字段,避免旧 round/terminal 状态污染 chat 路由和后续 execute
// 3. 即使 chat 节点也有同条件重置,这里仍保留兜底,覆盖断线恢复或入口绕行场景。
if !snapshot.RuntimeState.HasPendingInteraction() && cs.Phase == newagentmodel.PhaseDone {
if !snapshot.RuntimeState.HasPendingInteraction() && cs.Phase == agentmodel.PhaseDone {
terminalBefore := cs.TerminalStatus()
roundBefore := cs.RoundUsed
// 1. 仅"正常完成(completed)"写 loop 收口 marker
// 1.1 下一轮执行时prompt 会把上一轮 loop 从 msg2 归档到 msg1
// 1.2 异常中断aborted/exhausted不写 marker保留 msg2 便于后续续跑。
if terminalBefore == newagentmodel.FlowTerminalStatusCompleted {
if terminalBefore == agentmodel.FlowTerminalStatusCompleted {
appendExecuteLoopClosedMarker(snapshot.ConversationContext)
}
cs.ResetForNextRun()
@@ -376,7 +376,7 @@ func (s *AgentService) loadOrCreateRuntimeState(ctx context.Context, chatID stri
// 1. 只追加轻量 marker 供 prompt 分层,不做历史摘要或裁剪;
// 2. 若末尾已是同类 marker则幂等跳过
// 3. context 为空时直接返回,避免冷启动异常。
func appendExecuteLoopClosedMarker(conversationContext *newagentmodel.ConversationContext) {
func appendExecuteLoopClosedMarker(conversationContext *agentmodel.ConversationContext) {
if conversationContext == nil {
return
}
@@ -384,7 +384,7 @@ func appendExecuteLoopClosedMarker(conversationContext *newagentmodel.Conversati
if len(history) > 0 {
last := history[len(history)-1]
if last != nil && last.Extra != nil {
if kind, ok := last.Extra[newAgentHistoryKindKey].(string); ok && strings.TrimSpace(kind) == newAgentHistoryKindLoopClosed {
if kind, ok := last.Extra[agentHistoryKindKey].(string); ok && strings.TrimSpace(kind) == agentHistoryKindLoopClosed {
return
}
}
@@ -394,13 +394,13 @@ func appendExecuteLoopClosedMarker(conversationContext *newagentmodel.Conversati
Role: schema.Assistant,
Content: "",
Extra: map[string]any{
newAgentHistoryKindKey: newAgentHistoryKindLoopClosed,
agentHistoryKindKey: agentHistoryKindLoopClosed,
},
})
}
// loadConversationContext 加载对话历史,构造 ConversationContext。
func (s *AgentService) loadConversationContext(ctx context.Context, chatID, userMessage string) *newagentmodel.ConversationContext {
func (s *AgentService) loadConversationContext(ctx context.Context, chatID, userMessage string) *agentmodel.ConversationContext {
// 从 Redis 加载历史。
history, err := s.agentCache.GetHistory(ctx, chatID)
if err != nil {
@@ -423,7 +423,7 @@ func (s *AgentService) loadConversationContext(ctx context.Context, chatID, user
}
// 构造 ConversationContext。
conversationContext := newagentmodel.NewConversationContext(newagentprompt.SystemPrompt)
conversationContext := agentmodel.NewConversationContext(agentprompt.SystemPrompt)
if history != nil {
conversationContext.ReplaceHistory(history)
}
@@ -436,11 +436,11 @@ func (s *AgentService) loadConversationContext(ctx context.Context, chatID, user
return conversationContext
}
// persistNewAgentConversationMessage 负责把 newAgent 链路里"真正对用户可见"的消息统一落到 Redis + MySQL。
// persistNewAgentConversationMessage 负责把 agent 链路里"真正对用户可见"的消息统一落到 Redis + MySQL。
//
// 职责边界:
// 1. 只做单条消息的持久化,不做 graph 流程控制;
// 2. TokensConsumed 由调用方显式传入,newAgent 逐条可见消息默认写 0
// 2. TokensConsumed 由调用方显式传入,agent 逐条可见消息默认写 0
// 3. Redis 失败只记日志DB 失败返回错误,便于调用方决定是否中止当前链路。
func (s *AgentService) persistNewAgentConversationMessage(
ctx context.Context,
@@ -458,7 +458,7 @@ func (s *AgentService) persistNewAgentConversationMessage(
return nil
}
if userID <= 0 || strings.TrimSpace(chatID) == "" {
return fmt.Errorf("newAgent visible message persist: invalid conversation identity")
return fmt.Errorf("agent visible message persist: invalid conversation identity")
}
if ctx == nil {
ctx = context.Background()
@@ -479,7 +479,7 @@ func (s *AgentService) persistNewAgentConversationMessage(
}
if err := s.agentCache.PushMessage(ctx, chatID, persistMsg); err != nil {
log.Printf("写入 newAgent 可见消息到 Redis 失败 chat=%s role=%s: %v", chatID, role, err)
log.Printf("写入 agent 可见消息到 Redis 失败 chat=%s role=%s: %v", chatID, role, err)
}
reasoningDurationSeconds := 0
@@ -535,7 +535,7 @@ func (s *AgentService) persistNewAgentConversationMessage(
}
// makeRoughBuildFunc 把 AgentService 上的 HybridScheduleWithPlanMultiFunc 封装成
// newAgent 层的 RoughBuildFunc将 HybridScheduleWithPlanMultiFunc 的结果转换为 RoughBuildPlacement。
// agent 层的 RoughBuildFunc将 HybridScheduleWithPlanMultiFunc 的结果转换为 RoughBuildPlacement。
// HybridScheduleWithPlanMultiFunc 未注入时返回 nilRoughBuild 节点会静默跳过粗排。
//
// 修复说明:
@@ -543,13 +543,13 @@ func (s *AgentService) persistNewAgentConversationMessage(
// placement普通时段放置的任务全部被丢弃。
// 正确做法:使用第一个返回值 []HybridScheduleEntry过滤 Status="suggested" 且 TaskItemID>0 的条目,
// 这样嵌入和非嵌入的粗排结果都能正确写入 ScheduleState。
// adjustNewAgentRequestTokenUsage 负责把本轮 graph 的请求级 token 一次性回写到账本。
// adjustAgentRequestTokenUsage 负责把本轮 graph 的请求级 token 一次性回写到账本。
//
// 说明:
// 1. newAgent 逐条可见消息都按 0 token 落库,最终统一在这里补记整轮消耗;
// 1. agent 逐条可见消息都按 0 token 落库,最终统一在这里补记整轮消耗;
// 2. 如果启用了 outbox就沿用异步 token 调整事件,保持写账口径一致;
// 3. 该步骤属于请求收尾,不应反过来打断用户已看到的回复。
func (s *AgentService) adjustNewAgentRequestTokenUsage(ctx context.Context, userID int, chatID string, deltaTokens int) {
func (s *AgentService) adjustAgentRequestTokenUsage(ctx context.Context, userID int, chatID string, deltaTokens int) {
if s == nil || userID <= 0 || strings.TrimSpace(chatID) == "" || deltaTokens <= 0 {
return
}
@@ -565,31 +565,31 @@ func (s *AgentService) adjustNewAgentRequestTokenUsage(ctx context.Context, user
Reason: "new_agent_request",
TriggeredAt: time.Now(),
}); err != nil {
log.Printf("写入 newAgent 请求级 token 调整事件失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
log.Printf("写入 agent 请求级 token 调整事件失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
}
return
}
if err := s.repo.AdjustTokenUsage(ctx, userID, chatID, deltaTokens, ""); err != nil {
log.Printf("同步写入 newAgent 请求级 token 调整失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
log.Printf("同步写入 agent 请求级 token 调整失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
}
}
func (s *AgentService) makeRoughBuildFunc() newagentmodel.RoughBuildFunc {
func (s *AgentService) makeRoughBuildFunc() agentmodel.RoughBuildFunc {
if s.HybridScheduleWithPlanMultiFunc == nil {
return nil
}
return func(ctx context.Context, userID int, taskClassIDs []int) ([]newagentmodel.RoughBuildPlacement, error) {
return func(ctx context.Context, userID int, taskClassIDs []int) ([]agentmodel.RoughBuildPlacement, error) {
entries, _, err := s.HybridScheduleWithPlanMultiFunc(ctx, userID, taskClassIDs)
if err != nil {
return nil, err
}
placements := make([]newagentmodel.RoughBuildPlacement, 0, len(entries))
placements := make([]agentmodel.RoughBuildPlacement, 0, len(entries))
for _, entry := range entries {
if entry.Status != "suggested" || entry.TaskItemID == 0 {
continue
}
placements = append(placements, newagentmodel.RoughBuildPlacement{
placements = append(placements, agentmodel.RoughBuildPlacement{
TaskItemID: entry.TaskItemID,
Week: entry.Week,
DayOfWeek: entry.DayOfWeek,
@@ -602,13 +602,13 @@ func (s *AgentService) makeRoughBuildFunc() newagentmodel.RoughBuildFunc {
}
// makeWriteSchedulePreviewFunc 封装 cacheDAO 写排程预览缓存的操作,供 Execute/Deliver 节点复用。
func (s *AgentService) makeWriteSchedulePreviewFunc() newagentmodel.WriteSchedulePreviewFunc {
func (s *AgentService) makeWriteSchedulePreviewFunc() agentmodel.WriteSchedulePreviewFunc {
if s.cacheDAO == nil {
return nil
}
return func(ctx context.Context, state *schedule.ScheduleState, userID int, conversationID string, taskClassIDs []int) error {
stateDigest := summarizeScheduleStateForPreviewDebug(state)
preview := newagentconv.ScheduleStateToPreview(state, userID, conversationID, taskClassIDs, "")
preview := agentconv.ScheduleStateToPreview(state, userID, conversationID, taskClassIDs, "")
if preview == nil {
log.Printf("[WARN] schedule preview skipped chat=%s user=%d state=%s", conversationID, userID, stateDigest)
return nil
@@ -702,26 +702,26 @@ func summarizeHybridEntriesForPreviewDebug(entries []model.HybridScheduleEntry)
// --- 依赖注入字段 ---
// toolRegistry 由 cmd/start.go 注入
func (s *AgentService) SetToolRegistry(registry *newagenttools.ToolRegistry) {
func (s *AgentService) SetToolRegistry(registry *agenttools.ToolRegistry) {
s.toolRegistry = registry
}
// scheduleProvider 由 cmd/start.go 注入
func (s *AgentService) SetScheduleProvider(provider newagentmodel.ScheduleStateProvider) {
func (s *AgentService) SetScheduleProvider(provider agentmodel.ScheduleStateProvider) {
s.scheduleProvider = provider
}
// agentStateStore 由 cmd/start.go 注入
func (s *AgentService) SetAgentStateStore(store newagentmodel.AgentStateStore) {
func (s *AgentService) SetAgentStateStore(store agentmodel.AgentStateStore) {
s.agentStateStore = store
}
// compactionStore 由 cmd/start.go 注入
func (s *AgentService) SetCompactionStore(store newagentmodel.CompactionStore) {
func (s *AgentService) SetCompactionStore(store agentmodel.CompactionStore) {
s.compactionStore = store
}
// quickTaskDeps 由 cmd/start.go 注入
func (s *AgentService) SetQuickTaskDeps(deps newagentmodel.QuickTaskDeps) {
func (s *AgentService) SetQuickTaskDeps(deps agentmodel.QuickTaskDeps) {
s.quickTaskDeps = deps
}

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -8,15 +8,15 @@ import (
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
const (
newAgentMemoryRetrieveLimit = 10
newAgentMemoryIntroLine = "以下是与当前对话相关的用户记忆,仅在自然且确实有帮助时参考,不要生硬复述。"
agentMemoryRetrieveLimit = 10
agentMemoryIntroLine = "以下是与当前对话相关的用户记忆,仅在自然且确实有帮助时参考,不要生硬复述。"
)
// MemoryReader 描述 newAgent 主链路读取记忆所需的最小能力。
// MemoryReader 描述 agent 主链路读取记忆所需的最小能力。
//
// 职责边界:
// 1. 只负责"按当前输入取回候选记忆"
@@ -31,7 +31,7 @@ type memoryObserveProvider interface {
MemoryMetrics() memoryobserve.MetricsRecorder
}
// SetMemoryReader 注入 newAgent 主链路读取记忆所需的薄接口与渲染配置。
// SetMemoryReader 注入 agent 主链路读取记忆所需的薄接口与渲染配置。
func (s *AgentService) SetMemoryReader(reader MemoryReader, cfg memorymodel.Config) {
s.memoryReader = reader
s.memoryCfg = cfg
@@ -51,7 +51,7 @@ func (s *AgentService) SetMemoryReader(reader MemoryReader, cfg memorymodel.Conf
// 3. Chat 节点直接用缓存记忆启动首字节零延迟Execute/Plan 通过 channel 消费最新结果。
func (s *AgentService) injectMemoryContext(
ctx context.Context,
conversationContext *newagentmodel.ConversationContext,
conversationContext *agentmodel.ConversationContext,
userID int,
chatID string,
userMessage string,
@@ -64,7 +64,7 @@ func (s *AgentService) injectMemoryContext(
// 1. 门控检查:无 reader 或无效用户时清掉旧 block 并返回空 channel。
if s.memoryReader == nil || userID <= 0 {
conversationContext.RemovePinnedBlock(newagentmodel.MemoryContextBlockKey)
conversationContext.RemovePinnedBlock(agentmodel.MemoryContextBlockKey)
return memoryFuture
}
@@ -73,9 +73,9 @@ func (s *AgentService) injectMemoryContext(
if len(cachedItems) > 0 {
content := renderMemoryPinnedContentByMode(cachedItems, s.memoryCfg.EffectiveInjectRenderMode())
if content != "" {
conversationContext.UpsertPinnedBlock(newagentmodel.ContextBlock{
Key: newagentmodel.MemoryContextBlockKey,
Title: newagentmodel.MemoryContextBlockTitle,
conversationContext.UpsertPinnedBlock(agentmodel.ContextBlock{
Key: agentmodel.MemoryContextBlockKey,
Title: agentmodel.MemoryContextBlockTitle,
Content: content,
})
s.recordMemoryInject(ctx, userID, len(cachedItems), true, nil, "prefetch_cache")
@@ -110,7 +110,7 @@ func (s *AgentService) prefetchMemoryForNextTurn(userID int, chatID, userMessage
Query: strings.TrimSpace(userMessage),
UserID: userID,
ConversationID: strings.TrimSpace(chatID),
Limit: newAgentMemoryRetrieveLimit,
Limit: agentMemoryRetrieveLimit,
Now: time.Now(),
})
if err != nil {

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"fmt"
@@ -24,7 +24,7 @@ func RenderFlatMemoryContent(items []memorymodel.ItemDTO) string {
}
var sb strings.Builder
sb.WriteString(newAgentMemoryIntroLine)
sb.WriteString(agentMemoryIntroLine)
seen := make(map[string]struct{}, len(items))
written := 0
@@ -110,7 +110,7 @@ func RenderTypedMemoryContent(items []memorymodel.ItemDTO) string {
}
var sb strings.Builder
sb.WriteString(newAgentMemoryIntroLine)
sb.WriteString(agentMemoryIntroLine)
for _, section := range sections {
sb.WriteString("\n\n【")
sb.WriteString(section.Title)

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -8,8 +8,8 @@ import (
"time"
"github.com/LoveLosita/smartflow/backend/model"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
"github.com/LoveLosita/smartflow/backend/respond"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
)
// GetSchedulePlanPreview 按 conversation_id 读取结构化排程预览。
@@ -38,7 +38,7 @@ func (s *AgentService) GetSchedulePlanPreview(ctx context.Context, userID int, c
if preview.UserID > 0 && preview.UserID != userID {
return nil, respond.SchedulePlanPreviewNotFound
}
plans := newagentshared.CloneWeekSchedules(preview.CandidatePlans)
plans := agentshared.CloneWeekSchedules(preview.CandidatePlans)
if plans == nil {
plans = make([]model.UserWeekSchedule, 0)
}
@@ -47,7 +47,7 @@ func (s *AgentService) GetSchedulePlanPreview(ctx context.Context, userID int, c
TraceID: strings.TrimSpace(preview.TraceID),
Summary: strings.TrimSpace(preview.Summary),
CandidatePlans: plans,
HybridEntries: newagentshared.CloneHybridEntries(preview.HybridEntries),
HybridEntries: agentshared.CloneHybridEntries(preview.HybridEntries),
TaskClassIDs: preview.TaskClassIDs,
GeneratedAt: preview.GeneratedAt,
}, nil
@@ -89,10 +89,10 @@ func snapshotToSchedulePlanPreviewCache(snapshot *model.SchedulePlanStateSnapsho
ConversationID: snapshot.ConversationID,
TraceID: strings.TrimSpace(snapshot.TraceID),
Summary: schedulePlanSummaryOrFallback(strings.TrimSpace(snapshot.FinalSummary)),
CandidatePlans: newagentshared.CloneWeekSchedules(snapshot.CandidatePlans),
CandidatePlans: agentshared.CloneWeekSchedules(snapshot.CandidatePlans),
TaskClassIDs: append([]int(nil), snapshot.TaskClassIDs...),
HybridEntries: newagentshared.CloneHybridEntries(snapshot.HybridEntries),
AllocatedItems: newagentshared.CloneTaskClassItems(snapshot.AllocatedItems),
HybridEntries: agentshared.CloneHybridEntries(snapshot.HybridEntries),
AllocatedItems: agentshared.CloneTaskClassItems(snapshot.AllocatedItems),
GeneratedAt: generatedAt,
}
}
@@ -102,7 +102,7 @@ func snapshotToSchedulePlanPreviewResponse(snapshot *model.SchedulePlanStateSnap
if snapshot == nil {
return nil
}
plans := newagentshared.CloneWeekSchedules(snapshot.CandidatePlans)
plans := agentshared.CloneWeekSchedules(snapshot.CandidatePlans)
if plans == nil {
plans = make([]model.UserWeekSchedule, 0)
}
@@ -115,7 +115,7 @@ func snapshotToSchedulePlanPreviewResponse(snapshot *model.SchedulePlanStateSnap
TraceID: strings.TrimSpace(snapshot.TraceID),
Summary: schedulePlanSummaryOrFallback(strings.TrimSpace(snapshot.FinalSummary)),
CandidatePlans: plans,
HybridEntries: newagentshared.CloneHybridEntries(snapshot.HybridEntries),
HybridEntries: agentshared.CloneHybridEntries(snapshot.HybridEntries),
TaskClassIDs: snapshot.TaskClassIDs,
GeneratedAt: generatedAt,
}

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -8,10 +8,10 @@ import (
"strings"
"github.com/LoveLosita/smartflow/backend/model"
newagentconv "github.com/LoveLosita/smartflow/backend/newAgent/conv"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
"github.com/LoveLosita/smartflow/backend/respond"
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
)
// SaveScheduleState 处理前端拖拽后的“暂存排程状态”请求。
@@ -50,7 +50,7 @@ func (s *AgentService) SaveScheduleState(
// 3.1 这里只修改 source=task_item 任务;
// 3.2 source=event 课程位保持不变;
// 3.3 坐标非法时由 ApplyPlacedItems 返回明确错误。
if err := newagentconv.ApplyPlacedItems(snapshot.ScheduleState, items); err != nil {
if err := agentconv.ApplyPlacedItems(snapshot.ScheduleState, items); err != nil {
return err
}
@@ -78,7 +78,7 @@ func (s *AgentService) refreshSchedulePreviewAfterStateSave(
ctx context.Context,
userID int,
conversationID string,
snapshot *newagentmodel.AgentStateSnapshot,
snapshot *agentmodel.AgentStateSnapshot,
) error {
// 1. 依赖不完整时直接跳过,避免写入不完整缓存。
if s == nil || s.cacheDAO == nil || snapshot == nil || snapshot.ScheduleState == nil {
@@ -97,7 +97,7 @@ func (s *AgentService) refreshSchedulePreviewAfterStateSave(
}
// 3. 基于最新 ScheduleState 生成预览主干hybrid_entries 为最新真值)。
preview := newagentconv.ScheduleStateToPreview(
preview := agentconv.ScheduleStateToPreview(
snapshot.ScheduleState,
userID,
normalizedConversationID,
@@ -116,10 +116,10 @@ func (s *AgentService) refreshSchedulePreviewAfterStateSave(
if existingPreview != nil {
preview.TraceID = strings.TrimSpace(existingPreview.TraceID)
if len(existingPreview.CandidatePlans) > 0 {
preview.CandidatePlans = newagentshared.CloneWeekSchedules(existingPreview.CandidatePlans)
preview.CandidatePlans = agentshared.CloneWeekSchedules(existingPreview.CandidatePlans)
}
if len(existingPreview.AllocatedItems) > 0 {
preview.AllocatedItems = newagentshared.CloneTaskClassItems(existingPreview.AllocatedItems)
preview.AllocatedItems = agentshared.CloneTaskClassItems(existingPreview.AllocatedItems)
}
if len(preview.TaskClassIDs) == 0 && len(existingPreview.TaskClassIDs) > 0 {
preview.TaskClassIDs = append([]int(nil), existingPreview.TaskClassIDs...)

View File

@@ -1,4 +1,4 @@
package agentsvc
package sv
import (
"context"
@@ -6,8 +6,8 @@ import (
"strings"
"time"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
@@ -28,7 +28,7 @@ func (s *AgentService) streamChatFallback(
chatID string,
) (string, string, int, *schema.TokenUsage, error) {
messages := make([]*schema.Message, 0, len(chatHistory)+2)
messages = append(messages, schema.SystemMessage(newagentprompt.SystemPrompt))
messages = append(messages, schema.SystemMessage(agentprompt.SystemPrompt))
if len(chatHistory) > 0 {
messages = append(messages, chatHistory...)
}
@@ -40,14 +40,14 @@ func (s *AgentService) streamChatFallback(
requestID := "chatcmpl-" + uuid.NewString()
created := time.Now().Unix()
firstChunk := true
chunkEmitter := newagentstream.NewChunkEmitter(newagentstream.NewSSEPayloadEmitter(outChan), requestID, modelName, created)
chunkEmitter := agentstream.NewChunkEmitter(agentstream.NewSSEPayloadEmitter(outChan), requestID, modelName, created)
reasoningSummaryClient := s.llmService.LiteClient()
if reasoningSummaryClient == nil {
reasoningSummaryClient = s.llmService.ProClient()
}
chunkEmitter.SetReasoningSummaryFunc(s.makeReasoningSummaryFunc(reasoningSummaryClient))
chunkEmitter.SetExtraEventHook(func(extra *newagentstream.OpenAIChunkExtra) {
s.persistNewAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
chunkEmitter.SetExtraEventHook(func(extra *agentstream.OpenAIChunkExtra) {
s.persistAgentTimelineExtraEvent(context.Background(), userID, chatID, extra)
})
reasoningDigestor, digestorErr := chunkEmitter.NewReasoningDigestor(ctx, "fallback.speak", "fallback")
if digestorErr != nil {
@@ -95,7 +95,7 @@ func (s *AgentService) streamChatFallback(
}
if chunk != nil && chunk.ResponseMeta != nil && chunk.ResponseMeta.Usage != nil {
tokenUsage = newagentstream.MergeUsage(tokenUsage, chunk.ResponseMeta.Usage)
tokenUsage = agentstream.MergeUsage(tokenUsage, chunk.ResponseMeta.Usage)
}
if chunk != nil {

Some files were not shown because too many files have changed in this diff Show More