Version: 0.9.76.dev.260505
后端: 1.阶段 6 agent / memory 服务化收口 - 新增 cmd/agent 独立进程入口,承载 agent zrpc server、agent outbox relay / consumer 和运行时依赖初始化 - 补齐 services/agent/rpc 的 Chat stream 与 conversation meta/list/timeline、schedule-preview、context-stats、schedule-state unary RPC - 新增 gateway/client/agent 与 shared/contracts/agent,将 /api/v1/agent chat 和非 chat 门面切到 agent zrpc - 收缩 gateway 本地 AgentService 装配,双 RPC 开关开启时不再初始化本地 agent 编排、LLM、RAG 和 memory reader fallback - 将 backend/memory 物理迁入 services/memory,私有实现收入 internal,保留 module/model/observe 作为 memory 服务门面 - 调整 memory outbox、memory reader 和 agent 记忆渲染链路的 import 与服务边界,cmd/memory 独占 memory worker / consumer - 关闭 gateway 侧 agent outbox worker 所有权,agent relay / consumer 由 cmd/agent 独占,gateway 仅保留 HTTP/SSE 门面与迁移期开关回退 - 更新阶段 6 文档,记录 agent / memory 当前切流点、smoke 结果,以及 backend/client 与 gateway/shared 的目录收口口径
This commit is contained in:
269
backend/cmd/agent/active_schedule_rerun.go
Normal file
269
backend/cmd/agent/active_schedule_rerun.go
Normal file
@@ -0,0 +1,269 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
)
|
||||
|
||||
func buildActiveSchedulePreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||
ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
|
||||
}) (*activesvc.PreviewConfirmService, error) {
|
||||
previewService, err := activepreview.NewService(activeDAO)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, scheduleApplyAdapter)
|
||||
}
|
||||
|
||||
// buildActiveScheduleSessionRerunFunc 把主动调度定位器 / graph / preview 能力装成聊天入口可调用的 rerun 闭包。
|
||||
//
|
||||
// 说明:
|
||||
// 1. 这里只做最小接线:复用现有定位器 -> trigger -> graph -> preview 组件,不把 worker/notification 再搬一遍;
|
||||
// 2. 成功时返回 session 状态、assistant 文本和业务卡片数据;
|
||||
// 3. 失败时直接把 error 交回聊天入口,由上层统一写失败日志和 SSE 错误。
|
||||
func buildActiveScheduleSessionRerunFunc(
|
||||
activeDAO *rootdao.ActiveScheduleDAO,
|
||||
graphRunner *activegraph.Runner,
|
||||
previewConfirm *activesvc.PreviewConfirmService,
|
||||
feedbackLocator *activefeedbacklocate.Service,
|
||||
) agentsv.ActiveScheduleSessionRerunFunc {
|
||||
return func(
|
||||
ctx context.Context,
|
||||
session *model.ActiveScheduleSessionSnapshot,
|
||||
userMessage string,
|
||||
traceID string,
|
||||
requestStart time.Time,
|
||||
) (*agentsv.ActiveScheduleSessionRerunResult, error) {
|
||||
if activeDAO == nil || graphRunner == nil || previewConfirm == nil {
|
||||
return nil, fmt.Errorf("主动调度 rerun 依赖未初始化")
|
||||
}
|
||||
if session == nil {
|
||||
return nil, fmt.Errorf("主动调度 session 不能为空")
|
||||
}
|
||||
|
||||
triggerRow, err := activeDAO.GetTriggerByID(ctx, session.TriggerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolvedTargetType := activeTrigger.TargetType(triggerRow.TargetType)
|
||||
resolvedTargetID := triggerRow.TargetID
|
||||
needsFeedbackLocate := activeTrigger.TriggerType(triggerRow.TriggerType) == activeTrigger.TriggerTypeUnfinishedFeedback &&
|
||||
(resolvedTargetID <= 0 || containsString(session.State.MissingInfo, "feedback_target"))
|
||||
|
||||
if needsFeedbackLocate {
|
||||
if feedbackLocator == nil {
|
||||
question := firstNonEmptyString(
|
||||
activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
|
||||
session.State.PendingQuestion,
|
||||
)
|
||||
nextState := session.State
|
||||
nextState.PendingQuestion = question
|
||||
nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
|
||||
nextState.LastCandidateID = ""
|
||||
nextState.LastNotificationID = ""
|
||||
nextState.FailedReason = ""
|
||||
nextState.ExpiresAt = nil
|
||||
return &agentsv.ActiveScheduleSessionRerunResult{
|
||||
AssistantText: question,
|
||||
SessionState: nextState,
|
||||
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
|
||||
}, nil
|
||||
}
|
||||
locateResult, locateErr := feedbackLocator.Resolve(ctx, activefeedbacklocate.Request{
|
||||
UserID: triggerRow.UserID,
|
||||
UserMessage: userMessage,
|
||||
PendingQuestion: session.State.PendingQuestion,
|
||||
MissingInfo: cloneStringSlice(session.State.MissingInfo),
|
||||
})
|
||||
if locateErr != nil {
|
||||
return nil, locateErr
|
||||
}
|
||||
if locateResult.ShouldAskUser() {
|
||||
question := firstNonEmptyString(
|
||||
locateResult.AskUserQuestion,
|
||||
activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
|
||||
session.State.PendingQuestion,
|
||||
)
|
||||
nextState := session.State
|
||||
nextState.PendingQuestion = question
|
||||
nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
|
||||
nextState.LastCandidateID = ""
|
||||
nextState.LastNotificationID = ""
|
||||
nextState.FailedReason = ""
|
||||
nextState.ExpiresAt = nil
|
||||
return &agentsv.ActiveScheduleSessionRerunResult{
|
||||
AssistantText: question,
|
||||
SessionState: nextState,
|
||||
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
|
||||
}, nil
|
||||
}
|
||||
resolvedTargetType = activeTrigger.TargetType(locateResult.TargetType)
|
||||
resolvedTargetID = locateResult.TargetID
|
||||
}
|
||||
|
||||
domainTrigger := activeTrigger.ActiveScheduleTrigger{
|
||||
TriggerID: triggerRow.ID,
|
||||
UserID: triggerRow.UserID,
|
||||
TriggerType: activeTrigger.TriggerType(triggerRow.TriggerType),
|
||||
Source: activeTrigger.SourceUserFeedback,
|
||||
TargetType: resolvedTargetType,
|
||||
TargetID: resolvedTargetID,
|
||||
FeedbackID: triggerRow.FeedbackID,
|
||||
IdempotencyKey: triggerRow.IdempotencyKey,
|
||||
MockNow: nil,
|
||||
IsMockTime: false,
|
||||
RequestedAt: requestStart,
|
||||
TraceID: traceID,
|
||||
}
|
||||
if err := domainTrigger.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
graphResult, err := graphRunner.Run(ctx, domainTrigger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if graphResult == nil || graphResult.DryRunData == nil || graphResult.DryRunData.Context == nil {
|
||||
return nil, fmt.Errorf("主动调度 graph 返回空结果")
|
||||
}
|
||||
|
||||
selectionResult := graphResult.SelectionResult
|
||||
state := session.State
|
||||
state.LastCandidateID = strings.TrimSpace(selectionResult.SelectedCandidateID)
|
||||
state.LastNotificationID = ""
|
||||
state.FailedReason = ""
|
||||
state.MissingInfo = cloneStringSlice(graphResult.DryRunData.Context.DerivedFacts.MissingInfo)
|
||||
|
||||
switch selectionResult.Action {
|
||||
case activesel.ActionSelectCandidate:
|
||||
if !graphResult.DryRunData.Observation.Decision.ShouldWritePreview {
|
||||
return nil, fmt.Errorf("主动调度 graph 选择了候选,但未产出可写 preview")
|
||||
}
|
||||
previewResp, err := previewConfirm.CreatePreviewFromDryRun(ctx, activepreview.CreatePreviewRequest{
|
||||
ActiveContext: graphResult.DryRunData.Context,
|
||||
Observation: graphResult.DryRunData.Observation,
|
||||
Candidates: graphResult.DryRunData.Candidates,
|
||||
TriggerID: triggerRow.ID,
|
||||
GeneratedAt: requestStart,
|
||||
SelectedCandidateID: selectionResult.SelectedCandidateID,
|
||||
ExplanationText: selectionResult.ExplanationText,
|
||||
NotificationSummary: selectionResult.NotificationSummary,
|
||||
FallbackUsed: selectionResult.FallbackUsed,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.PendingQuestion = ""
|
||||
state.MissingInfo = nil
|
||||
state.FailedReason = ""
|
||||
expiresAt := previewResp.Detail.ExpiresAt
|
||||
state.ExpiresAt = &expiresAt
|
||||
|
||||
return &agentsv.ActiveScheduleSessionRerunResult{
|
||||
AssistantText: firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, previewResp.Detail.Explanation, previewResp.Detail.Notification, "主动调度建议已更新。"),
|
||||
BusinessCard: &agentstream.StreamBusinessCardExtra{
|
||||
CardType: "active_schedule_preview",
|
||||
Title: "SmartFlow 日程调整建议",
|
||||
Summary: firstNonEmptyString(selectionResult.NotificationSummary, previewResp.Detail.Notification, previewResp.Detail.Explanation),
|
||||
Data: previewDetailToMap(previewResp.Detail),
|
||||
},
|
||||
SessionState: state,
|
||||
SessionStatus: model.ActiveScheduleSessionStatusReadyPreview,
|
||||
PreviewID: previewResp.Detail.PreviewID,
|
||||
}, nil
|
||||
|
||||
case activesel.ActionAskUser:
|
||||
question := firstNonEmptyString(selectionResult.AskUserQuestion, selectionResult.ExplanationText, "请继续补充主动调度需要的信息。")
|
||||
state.PendingQuestion = question
|
||||
state.ExpiresAt = nil
|
||||
return &agentsv.ActiveScheduleSessionRerunResult{
|
||||
AssistantText: question,
|
||||
SessionState: state,
|
||||
SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
|
||||
}, nil
|
||||
|
||||
default:
|
||||
assistantText := firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, "当前主动调度暂时没有需要继续处理的内容。")
|
||||
state.PendingQuestion = ""
|
||||
state.MissingInfo = nil
|
||||
state.ExpiresAt = nil
|
||||
return &agentsv.ActiveScheduleSessionRerunResult{
|
||||
AssistantText: assistantText,
|
||||
SessionState: state,
|
||||
SessionStatus: model.ActiveScheduleSessionStatusIgnored,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func previewDetailToMap(detail activepreview.ActiveSchedulePreviewDetail) map[string]any {
|
||||
raw, err := json.Marshal(detail)
|
||||
if err != nil {
|
||||
return map[string]any{}
|
||||
}
|
||||
var output map[string]any
|
||||
if err := json.Unmarshal(raw, &output); err != nil {
|
||||
return map[string]any{}
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// firstNonEmptyString returns the first argument that is non-blank after
// trimming whitespace, or "" when every candidate is blank.
func firstNonEmptyString(values ...string) string {
	for i := range values {
		if candidate := strings.TrimSpace(values[i]); candidate != "" {
			return candidate
		}
	}
	return ""
}
|
||||
|
||||
// cloneStringSlice returns an independent copy of values. Empty input
// (nil or zero-length) yields nil so callers see "no entries" uniformly.
func cloneStringSlice(values []string) []string {
	// append to a nil base returns nil for zero elements and a freshly
	// allocated slice otherwise — exactly the contract we want.
	return append([]string(nil), values...)
}
|
||||
|
||||
func appendMissingString(values []string, next string) []string {
|
||||
trimmed := strings.TrimSpace(next)
|
||||
if trimmed == "" {
|
||||
return cloneStringSlice(values)
|
||||
}
|
||||
for _, value := range values {
|
||||
if strings.TrimSpace(value) == trimmed {
|
||||
return cloneStringSlice(values)
|
||||
}
|
||||
}
|
||||
result := cloneStringSlice(values)
|
||||
return append(result, trimmed)
|
||||
}
|
||||
|
||||
// containsString reports whether target (trimmed) appears in values, comparing
// each element after trimming. A blank target never matches.
func containsString(values []string, target string) bool {
	want := strings.TrimSpace(target)
	if want == "" {
		return false
	}
	for i := range values {
		if strings.TrimSpace(values[i]) == want {
			return true
		}
	}
	return false
}
|
||||
50
backend/cmd/agent/main.go
Normal file
50
backend/cmd/agent/main.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
agentrpc "github.com/LoveLosita/smartflow/backend/services/agent/rpc"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// main boots the standalone agent process: it loads configuration, builds the
// runtime (DB / redis / LLM / RAG / RPC client wiring), starts the outbox
// workers, then serves the agent zrpc endpoint until interrupted.
func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	// ctx is cancelled on Ctrl-C or SIGTERM; it drives worker shutdown and the
	// final wait at the bottom of main.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	runtime, err := buildAgentRuntime(ctx)
	if err != nil {
		log.Fatalf("failed to initialize agent runtime: %v", err)
	}
	// NOTE(review): the log.Fatalf calls below exit without running this defer,
	// so resources are only released on the graceful-shutdown path — confirm
	// this is acceptable for startup failures.
	defer runtime.close()

	if err := runtime.startWorkers(ctx); err != nil {
		log.Fatalf("failed to start agent workers: %v", err)
	}

	server, listenOn, err := agentrpc.NewServer(agentrpc.ServerOptions{
		ListenOn: viper.GetString("agent.rpc.listenOn"),
		Timeout:  viper.GetDuration("agent.rpc.timeout"),
		Service:  runtime.service,
	})
	if err != nil {
		log.Fatalf("failed to build agent zrpc server: %v", err)
	}
	defer server.Stop()

	// Serve in the background; main blocks on the signal context below.
	go func() {
		log.Printf("agent zrpc service starting on %s", listenOn)
		server.Start()
	}()

	<-ctx.Done()
	log.Println("agent service stopping")
}
|
||||
527
backend/cmd/agent/runtime.go
Normal file
527
backend/cmd/agent/runtime.go
Normal file
@@ -0,0 +1,527 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
|
||||
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
rootsvc "github.com/LoveLosita/smartflow/backend/service"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
|
||||
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
|
||||
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// agentRuntime bundles every long-lived dependency owned by the standalone
// agent process so main can start and stop them as one unit.
type agentRuntime struct {
	redisClient    *redis.Client           // shared cache / session backend; closed in close()
	eventBus       eventsvc.OutboxBus      // routed outbox bus; nil when kafka is disabled
	outboxRepo     *outboxinfra.Repository // agent outbox persistence
	repoManager    *rootdao.RepoManager    // grouped DAOs (active-schedule etc.)
	agentRepo      *rootdao.AgentDAO       // agent chat/history persistence
	cacheRepo      *rootdao.CacheDAO       // redis-backed cache DAO
	userAuthClient *gatewayuserauth.Client // only built when the event bus is enabled
	service        *agentsv.AgentService   // the service exposed over zrpc
	workersStarted bool                    // guards startWorkers against double start
}
|
||||
|
||||
// buildAgentRuntime assembles every dependency of the standalone agent process:
// database (with agent-owned migrations), redis cache, LLM and RAG services,
// memory config/observers, downstream zrpc clients, outbox publishers, and the
// fully configured AgentService including the active-schedule rerun wiring.
// On any failure after redis is opened, the redis connection is closed via fail.
func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
	db, err := openAgentDBFromConfig()
	if err != nil {
		return nil, fmt.Errorf("connect agent database failed: %w", err)
	}

	redisClient, err := inits.OpenRedisFromConfig()
	if err != nil {
		return nil, fmt.Errorf("connect agent redis failed: %w", err)
	}
	// fail releases redis before propagating the cause; every error return below
	// this point must go through it.
	fail := func(cause error) (*agentRuntime, error) {
		_ = redisClient.Close()
		return nil, cause
	}

	cacheRepo := rootdao.NewCacheDAO(redisClient)
	if err = db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
		return fail(fmt.Errorf("initialize agent cache deleter failed: %w", err))
	}

	// Notes:
	// 1. For now cmd/agent carries its own copy of this startup wiring rather
	//    than touching the legacy gateway-local chain in cmd/start.go.
	// 2. That stabilizes the standalone process entry first and avoids clashing
	//    with the rpc/pb changes being wired in parallel on the main agent.
	// 3. Once the phase-6 agent/memory startup boundaries settle, revisit whether
	//    to lift LLM/RAG/bootstrap into a shared layer.
	llmService, err := buildAgentLLMService()
	if err != nil {
		return fail(fmt.Errorf("initialize agent llm service failed: %w", err))
	}
	ragService, err := buildAgentRAGService(ctx)
	if err != nil {
		return fail(err)
	}
	ragRuntime := ragService.Runtime()

	// Memory facade configuration and observability hooks for the memory reader.
	memoryCfg := memorymodule.LoadConfigFromViper()
	memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
	memoryMetrics := memoryobserve.NewMetricsRegistry()

	manager := rootdao.NewManager(db)
	agentRepo := rootdao.NewAgentDAO(db)
	taskRepo := rootdao.NewTaskDAO(db)
	taskClassRepo := rootdao.NewTaskClassDAO(db)
	scheduleRepo := rootdao.NewScheduleDAO(db)
	agentCacheRepo := rootdao.NewAgentCache(redisClient)
	outboxRepo := outboxinfra.NewRepository(db)

	eventBus, err := buildAgentEventBus(outboxRepo)
	if err != nil {
		return fail(err)
	}
	if err = eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
		return fail(fmt.Errorf("register task outbox route failed: %w", err))
	}

	eventPublisher := buildAgentOutboxPublisher(outboxRepo)
	taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)

	// The userauth client is only needed by the outbox consumer, so it is built
	// only when the event bus is enabled.
	var userAuthClient *gatewayuserauth.Client
	if eventBus != nil {
		userAuthClient, err = gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
			Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
			Target:    viper.GetString("userauth.rpc.target"),
			Timeout:   viper.GetDuration("userauth.rpc.timeout"),
		})
		if err != nil {
			return fail(fmt.Errorf("initialize userauth zrpc client failed: %w", err))
		}
	}

	taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
		Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
		Target:    viper.GetString("task.rpc.target"),
		Timeout:   viper.GetDuration("task.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task zrpc client failed: %w", err))
	}
	taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
		Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
		Target:    viper.GetString("taskClass.rpc.target"),
		Timeout:   viper.GetDuration("taskClass.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task-class zrpc client failed: %w", err))
	}
	scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
		Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
		Target:    viper.GetString("schedule.rpc.target"),
		Timeout:   viper.GetDuration("schedule.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize schedule zrpc client failed: %w", err))
	}
	memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
		Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
		Target:    viper.GetString("memory.rpc.target"),
		Timeout:   viper.GetDuration("memory.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize memory zrpc client failed: %w", err))
	}

	taskService := rootsvc.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
	taskService.SetActiveScheduleDAO(manager.ActiveSchedule)
	scheduleService := rootsvc.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
	agentService := agentsv.NewAgentService(
		llmService,
		agentRepo,
		taskRepo,
		cacheRepo,
		agentCacheRepo,
		manager.ActiveSchedule,
		manager.ActiveScheduleSession,
		eventPublisher,
	)

	// 1. During migration the standalone entry still injects the legacy
	//    schedule/task domain capabilities, so agent/sv does not import the old
	//    service package and create an import cycle.
	// 2. Once the remaining DAO adaptations move to RPC/read-models in later
	//    phase-6 work, remove these injection points.
	agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
	agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
	agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
	agentService.GetTasksWithUrgencyPromotionFunc = taskService.GetTasksWithUrgencyPromotion

	configureAgentService(
		agentService,
		ragRuntime,
		agentRepo,
		cacheRepo,
		taskClient,
		taskClassClient,
		scheduleClient,
		memoryClient,
		memoryCfg,
		memoryObserver,
		memoryMetrics,
	)

	// Active-schedule rerun wiring: RPC adapters -> dry-run -> preview/confirm ->
	// selector + feedback locator -> graph runner, all injected into the agent
	// service as a single rerun closure.
	activeTaskAdapter, err := activeadapters.NewTaskRPCAdapter(activeadapters.TaskRPCConfig{
		Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
		Target:    viper.GetString("task.rpc.target"),
		Timeout:   viper.GetDuration("task.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task rpc adapter for agent rerun failed: %w", err))
	}
	activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
		Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
		Target:    viper.GetString("schedule.rpc.target"),
		Timeout:   viper.GetDuration("schedule.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize schedule rpc adapter for agent rerun failed: %w", err))
	}
	activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskAdapter, activeScheduleAdapter))
	if err != nil {
		return fail(err)
	}
	activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
	if err != nil {
		return fail(err)
	}
	activeScheduleLLMClient := llmService.ProClient()
	activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
	activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
	activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
	if err != nil {
		return fail(err)
	}
	agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(
		manager.ActiveSchedule,
		activeScheduleGraphRunner,
		activeSchedulePreviewConfirm,
		activeScheduleFeedbackLocator,
	))

	return &agentRuntime{
		redisClient:    redisClient,
		eventBus:       eventBus,
		outboxRepo:     outboxRepo,
		repoManager:    manager,
		agentRepo:      agentRepo,
		cacheRepo:      cacheRepo,
		userAuthClient: userAuthClient,
		service:        agentService,
	}, nil
}
|
||||
|
||||
// startWorkers registers the agent-side outbox handlers and starts the event
// bus consumer. It is idempotent: a second call (or a nil receiver) is a no-op.
// When the event bus is disabled it logs and returns nil; when the bus is
// enabled but the userauth client is missing it fails fast.
func (r *agentRuntime) startWorkers(ctx context.Context) error {
	if r == nil || r.workersStarted {
		return nil
	}
	if r.eventBus == nil {
		log.Println("Agent outbox consumer is disabled")
		return nil
	}
	if r.userAuthClient == nil {
		return fmt.Errorf("agent outbox consumer requires userauth zrpc client")
	}

	// 1. Register the handlers the agent consumes itself, including the service
	//    route for memory.extract.requested.
	// 2. Only the agent boundary is wired here: memory consumption stays with
	//    cmd/memory, and task events remain publish-only writes to the task outbox.
	// 3. Registration happens before the bus starts, so the service cannot race
	//    ahead and consume messages that have no handler attached yet.
	if err := eventsvc.RegisterCoreOutboxHandlers(
		r.eventBus,
		r.outboxRepo,
		r.repoManager,
		r.agentRepo,
		r.cacheRepo,
		nil,
		r.userAuthClient,
	); err != nil {
		return fmt.Errorf("register agent outbox handlers failed: %w", err)
	}

	r.eventBus.Start(ctx)
	r.workersStarted = true
	log.Println("Agent outbox consumer started")
	return nil
}
|
||||
|
||||
func (r *agentRuntime) close() {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
if r.eventBus != nil {
|
||||
r.eventBus.Close()
|
||||
}
|
||||
if r.redisClient != nil {
|
||||
_ = r.redisClient.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func openAgentDBFromConfig() (*gorm.DB, error) {
|
||||
db, err := inits.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = autoMigrateAgentOwnedTables(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = autoMigrateAgentOutboxTable(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = ensureAgentRuntimeDependencyTables(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// autoMigrateAgentOwnedTables migrates the tables the agent process owns,
// with foreign-key creation temporarily disabled (restored afterwards).
func autoMigrateAgentOwnedTables(db *gorm.DB) error {
	if db == nil {
		return fmt.Errorf("agent database is not initialized")
	}

	// 1. On startup the standalone agent process only back-fills its own table
	//    schema; it does not force FK constraints onto a historical database.
	// 2. Online/local historical data may contain old chat_history rows whose
	//    agent_chat no longer exists; hard-adding the FK would block startup.
	// 3. During migration the app-level chat_id join semantics are kept; real
	//    orphan cleanup and FK back-fill belong in a separate data-governance
	//    script.
	originalDisableFK := db.Config.DisableForeignKeyConstraintWhenMigrating
	db.Config.DisableForeignKeyConstraintWhenMigrating = true
	defer func() {
		// Restore the original setting so later migrations are unaffected.
		db.Config.DisableForeignKeyConstraintWhenMigrating = originalDisableFK
	}()

	if err := db.AutoMigrate(
		&model.AgentChat{},
		&model.ChatHistory{},
		&model.AgentTimelineEvent{},
		&model.AgentScheduleState{},
		&model.ActiveScheduleSession{},
		&model.AgentStateSnapshotRecord{},
	); err != nil {
		return fmt.Errorf("auto migrate agent owned tables failed: %w", err)
	}
	return nil
}
|
||||
|
||||
func autoMigrateAgentOutboxTable(db *gorm.DB) error {
|
||||
cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceAgent)
|
||||
if !ok {
|
||||
return fmt.Errorf("resolve agent outbox config failed")
|
||||
}
|
||||
if err := db.Table(cfg.TableName).AutoMigrate(&model.AgentOutboxMessage{}); err != nil {
|
||||
return fmt.Errorf("auto migrate agent outbox table failed for %s (%s): %w", cfg.Name, cfg.TableName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ensureAgentRuntimeDependencyTables(db *gorm.DB) error {
|
||||
// 1. agent 独立进程当前仍复用 task/schedule/active-scheduler 的部分读写表,不在这里越权迁移这些表。
|
||||
// 2. 这里只做存在性检查,缺表时直接 fail fast,避免聊天请求进入半初始化状态。
|
||||
// 3. 等阶段 6 后续把这些直连改成 RPC/read-model 后,应同步缩减这份依赖清单。
|
||||
for _, dependency := range []struct {
|
||||
name string
|
||||
model any
|
||||
}{
|
||||
{name: "tasks", model: &model.Task{}},
|
||||
{name: "task_classes", model: &model.TaskClass{}},
|
||||
{name: "task_items", model: &model.TaskClassItem{}},
|
||||
{name: "schedules", model: &model.Schedule{}},
|
||||
{name: "schedule_events", model: &model.ScheduleEvent{}},
|
||||
{name: "active_schedule_triggers", model: &model.ActiveScheduleTrigger{}},
|
||||
{name: "active_schedule_previews", model: &model.ActiveSchedulePreview{}},
|
||||
} {
|
||||
if !db.Migrator().HasTable(dependency.model) {
|
||||
return fmt.Errorf("agent runtime dependency table missing: %s", dependency.name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildAgentLLMService() (*llmservice.Service, error) {
|
||||
aiHub, err := inits.InitEino()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
}), nil
|
||||
}
|
||||
|
||||
func buildAgentRAGService(ctx context.Context) (*ragservice.Service, error) {
|
||||
ragCfg := ragconfig.LoadFromViper()
|
||||
if !ragCfg.Enabled {
|
||||
log.Println("RAG service is disabled for agent")
|
||||
return ragservice.New(ragservice.Options{}), nil
|
||||
}
|
||||
|
||||
ragLogger := log.Default()
|
||||
ragService, err := ragservice.NewFromConfig(ctx, ragCfg, ragservice.FactoryDeps{
|
||||
Logger: ragLogger,
|
||||
Observer: ragservice.NewLoggerObserver(ragLogger),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize agent RAG service: %w", err)
|
||||
}
|
||||
log.Printf("Agent RAG runtime initialized: store=%s embed=%s reranker=%s", ragCfg.Store, ragCfg.EmbedProvider, ragCfg.RerankerProvider)
|
||||
return ragService, nil
|
||||
}
|
||||
|
||||
func buildAgentEventBus(outboxRepo *outboxinfra.Repository) (eventsvc.OutboxBus, error) {
|
||||
kafkaCfg := kafkabus.LoadConfig()
|
||||
bus, err := eventsvc.NewServiceOutboxBus(outboxRepo, kafkaCfg, outboxinfra.ServiceAgent)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize outbox event bus for service %s: %w", outboxinfra.ServiceAgent, err)
|
||||
}
|
||||
serviceBuses := make(map[string]eventsvc.OutboxBus, 1)
|
||||
if bus != nil {
|
||||
serviceBuses[outboxinfra.ServiceAgent] = bus
|
||||
}
|
||||
|
||||
eventBus := eventsvc.NewRoutedOutboxBus(serviceBuses)
|
||||
if eventBus == nil {
|
||||
log.Println("Agent outbox event bus is disabled")
|
||||
}
|
||||
return eventBus, nil
|
||||
}
|
||||
|
||||
func buildAgentOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
|
||||
kafkaCfg := kafkabus.LoadConfig()
|
||||
if !kafkaCfg.Enabled || outboxRepo == nil {
|
||||
return nil
|
||||
}
|
||||
return &repositoryOutboxPublisher{
|
||||
repo: outboxRepo,
|
||||
maxRetry: kafkaCfg.MaxRetry,
|
||||
}
|
||||
}
|
||||
|
||||
func buildTaskOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
|
||||
kafkaCfg := kafkabus.LoadConfig()
|
||||
if !kafkaCfg.Enabled || outboxRepo == nil {
|
||||
return nil
|
||||
}
|
||||
return &repositoryOutboxPublisher{
|
||||
repo: outboxRepo,
|
||||
maxRetry: kafkaCfg.MaxRetry,
|
||||
}
|
||||
}
|
||||
|
||||
// repositoryOutboxPublisher persists publish requests into the outbox
// repository (transactional-outbox pattern: a relay delivers them later).
type repositoryOutboxPublisher struct {
	repo     *outboxinfra.Repository // destination outbox table
	maxRetry int                     // retry budget recorded on each message
}
|
||||
|
||||
// Publish normalizes the request (trimming, defaulting the event version,
// falling back to the message key as aggregate ID) and writes one outbox
// message. It does not send anything itself; delivery is the relay's job.
func (p *repositoryOutboxPublisher) Publish(ctx context.Context, req outboxinfra.PublishRequest) error {
	if p == nil || p.repo == nil {
		return fmt.Errorf("outbox publisher is not initialized")
	}

	eventType := strings.TrimSpace(req.EventType)
	if eventType == "" {
		return fmt.Errorf("eventType is empty")
	}
	eventVersion := strings.TrimSpace(req.EventVersion)
	if eventVersion == "" {
		eventVersion = outboxinfra.DefaultEventVersion
	}
	messageKey := strings.TrimSpace(req.MessageKey)
	aggregateID := strings.TrimSpace(req.AggregateID)
	if aggregateID == "" {
		// Fall back to the message key so the aggregate is still traceable.
		aggregateID = messageKey
	}

	payloadJSON, err := json.Marshal(req.Payload)
	if err != nil {
		return err
	}

	_, err = p.repo.CreateMessage(ctx, eventType, messageKey, outboxinfra.OutboxEventPayload{
		EventID:      strings.TrimSpace(req.EventID),
		EventType:    eventType,
		EventVersion: eventVersion,
		AggregateID:  aggregateID,
		Payload:      payloadJSON,
	}, p.maxRetry)
	return err
}
|
||||
|
||||
// configureAgentService attaches the optional capabilities to an already
// constructed AgentService: state store, web-search provider, tool registry,
// schedule provider, compaction store, quick-task deps and the memory reader.
// A nil agentService makes the call a no-op.
func configureAgentService(
	agentService *agentsv.AgentService,
	ragRuntime ragservice.Runtime,
	agentRepo *rootdao.AgentDAO,
	cacheRepo *rootdao.CacheDAO,
	taskClient agentsv.TaskRPCClient,
	taskClassClient agentsv.TaskClassAgentRPCClient,
	scheduleClient agentsv.ScheduleAgentRPCClient,
	memoryReaderClient ports.MemoryReaderClient,
	memoryCfg memorymodel.Config,
	memoryObserver memoryobserve.Observer,
	memoryMetrics memoryobserve.MetricsRecorder,
) {
	if agentService == nil {
		return
	}

	agentService.SetAgentStateStore(rootdao.NewAgentStateStoreAdapter(cacheRepo))

	// Pick the web-search backend from config; anything unknown or missing an
	// API key degrades to the mock provider so the agent can still start.
	var webSearchProvider web.SearchProvider
	webProvider := viper.GetString("websearch.provider")
	switch webProvider {
	case "bocha":
		bochaKey := viper.GetString("websearch.apiKey")
		if bochaKey == "" {
			log.Println("WebSearch: 博查 API Key 为空,降级为 mock")
			webSearchProvider = &web.MockProvider{}
		} else {
			webSearchProvider = web.NewBochaProvider(bochaKey, "")
			log.Println("WebSearch provider: bocha")
		}
	case "mock", "":
		webSearchProvider = &web.MockProvider{}
		log.Println("WebSearch provider: mock(模拟模式)")
	default:
		log.Printf("WebSearch provider %q 未识别,降级为 mock", webProvider)
		webSearchProvider = &web.MockProvider{}
	}

	// Tool registry: RAG runtime, web search, and task-class writes routed
	// through the task-class zrpc client.
	agentService.SetToolRegistry(agenttools.NewDefaultRegistryWithDeps(agenttools.DefaultRegistryDeps{
		RAGRuntime:        ragRuntime,
		WebSearchProvider: webSearchProvider,
		TaskClassWriteDeps: agenttools.TaskClassWriteDeps{
			UpsertTaskClass: agentsv.NewTaskClassRPCUpsertFunc(taskClassClient),
		},
	}))
	agentService.SetScheduleProvider(agentsv.NewScheduleRPCProvider(scheduleClient, taskClassClient))
	agentService.SetCompactionStore(agentRepo)
	agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
	agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
|
||||
Reference in New Issue
Block a user