Version: 0.9.76.dev.260505
Backend: phase 6 agent / memory service-extraction consolidation

- Add a standalone cmd/agent process entry point that hosts the agent zrpc server, the agent outbox relay / consumer, and runtime dependency initialization
- Complete the services/agent/rpc Chat stream plus the conversation meta/list/timeline, schedule-preview, context-stats, and schedule-state unary RPCs
- Add gateway/client/agent and shared/contracts/agent, switching the /api/v1/agent chat and non-chat facades to agent zrpc
- Shrink the gateway's local AgentService assembly: with both RPC switches on, the gateway no longer initializes local agent orchestration, LLM, RAG, or the memory-reader fallback
- Physically move backend/memory into services/memory, pull the private implementation into internal, and keep module/model/observe as the memory service facade
- Adjust the imports and service boundaries of the memory outbox, memory reader, and agent memory-rendering paths; cmd/memory exclusively owns the memory worker / consumer
- End gateway-side ownership of the agent outbox worker: cmd/agent exclusively owns the agent relay / consumer, and the gateway keeps only the HTTP/SSE facade plus the migration-period switch fallback
- Update the phase 6 docs to record the current agent / memory cutover points, smoke results, and the directory consolidation conventions for backend/client and gateway/shared
backend/cmd/agent/active_schedule_rerun.go (new file, 269 lines)
@@ -0,0 +1,269 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "time"

    rootdao "github.com/LoveLosita/smartflow/backend/dao"
    "github.com/LoveLosita/smartflow/backend/model"
    activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
    activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
    activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
    activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
    activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
    activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
    activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
    agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
    agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
)

func buildActiveSchedulePreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
    ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
}) (*activesvc.PreviewConfirmService, error) {
    previewService, err := activepreview.NewService(activeDAO)
    if err != nil {
        return nil, err
    }
    return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, scheduleApplyAdapter)
}

// buildActiveScheduleSessionRerunFunc wraps the active-scheduler locator / graph / preview
// capabilities into a rerun closure that the chat entry point can call.
//
// Notes:
// 1. This is minimal wiring only: it reuses the existing locator -> trigger -> graph -> preview components and does not duplicate the worker/notification path;
// 2. on success it returns the session state, the assistant text, and the business-card payload;
// 3. on failure it hands the error straight back to the chat entry point, which writes the failure log and the SSE error uniformly.
func buildActiveScheduleSessionRerunFunc(
    activeDAO *rootdao.ActiveScheduleDAO,
    graphRunner *activegraph.Runner,
    previewConfirm *activesvc.PreviewConfirmService,
    feedbackLocator *activefeedbacklocate.Service,
) agentsv.ActiveScheduleSessionRerunFunc {
    return func(
        ctx context.Context,
        session *model.ActiveScheduleSessionSnapshot,
        userMessage string,
        traceID string,
        requestStart time.Time,
    ) (*agentsv.ActiveScheduleSessionRerunResult, error) {
        if activeDAO == nil || graphRunner == nil || previewConfirm == nil {
            return nil, fmt.Errorf("active-schedule rerun dependencies are not initialized")
        }
        if session == nil {
            return nil, fmt.Errorf("active-schedule session must not be nil")
        }

        triggerRow, err := activeDAO.GetTriggerByID(ctx, session.TriggerID)
        if err != nil {
            return nil, err
        }
        resolvedTargetType := activeTrigger.TargetType(triggerRow.TargetType)
        resolvedTargetID := triggerRow.TargetID
        needsFeedbackLocate := activeTrigger.TriggerType(triggerRow.TriggerType) == activeTrigger.TriggerTypeUnfinishedFeedback &&
            (resolvedTargetID <= 0 || containsString(session.State.MissingInfo, "feedback_target"))

        if needsFeedbackLocate {
            if feedbackLocator == nil {
                question := firstNonEmptyString(
                    activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
                    session.State.PendingQuestion,
                )
                nextState := session.State
                nextState.PendingQuestion = question
                nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
                nextState.LastCandidateID = ""
                nextState.LastNotificationID = ""
                nextState.FailedReason = ""
                nextState.ExpiresAt = nil
                return &agentsv.ActiveScheduleSessionRerunResult{
                    AssistantText: question,
                    SessionState:  nextState,
                    SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
                }, nil
            }
            locateResult, locateErr := feedbackLocator.Resolve(ctx, activefeedbacklocate.Request{
                UserID:          triggerRow.UserID,
                UserMessage:     userMessage,
                PendingQuestion: session.State.PendingQuestion,
                MissingInfo:     cloneStringSlice(session.State.MissingInfo),
            })
            if locateErr != nil {
                return nil, locateErr
            }
            if locateResult.ShouldAskUser() {
                question := firstNonEmptyString(
                    locateResult.AskUserQuestion,
                    activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
                    session.State.PendingQuestion,
                )
                nextState := session.State
                nextState.PendingQuestion = question
                nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
                nextState.LastCandidateID = ""
                nextState.LastNotificationID = ""
                nextState.FailedReason = ""
                nextState.ExpiresAt = nil
                return &agentsv.ActiveScheduleSessionRerunResult{
                    AssistantText: question,
                    SessionState:  nextState,
                    SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
                }, nil
            }
            resolvedTargetType = activeTrigger.TargetType(locateResult.TargetType)
            resolvedTargetID = locateResult.TargetID
        }

        domainTrigger := activeTrigger.ActiveScheduleTrigger{
            TriggerID:      triggerRow.ID,
            UserID:         triggerRow.UserID,
            TriggerType:    activeTrigger.TriggerType(triggerRow.TriggerType),
            Source:         activeTrigger.SourceUserFeedback,
            TargetType:     resolvedTargetType,
            TargetID:       resolvedTargetID,
            FeedbackID:     triggerRow.FeedbackID,
            IdempotencyKey: triggerRow.IdempotencyKey,
            MockNow:        nil,
            IsMockTime:     false,
            RequestedAt:    requestStart,
            TraceID:        traceID,
        }
        if err := domainTrigger.Validate(); err != nil {
            return nil, err
        }

        graphResult, err := graphRunner.Run(ctx, domainTrigger)
        if err != nil {
            return nil, err
        }
        if graphResult == nil || graphResult.DryRunData == nil || graphResult.DryRunData.Context == nil {
            return nil, fmt.Errorf("active-schedule graph returned an empty result")
        }

        selectionResult := graphResult.SelectionResult
        state := session.State
        state.LastCandidateID = strings.TrimSpace(selectionResult.SelectedCandidateID)
        state.LastNotificationID = ""
        state.FailedReason = ""
        state.MissingInfo = cloneStringSlice(graphResult.DryRunData.Context.DerivedFacts.MissingInfo)

        switch selectionResult.Action {
        case activesel.ActionSelectCandidate:
            if !graphResult.DryRunData.Observation.Decision.ShouldWritePreview {
                return nil, fmt.Errorf("active-schedule graph selected a candidate but produced no writable preview")
            }
            previewResp, err := previewConfirm.CreatePreviewFromDryRun(ctx, activepreview.CreatePreviewRequest{
                ActiveContext:       graphResult.DryRunData.Context,
                Observation:         graphResult.DryRunData.Observation,
                Candidates:          graphResult.DryRunData.Candidates,
                TriggerID:           triggerRow.ID,
                GeneratedAt:         requestStart,
                SelectedCandidateID: selectionResult.SelectedCandidateID,
                ExplanationText:     selectionResult.ExplanationText,
                NotificationSummary: selectionResult.NotificationSummary,
                FallbackUsed:        selectionResult.FallbackUsed,
            })
            if err != nil {
                return nil, err
            }
            state.PendingQuestion = ""
            state.MissingInfo = nil
            state.FailedReason = ""
            expiresAt := previewResp.Detail.ExpiresAt
            state.ExpiresAt = &expiresAt

            return &agentsv.ActiveScheduleSessionRerunResult{
                AssistantText: firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, previewResp.Detail.Explanation, previewResp.Detail.Notification, "The active-schedule suggestion has been updated."),
                BusinessCard: &agentstream.StreamBusinessCardExtra{
                    CardType: "active_schedule_preview",
                    Title:    "SmartFlow schedule adjustment suggestion",
                    Summary:  firstNonEmptyString(selectionResult.NotificationSummary, previewResp.Detail.Notification, previewResp.Detail.Explanation),
                    Data:     previewDetailToMap(previewResp.Detail),
                },
                SessionState:  state,
                SessionStatus: model.ActiveScheduleSessionStatusReadyPreview,
                PreviewID:     previewResp.Detail.PreviewID,
            }, nil

        case activesel.ActionAskUser:
            question := firstNonEmptyString(selectionResult.AskUserQuestion, selectionResult.ExplanationText, "Please provide the remaining information the active scheduler needs.")
            state.PendingQuestion = question
            state.ExpiresAt = nil
            return &agentsv.ActiveScheduleSessionRerunResult{
                AssistantText: question,
                SessionState:  state,
                SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
            }, nil

        default:
            assistantText := firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, "The active scheduler currently has nothing further to process.")
            state.PendingQuestion = ""
            state.MissingInfo = nil
            state.ExpiresAt = nil
            return &agentsv.ActiveScheduleSessionRerunResult{
                AssistantText: assistantText,
                SessionState:  state,
                SessionStatus: model.ActiveScheduleSessionStatusIgnored,
            }, nil
        }
    }
}

func previewDetailToMap(detail activepreview.ActiveSchedulePreviewDetail) map[string]any {
    raw, err := json.Marshal(detail)
    if err != nil {
        return map[string]any{}
    }
    var output map[string]any
    if err := json.Unmarshal(raw, &output); err != nil {
        return map[string]any{}
    }
    return output
}

func firstNonEmptyString(values ...string) string {
    for _, value := range values {
        if trimmed := strings.TrimSpace(value); trimmed != "" {
            return trimmed
        }
    }
    return ""
}

func cloneStringSlice(values []string) []string {
    if len(values) == 0 {
        return nil
    }
    copied := make([]string, len(values))
    copy(copied, values)
    return copied
}

func appendMissingString(values []string, next string) []string {
    trimmed := strings.TrimSpace(next)
    if trimmed == "" {
        return cloneStringSlice(values)
    }
    for _, value := range values {
        if strings.TrimSpace(value) == trimmed {
            return cloneStringSlice(values)
        }
    }
    result := cloneStringSlice(values)
    return append(result, trimmed)
}

func containsString(values []string, target string) bool {
    trimmed := strings.TrimSpace(target)
    if trimmed == "" {
        return false
    }
    for _, value := range values {
        if strings.TrimSpace(value) == trimmed {
            return true
        }
    }
    return false
}
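The string helpers above share one trimming rule, which is what keeps MissingInfo free of whitespace-variant duplicates across reruns. A minimal sketch of their observable behavior — the values and the wrapper function are hypothetical, added here only for illustration:

func exampleStateHelpers() {
    missing := []string{"feedback_target"}

    // appendMissingString trims before comparing, so a padded variant is not appended twice.
    missing = appendMissingString(missing, "  feedback_target  ")
    fmt.Println(len(missing)) // 1

    // containsString applies the same trimming rule on lookup.
    fmt.Println(containsString(missing, " feedback_target ")) // true

    // firstNonEmptyString returns the first candidate that survives TrimSpace; the rerun
    // closure uses it to rank locator question > template question > pending question.
    fmt.Println(firstNonEmptyString("   ", "", "fallback question")) // fallback question
}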
backend/cmd/agent/main.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package main

import (
    "context"
    "log"
    "os"
    "os/signal"
    "syscall"

    "github.com/LoveLosita/smartflow/backend/bootstrap"
    agentrpc "github.com/LoveLosita/smartflow/backend/services/agent/rpc"
    "github.com/spf13/viper"
)

func main() {
    if err := bootstrap.LoadConfig(); err != nil {
        log.Fatalf("failed to load config: %v", err)
    }

    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    defer stop()

    runtime, err := buildAgentRuntime(ctx)
    if err != nil {
        log.Fatalf("failed to initialize agent runtime: %v", err)
    }
    defer runtime.close()

    if err := runtime.startWorkers(ctx); err != nil {
        log.Fatalf("failed to start agent workers: %v", err)
    }

    server, listenOn, err := agentrpc.NewServer(agentrpc.ServerOptions{
        ListenOn: viper.GetString("agent.rpc.listenOn"),
        Timeout:  viper.GetDuration("agent.rpc.timeout"),
        Service:  runtime.service,
    })
    if err != nil {
        log.Fatalf("failed to build agent zrpc server: %v", err)
    }
    defer server.Stop()

    go func() {
        log.Printf("agent zrpc service starting on %s", listenOn)
        server.Start()
    }()

    <-ctx.Done()
    log.Println("agent service stopping")
}
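main() resolves its listen address and per-call timeout from viper after bootstrap.LoadConfig(). A hedged sketch of the keys this entry point and the gateway-side client (further down in this commit) read — the values are illustrative examples, not shipped defaults:

// Hypothetical programmatic equivalent of the expected configuration.
viper.Set("agent.rpc.listenOn", "0.0.0.0:8087") // address the agent zrpc server binds (cmd/agent)
viper.Set("agent.rpc.timeout", "30s")           // zrpc server timeout (cmd/agent)
viper.Set("agent.rpc.target", "127.0.0.1:8087") // dial target for the gateway-side client
viper.Set("agent.rpc.chat.enabled", true)       // migration switch: chat facade over zrpc
viper.Set("agent.rpc.api.enabled", true)        // migration switch: non-chat facade over zrpc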
backend/cmd/agent/runtime.go (new file, 527 lines)
@@ -0,0 +1,527 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "os"
    "strings"

    rootdao "github.com/LoveLosita/smartflow/backend/dao"
    gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
    gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
    gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
    gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
    gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
    kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
    outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
    "github.com/LoveLosita/smartflow/backend/inits"
    rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
    "github.com/LoveLosita/smartflow/backend/model"
    rootsvc "github.com/LoveLosita/smartflow/backend/service"
    eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
    activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
    activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
    activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
    activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
    activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
    agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
    agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
    "github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
    llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
    memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
    memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
    memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
    ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
    ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
    "github.com/LoveLosita/smartflow/backend/shared/ports"
    "github.com/go-redis/redis/v8"
    "github.com/spf13/viper"
    "gorm.io/gorm"
)

type agentRuntime struct {
    redisClient    *redis.Client
    eventBus       eventsvc.OutboxBus
    outboxRepo     *outboxinfra.Repository
    repoManager    *rootdao.RepoManager
    agentRepo      *rootdao.AgentDAO
    cacheRepo      *rootdao.CacheDAO
    userAuthClient *gatewayuserauth.Client
    service        *agentsv.AgentService
    workersStarted bool
}

func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
    db, err := openAgentDBFromConfig()
    if err != nil {
        return nil, fmt.Errorf("connect agent database failed: %w", err)
    }

    redisClient, err := inits.OpenRedisFromConfig()
    if err != nil {
        return nil, fmt.Errorf("connect agent redis failed: %w", err)
    }
    fail := func(cause error) (*agentRuntime, error) {
        _ = redisClient.Close()
        return nil, cause
    }

    cacheRepo := rootdao.NewCacheDAO(redisClient)
    if err = db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
        return fail(fmt.Errorf("initialize agent cache deleter failed: %w", err))
    }

    // Notes:
    // 1. This round copies the startup assembly into cmd/agent instead of touching the old gateway-local path in cmd/start.go.
    // 2. That stabilizes the standalone process entry point first and avoids cross-overwrites with the rpc/pb changes being wired in parallel on the main agent.
    // 3. Once the phase 6 agent/memory startup boundaries settle, we will evaluate extracting the LLM/RAG/bootstrap pieces into a shared layer.
    llmService, err := buildAgentLLMService()
    if err != nil {
        return fail(fmt.Errorf("initialize agent llm service failed: %w", err))
    }
    ragService, err := buildAgentRAGService(ctx)
    if err != nil {
        return fail(err)
    }
    ragRuntime := ragService.Runtime()

    memoryCfg := memorymodule.LoadConfigFromViper()
    memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
    memoryMetrics := memoryobserve.NewMetricsRegistry()

    manager := rootdao.NewManager(db)
    agentRepo := rootdao.NewAgentDAO(db)
    taskRepo := rootdao.NewTaskDAO(db)
    taskClassRepo := rootdao.NewTaskClassDAO(db)
    scheduleRepo := rootdao.NewScheduleDAO(db)
    agentCacheRepo := rootdao.NewAgentCache(redisClient)
    outboxRepo := outboxinfra.NewRepository(db)

    eventBus, err := buildAgentEventBus(outboxRepo)
    if err != nil {
        return fail(err)
    }
    if err = eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
        return fail(fmt.Errorf("register task outbox route failed: %w", err))
    }

    eventPublisher := buildAgentOutboxPublisher(outboxRepo)
    taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)

    var userAuthClient *gatewayuserauth.Client
    if eventBus != nil {
        userAuthClient, err = gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
            Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
            Target:    viper.GetString("userauth.rpc.target"),
            Timeout:   viper.GetDuration("userauth.rpc.timeout"),
        })
        if err != nil {
            return fail(fmt.Errorf("initialize userauth zrpc client failed: %w", err))
        }
    }

    taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
        Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
        Target:    viper.GetString("task.rpc.target"),
        Timeout:   viper.GetDuration("task.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize task zrpc client failed: %w", err))
    }
    taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
        Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
        Target:    viper.GetString("taskClass.rpc.target"),
        Timeout:   viper.GetDuration("taskClass.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize task-class zrpc client failed: %w", err))
    }
    scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
        Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
        Target:    viper.GetString("schedule.rpc.target"),
        Timeout:   viper.GetDuration("schedule.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize schedule zrpc client failed: %w", err))
    }
    memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
        Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
        Target:    viper.GetString("memory.rpc.target"),
        Timeout:   viper.GetDuration("memory.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize memory zrpc client failed: %w", err))
    }

    taskService := rootsvc.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
    taskService.SetActiveScheduleDAO(manager.ActiveSchedule)
    scheduleService := rootsvc.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
    agentService := agentsv.NewAgentService(
        llmService,
        agentRepo,
        taskRepo,
        cacheRepo,
        agentCacheRepo,
        manager.ActiveSchedule,
        manager.ActiveScheduleSession,
        eventPublisher,
    )

    // 1. During the migration the standalone entry point still injects the old schedule/task domain capabilities, so agent/sv does not import the old service in reverse and create an import cycle.
    // 2. Once phase 6 switches these residual DAO adapters to RPC/read-models, remove the injection points here.
    agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
    agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
    agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
    agentService.GetTasksWithUrgencyPromotionFunc = taskService.GetTasksWithUrgencyPromotion

    configureAgentService(
        agentService,
        ragRuntime,
        agentRepo,
        cacheRepo,
        taskClient,
        taskClassClient,
        scheduleClient,
        memoryClient,
        memoryCfg,
        memoryObserver,
        memoryMetrics,
    )

    activeTaskAdapter, err := activeadapters.NewTaskRPCAdapter(activeadapters.TaskRPCConfig{
        Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
        Target:    viper.GetString("task.rpc.target"),
        Timeout:   viper.GetDuration("task.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize task rpc adapter for agent rerun failed: %w", err))
    }
    activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
        Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
        Target:    viper.GetString("schedule.rpc.target"),
        Timeout:   viper.GetDuration("schedule.rpc.timeout"),
    })
    if err != nil {
        return fail(fmt.Errorf("initialize schedule rpc adapter for agent rerun failed: %w", err))
    }
    activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskAdapter, activeScheduleAdapter))
    if err != nil {
        return fail(err)
    }
    activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
    if err != nil {
        return fail(err)
    }
    activeScheduleLLMClient := llmService.ProClient()
    activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
    activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
    activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
    if err != nil {
        return fail(err)
    }
    agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(
        manager.ActiveSchedule,
        activeScheduleGraphRunner,
        activeSchedulePreviewConfirm,
        activeScheduleFeedbackLocator,
    ))

    return &agentRuntime{
        redisClient:    redisClient,
        eventBus:       eventBus,
        outboxRepo:     outboxRepo,
        repoManager:    manager,
        agentRepo:      agentRepo,
        cacheRepo:      cacheRepo,
        userAuthClient: userAuthClient,
        service:        agentService,
    }, nil
}

func (r *agentRuntime) startWorkers(ctx context.Context) error {
    if r == nil || r.workersStarted {
        return nil
    }
    if r.eventBus == nil {
        log.Println("Agent outbox consumer is disabled")
        return nil
    }
    if r.userAuthClient == nil {
        return fmt.Errorf("agent outbox consumer requires userauth zrpc client")
    }

    // 1. First register the handlers the agent itself consumes, and complete the service route for memory.extract.requested.
    // 2. Only the agent boundary is wired here; memory consumption still belongs to cmd/memory, and task events remain publish-only writes into the task outbox.
    // 3. Start the bus only after registration, so the service does not race ahead and consume messages whose handlers are not yet attached.
    if err := eventsvc.RegisterCoreOutboxHandlers(
        r.eventBus,
        r.outboxRepo,
        r.repoManager,
        r.agentRepo,
        r.cacheRepo,
        nil,
        r.userAuthClient,
    ); err != nil {
        return fmt.Errorf("register agent outbox handlers failed: %w", err)
    }

    r.eventBus.Start(ctx)
    r.workersStarted = true
    log.Println("Agent outbox consumer started")
    return nil
}

func (r *agentRuntime) close() {
    if r == nil {
        return
    }
    if r.eventBus != nil {
        r.eventBus.Close()
    }
    if r.redisClient != nil {
        _ = r.redisClient.Close()
    }
}

func openAgentDBFromConfig() (*gorm.DB, error) {
    db, err := inits.OpenDBFromConfig()
    if err != nil {
        return nil, err
    }
    if err = autoMigrateAgentOwnedTables(db); err != nil {
        return nil, err
    }
    if err = autoMigrateAgentOutboxTable(db); err != nil {
        return nil, err
    }
    if err = ensureAgentRuntimeDependencyTables(db); err != nil {
        return nil, err
    }
    return db, nil
}

func autoMigrateAgentOwnedTables(db *gorm.DB) error {
    if db == nil {
        return fmt.Errorf("agent database is not initialized")
    }

    // 1. On startup the standalone agent process only fills in its own table schemas; it does not force foreign-key constraints onto a historical database.
    // 2. Old chat_history rows in production/local data may reference an agent_chat that no longer exists, and forcing the FK would block service startup.
    // 3. During the migration we keep the application-level read/write semantics keyed by chat_id; cleaning orphaned history and adding the FK belongs in a separate data-governance script.
    originalDisableFK := db.Config.DisableForeignKeyConstraintWhenMigrating
    db.Config.DisableForeignKeyConstraintWhenMigrating = true
    defer func() {
        db.Config.DisableForeignKeyConstraintWhenMigrating = originalDisableFK
    }()

    if err := db.AutoMigrate(
        &model.AgentChat{},
        &model.ChatHistory{},
        &model.AgentTimelineEvent{},
        &model.AgentScheduleState{},
        &model.ActiveScheduleSession{},
        &model.AgentStateSnapshotRecord{},
    ); err != nil {
        return fmt.Errorf("auto migrate agent owned tables failed: %w", err)
    }
    return nil
}

func autoMigrateAgentOutboxTable(db *gorm.DB) error {
    cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceAgent)
    if !ok {
        return fmt.Errorf("resolve agent outbox config failed")
    }
    if err := db.Table(cfg.TableName).AutoMigrate(&model.AgentOutboxMessage{}); err != nil {
        return fmt.Errorf("auto migrate agent outbox table failed for %s (%s): %w", cfg.Name, cfg.TableName, err)
    }
    return nil
}

func ensureAgentRuntimeDependencyTables(db *gorm.DB) error {
    // 1. The standalone agent process still reuses some task/schedule/active-scheduler read/write tables; it must not overstep and migrate those tables from here.
    // 2. This is an existence check only: a missing table fails fast instead of letting chat requests run against a half-initialized state.
    // 3. Once phase 6 switches these direct connections to RPC/read-models, shrink this dependency list accordingly.
    for _, dependency := range []struct {
        name  string
        model any
    }{
        {name: "tasks", model: &model.Task{}},
        {name: "task_classes", model: &model.TaskClass{}},
        {name: "task_items", model: &model.TaskClassItem{}},
        {name: "schedules", model: &model.Schedule{}},
        {name: "schedule_events", model: &model.ScheduleEvent{}},
        {name: "active_schedule_triggers", model: &model.ActiveScheduleTrigger{}},
        {name: "active_schedule_previews", model: &model.ActiveSchedulePreview{}},
    } {
        if !db.Migrator().HasTable(dependency.model) {
            return fmt.Errorf("agent runtime dependency table missing: %s", dependency.name)
        }
    }
    return nil
}

func buildAgentLLMService() (*llmservice.Service, error) {
    aiHub, err := inits.InitEino()
    if err != nil {
        return nil, err
    }
    return llmservice.New(llmservice.Options{
        AIHub:             aiHub,
        APIKey:            os.Getenv("ARK_API_KEY"),
        BaseURL:           viper.GetString("agent.baseURL"),
        CourseVisionModel: viper.GetString("courseImport.visionModel"),
    }), nil
}

func buildAgentRAGService(ctx context.Context) (*ragservice.Service, error) {
    ragCfg := ragconfig.LoadFromViper()
    if !ragCfg.Enabled {
        log.Println("RAG service is disabled for agent")
        return ragservice.New(ragservice.Options{}), nil
    }

    ragLogger := log.Default()
    ragService, err := ragservice.NewFromConfig(ctx, ragCfg, ragservice.FactoryDeps{
        Logger:   ragLogger,
        Observer: ragservice.NewLoggerObserver(ragLogger),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize agent RAG service: %w", err)
    }
    log.Printf("Agent RAG runtime initialized: store=%s embed=%s reranker=%s", ragCfg.Store, ragCfg.EmbedProvider, ragCfg.RerankerProvider)
    return ragService, nil
}

func buildAgentEventBus(outboxRepo *outboxinfra.Repository) (eventsvc.OutboxBus, error) {
    kafkaCfg := kafkabus.LoadConfig()
    bus, err := eventsvc.NewServiceOutboxBus(outboxRepo, kafkaCfg, outboxinfra.ServiceAgent)
    if err != nil {
        return nil, fmt.Errorf("failed to initialize outbox event bus for service %s: %w", outboxinfra.ServiceAgent, err)
    }
    serviceBuses := make(map[string]eventsvc.OutboxBus, 1)
    if bus != nil {
        serviceBuses[outboxinfra.ServiceAgent] = bus
    }

    eventBus := eventsvc.NewRoutedOutboxBus(serviceBuses)
    if eventBus == nil {
        log.Println("Agent outbox event bus is disabled")
    }
    return eventBus, nil
}

func buildAgentOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
    kafkaCfg := kafkabus.LoadConfig()
    if !kafkaCfg.Enabled || outboxRepo == nil {
        return nil
    }
    return &repositoryOutboxPublisher{
        repo:     outboxRepo,
        maxRetry: kafkaCfg.MaxRetry,
    }
}

func buildTaskOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
    kafkaCfg := kafkabus.LoadConfig()
    if !kafkaCfg.Enabled || outboxRepo == nil {
        return nil
    }
    return &repositoryOutboxPublisher{
        repo:     outboxRepo,
        maxRetry: kafkaCfg.MaxRetry,
    }
}

type repositoryOutboxPublisher struct {
    repo     *outboxinfra.Repository
    maxRetry int
}

func (p *repositoryOutboxPublisher) Publish(ctx context.Context, req outboxinfra.PublishRequest) error {
    if p == nil || p.repo == nil {
        return fmt.Errorf("outbox publisher is not initialized")
    }

    eventType := strings.TrimSpace(req.EventType)
    if eventType == "" {
        return fmt.Errorf("eventType is empty")
    }
    eventVersion := strings.TrimSpace(req.EventVersion)
    if eventVersion == "" {
        eventVersion = outboxinfra.DefaultEventVersion
    }
    messageKey := strings.TrimSpace(req.MessageKey)
    aggregateID := strings.TrimSpace(req.AggregateID)
    if aggregateID == "" {
        aggregateID = messageKey
    }

    payloadJSON, err := json.Marshal(req.Payload)
    if err != nil {
        return err
    }

    _, err = p.repo.CreateMessage(ctx, eventType, messageKey, outboxinfra.OutboxEventPayload{
        EventID:      strings.TrimSpace(req.EventID),
        EventType:    eventType,
        EventVersion: eventVersion,
        AggregateID:  aggregateID,
        Payload:      payloadJSON,
    }, p.maxRetry)
    return err
}
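Publish normalizes a request before persisting it: the event version defaults to outboxinfra.DefaultEventVersion and an empty aggregate ID falls back to the message key. A minimal caller sketch — the event name and payload are hypothetical; only the normalization rules come from the method above:

// Hypothetical call site for the repository-backed publisher.
publisher := buildAgentOutboxPublisher(outboxRepo)
if publisher != nil {
    err := publisher.Publish(ctx, outboxinfra.PublishRequest{
        EventType:  "agent.example.event", // required; empty after TrimSpace is rejected
        MessageKey: "chat-42",
        // EventVersion omitted: defaults to outboxinfra.DefaultEventVersion.
        // AggregateID omitted: falls back to MessageKey.
        Payload: map[string]any{"chat_id": 42},
    })
    if err != nil {
        log.Printf("outbox publish failed: %v", err)
    }
}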
func configureAgentService(
    agentService *agentsv.AgentService,
    ragRuntime ragservice.Runtime,
    agentRepo *rootdao.AgentDAO,
    cacheRepo *rootdao.CacheDAO,
    taskClient agentsv.TaskRPCClient,
    taskClassClient agentsv.TaskClassAgentRPCClient,
    scheduleClient agentsv.ScheduleAgentRPCClient,
    memoryReaderClient ports.MemoryReaderClient,
    memoryCfg memorymodel.Config,
    memoryObserver memoryobserve.Observer,
    memoryMetrics memoryobserve.MetricsRecorder,
) {
    if agentService == nil {
        return
    }

    agentService.SetAgentStateStore(rootdao.NewAgentStateStoreAdapter(cacheRepo))

    var webSearchProvider web.SearchProvider
    webProvider := viper.GetString("websearch.provider")
    switch webProvider {
    case "bocha":
        bochaKey := viper.GetString("websearch.apiKey")
        if bochaKey == "" {
            log.Println("WebSearch: Bocha API key is empty, degrading to mock")
            webSearchProvider = &web.MockProvider{}
        } else {
            webSearchProvider = web.NewBochaProvider(bochaKey, "")
            log.Println("WebSearch provider: bocha")
        }
    case "mock", "":
        webSearchProvider = &web.MockProvider{}
        log.Println("WebSearch provider: mock (simulated mode)")
    default:
        log.Printf("WebSearch provider %q not recognized, degrading to mock", webProvider)
        webSearchProvider = &web.MockProvider{}
    }

    agentService.SetToolRegistry(agenttools.NewDefaultRegistryWithDeps(agenttools.DefaultRegistryDeps{
        RAGRuntime:        ragRuntime,
        WebSearchProvider: webSearchProvider,
        TaskClassWriteDeps: agenttools.TaskClassWriteDeps{
            UpsertTaskClass: agentsv.NewTaskClassRPCUpsertFunc(taskClassClient),
        },
    }))
    agentService.SetScheduleProvider(agentsv.NewScheduleRPCProvider(scheduleClient, taskClassClient))
    agentService.SetCompactionStore(agentRepo)
    agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
    agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
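configureAgentService picks the web-search provider from two viper keys, degrading to the mock provider whenever the configuration is unusable. A hedged config sketch — the key names come from the switch above, the values are placeholders:

// Hypothetical websearch configuration.
viper.Set("websearch.provider", "bocha")         // "bocha", "mock", or "" (mock); unrecognized values degrade to mock
viper.Set("websearch.apiKey", "<bocha-api-key>") // an empty key also degrades bocha to the mock provider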
@@ -12,10 +12,10 @@ import (
    kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
    outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
    "github.com/LoveLosita/smartflow/backend/inits"
    memorymodule "github.com/LoveLosita/smartflow/backend/memory"
    memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
    llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
    memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
    memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
    memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
    memoryrpc "github.com/LoveLosita/smartflow/backend/services/memory/rpc"
    memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
    ragservice "github.com/LoveLosita/smartflow/backend/services/rag"

@@ -15,6 +15,7 @@ import (
    "github.com/LoveLosita/smartflow/backend/dao"
    "github.com/LoveLosita/smartflow/backend/gateway/api"
    gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
    gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
    gatewaycourse "github.com/LoveLosita/smartflow/backend/gateway/client/course"
    gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
    gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
@@ -26,9 +27,6 @@ import (
    kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
    outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
    "github.com/LoveLosita/smartflow/backend/inits"
    "github.com/LoveLosita/smartflow/backend/memory"
    memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
    memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
    "github.com/LoveLosita/smartflow/backend/middleware"
    "github.com/LoveLosita/smartflow/backend/model"
    "github.com/LoveLosita/smartflow/backend/pkg"
@@ -47,6 +45,9 @@ import (
    agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
    "github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
    llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
    "github.com/LoveLosita/smartflow/backend/services/memory"
    memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
    memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
    ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
    ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
    "github.com/LoveLosita/smartflow/backend/shared/ports"
@@ -55,6 +56,11 @@ import (
    "gorm.io/gorm"
)

const (
    gatewayAgentRPCChatEnabledKey = "agent.rpc.chat.enabled"
    gatewayAgentRPCAPIEnabledKey  = "agent.rpc.api.enabled"
)

// appRuntime carries the dependency graph needed for one process start.
//
// Responsibilities:
@@ -69,8 +75,6 @@ type appRuntime struct {
    agentCache     *dao.AgentCache
    manager        *dao.RepoManager
    outboxRepo     *outboxinfra.Repository
    eventBus       eventsvc.OutboxBus
    memoryModule   *memory.Module
    limiter        *pkg.RateLimiter
    handlers       *api.ApiHandlers
    userAuthClient *gatewayuserauth.Client
@@ -112,8 +116,11 @@ func StartAPI() {
    runtime.startHTTP(ctx)
}

// StartWorker starts only the background asynchronous capabilities and registers no Gin routes.
// It currently covers only the monolith-residual agent outbox relay / Kafka consumer; the memory worker has already moved to cmd/memory.
// StartWorker keeps the legacy worker entry point, but after phase 6 it no longer owns the agent / memory consumption boundaries.
// Current semantics:
// 1. The agent outbox relay / consumer has moved to cmd/agent;
// 2. the memory worker has moved to cmd/memory;
// 3. this entry point only keeps old start commands working and can be deleted during the gateway consolidation phase.
func StartWorker() {
    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    defer stop()
@@ -138,10 +145,10 @@ func mustBuildRuntime(ctx context.Context) *appRuntime {
// buildRuntime assembles the application dependency graph but starts neither HTTP nor background loops.
//
// Steps:
// 1. First initialize configuration, database, Redis, models, RAG, memory, and the other infrastructure;
// 2. then construct the DAO / Service / agent dependencies;
// 1. First initialize configuration, database, Redis, and the other infrastructure the gateway requires;
// 2. then construct each service's zrpc client and decide per switch whether to assemble the local agent fallback;
// 3. finally construct the HTTP handlers, which the api/all modes start on demand;
// 4. worker mode temporarily reuses the full dependency graph, to avoid splitting two assembly paths in one migration round.
// 4. worker mode temporarily reuses the gateway dependency graph, but no longer starts the agent / memory workers.
func buildRuntime(ctx context.Context) (*appRuntime, error) {
    if err := loadConfig(); err != nil {
        return nil, err
@@ -158,54 +165,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
    }
    limiter := pkg.NewRateLimiter(rdb)

    aiHub, err := inits.InitEino()
    if err != nil {
        return nil, fmt.Errorf("failed to initialize Eino: %w", err)
    }

    llmService := llmservice.New(llmservice.Options{
        AIHub:             aiHub,
        APIKey:            os.Getenv("ARK_API_KEY"),
        BaseURL:           viper.GetString("agent.baseURL"),
        CourseVisionModel: viper.GetString("courseImport.visionModel"),
    })

    ragService, err := buildRAGService(ctx)
    if err != nil {
        return nil, err
    }
    ragRuntime := ragService.Runtime()

    memoryCfg := memory.LoadConfigFromViper()
    memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
    memoryMetrics := memoryobserve.NewMetricsRegistry()
    memoryModule := memory.NewModuleWithObserve(
        db,
        llmService.ProClient(),
        ragRuntime,
        memoryCfg,
        memory.ObserveDeps{
            Observer: memoryObserver,
            Metrics:  memoryMetrics,
        },
    )

    // DAO layer initialization.
    cacheRepo := dao.NewCacheDAO(rdb)
    agentCacheRepo := dao.NewAgentCache(rdb)
    _ = db.Use(middleware.NewGormCachePlugin(cacheRepo))
    taskRepo := dao.NewTaskDAO(db)
    taskClassRepo := dao.NewTaskClassDAO(db)
    scheduleRepo := dao.NewScheduleDAO(db)
    manager := dao.NewManager(db)
    agentRepo := dao.NewAgentDAO(db)
    outboxRepo := outboxinfra.NewRepository(db)

    eventBus, err := buildAgentEventBus(outboxRepo)
    if err != nil {
        return nil, err
    }
    eventPublisher := buildCoreOutboxPublisher(outboxRepo)

    // Service layer initialization.
    userAuthClient, err := gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
@@ -265,6 +227,14 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
    if err != nil {
        return nil, fmt.Errorf("failed to initialize memory zrpc client: %w", err)
    }
    agentRPCClient, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
        Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
        Target:    viper.GetString("agent.rpc.target"),
        Timeout:   viper.GetDuration("agent.rpc.timeout"),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize agent zrpc client: %w", err)
    }
    activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
        Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
        Target:    viper.GetString("activeScheduler.rpc.target"),
@@ -273,81 +243,123 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
    if err != nil {
        return nil, fmt.Errorf("failed to initialize active-scheduler zrpc client: %w", err)
    }
    if err := eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
        return nil, fmt.Errorf("failed to register task outbox route: %w", err)
    }
    taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
    taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
    taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
    scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
    agentService := agentsv.NewAgentService(
        llmService,
        agentRepo,
        taskRepo,
        cacheRepo,
        agentCacheRepo,
        manager.ActiveSchedule,
        manager.ActiveScheduleSession,
        eventPublisher,
    )
    // 1. The startup assembly layer still injects the old service's scheduling capabilities, so agent/sv does not import the old service in reverse and create an import cycle.
    // 2. Once schedule/task fully move to RPC, these function injection points can be trimmed further.
    agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
    agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
    agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
    agentService.GetTasksWithUrgencyPromotionFunc = taskSv.GetTasksWithUrgencyPromotion
    var agentRepo *dao.AgentDAO
    var agentCacheRepo *dao.AgentCache
    var manager *dao.RepoManager
    var outboxRepo *outboxinfra.Repository
    var agentService *agentsv.AgentService
    if shouldBuildGatewayAgentFallback() {
        log.Println("Gateway agent RPC fallback is enabled; building local AgentService compatibility path")

    configureAgentService(
        agentService,
        ragRuntime,
        agentRepo,
        cacheRepo,
        taskClient,
        taskClassClient,
        scheduleClient,
        memoryClient,
        memoryCfg,
        memoryObserver,
        memoryMetrics,
    )
    aiHub, err := inits.InitEino()
    if err != nil {
        return nil, fmt.Errorf("failed to initialize Eino: %w", err)
    }
    llmService := llmservice.New(llmservice.Options{
        AIHub:             aiHub,
        APIKey:            os.Getenv("ARK_API_KEY"),
        BaseURL:           viper.GetString("agent.baseURL"),
        CourseVisionModel: viper.GetString("courseImport.visionModel"),
    })

    // 1. task_pool facts now go through the task RPC uniformly, so the chat rerun stops querying the tasks table directly;
    // 2. schedule facts / feedback / apply now go through the schedule RPC uniformly, so the chat rerun stops querying the schedule tables directly.
    activeTaskAdapter, err := activeadapters.NewTaskRPCAdapter(activeadapters.TaskRPCConfig{
        Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
        Target:    viper.GetString("task.rpc.target"),
        Timeout:   viper.GetDuration("task.rpc.timeout"),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize task rpc adapter for active-scheduler rerun: %w", err)
    }
    ragService, err := buildRAGService(ctx)
    if err != nil {
        return nil, err
    }
    ragRuntime := ragService.Runtime()
    memoryCfg := memory.LoadConfigFromViper()
    memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
    memoryMetrics := memoryobserve.NewMetricsRegistry()

    agentCacheRepo = dao.NewAgentCache(rdb)
    taskRepo := dao.NewTaskDAO(db)
    taskClassRepo := dao.NewTaskClassDAO(db)
    scheduleRepo := dao.NewScheduleDAO(db)
    manager = dao.NewManager(db)
    agentRepo = dao.NewAgentDAO(db)
    outboxRepo = outboxinfra.NewRepository(db)

    // 1. The fallback exists only as the migration-period escape hatch while the RPC switches are off; it no longer starts the agent outbox event bus.
    // 2. Events produced by the fallback are still written to the per-service outbox tables; the standalone cmd/agent / cmd/task processes own relay / consume.
    eventPublisher := buildCoreOutboxPublisher(outboxRepo)
    if err := eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
        return nil, fmt.Errorf("failed to register task outbox route: %w", err)
    }
    taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
    taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
    taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
    scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
    agentService = agentsv.NewAgentService(
        llmService,
        agentRepo,
        taskRepo,
        cacheRepo,
        agentCacheRepo,
        manager.ActiveSchedule,
        manager.ActiveScheduleSession,
        eventPublisher,
    )
    // 1. The startup assembly layer still injects the old service's scheduling capabilities, so agent/sv does not import the old service in reverse and create an import cycle.
    // 2. Once schedule/task fully move to RPC, these function injection points can be trimmed further.
    agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
    agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
    agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
    agentService.GetTasksWithUrgencyPromotionFunc = taskSv.GetTasksWithUrgencyPromotion

    configureAgentService(
        agentService,
        ragRuntime,
        agentRepo,
        cacheRepo,
        taskClient,
        taskClassClient,
        scheduleClient,
        memoryClient,
        memoryCfg,
        memoryObserver,
        memoryMetrics,
    )

    // 1. task_pool facts now go through the task RPC uniformly, so the chat rerun stops querying the tasks table directly;
    // 2. schedule facts / feedback / apply now go through the schedule RPC uniformly, so the chat rerun stops querying the schedule tables directly.
    activeTaskAdapter, err := activeadapters.NewTaskRPCAdapter(activeadapters.TaskRPCConfig{
        Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
        Target:    viper.GetString("task.rpc.target"),
        Timeout:   viper.GetDuration("task.rpc.timeout"),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize task rpc adapter for active-scheduler rerun: %w", err)
    }
    activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
        Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
        Target:    viper.GetString("schedule.rpc.target"),
        Timeout:   viper.GetDuration("schedule.rpc.timeout"),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize schedule rpc adapter for active-scheduler rerun: %w", err)
    }
    activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskAdapter, activeScheduleAdapter))
    if err != nil {
        return nil, err
    }
    activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
    if err != nil {
        return nil, err
    }
    // 1. The active-schedule selector reuses the Pro model on its own; on LLM failure the selection layer falls back explicitly to the deterministic candidate;
    // 2. dry-run and selection are chained through the graph runner, so trigger_pipeline does not assemble a second candidate pipeline.
    activeScheduleLLMClient := llmService.ProClient()
    activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
    activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
    activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
    if err != nil {
        return nil, err
    }
    agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
    } else {
        log.Println("Gateway agent local fallback is disabled; /agent HTTP routes use cmd/agent zrpc")
    }
    activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
        Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
        Target:    viper.GetString("schedule.rpc.target"),
        Timeout:   viper.GetDuration("schedule.rpc.timeout"),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to initialize schedule rpc adapter for active-scheduler rerun: %w", err)
    }
    activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskAdapter, activeScheduleAdapter))
    if err != nil {
        return nil, err
    }
    activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
    if err != nil {
        return nil, err
    }
    // 1. The active-schedule selector reuses the Pro model on its own; on LLM failure the selection layer falls back explicitly to the deterministic candidate;
    // 2. dry-run and selection are chained through the graph runner, so trigger_pipeline does not assemble a second candidate pipeline.
    activeScheduleLLMClient := llmService.ProClient()
    activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
    activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
    activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
    if err != nil {
        return nil, err
    }
    agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
    handlers := buildAPIHandlers(taskClient, taskClassClient, courseClient, scheduleClient, agentService, memoryClient, activeSchedulerClient, notificationClient)
    handlers := buildAPIHandlers(taskClient, taskClassClient, courseClient, scheduleClient, agentService, agentRPCClient, memoryClient, activeSchedulerClient, notificationClient)

    runtime := &appRuntime{
        db: db,
@@ -358,20 +370,23 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
        agentCache:     agentCacheRepo,
        manager:        manager,
        outboxRepo:     outboxRepo,
        eventBus:       eventBus,
        memoryModule:   memoryModule,
        limiter:        limiter,
        handlers:       handlers,
        userAuthClient: userAuthClient,
    }
    if runtime.eventBus != nil {
        if err := runtime.registerEventHandlers(); err != nil {
            return nil, err
        }
    }
    return runtime, nil
}

// shouldBuildGatewayAgentFallback decides whether the gateway must keep the local AgentService fallback surface.
//
// Responsibilities:
// 1. It reads startup-time configuration only and does no dynamic runtime switching;
// 2. if either the chat or the non-chat RPC switch is off, it conservatively assembles the fallback so old environments can still start;
// 3. with both switches on, it skips the local agent orchestration dependencies and leaves the gateway with only the HTTP/SSE facade.
func shouldBuildGatewayAgentFallback() bool {
    return !viper.GetBool(gatewayAgentRPCChatEnabledKey) || !viper.GetBool(gatewayAgentRPCAPIEnabledKey)
}
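Because the check ORs the two negated switches, the local fallback is skipped only when both flags are on. A short illustration with example values:

// Hypothetical switch combinations.
viper.Set(gatewayAgentRPCChatEnabledKey, true)
viper.Set(gatewayAgentRPCAPIEnabledKey, true)
_ = shouldBuildGatewayAgentFallback() // false: gateway keeps only the HTTP/SSE facade

viper.Set(gatewayAgentRPCAPIEnabledKey, false)
_ = shouldBuildGatewayAgentFallback() // true: turning either switch off rebuilds the local fallback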

func buildRAGService(ctx context.Context) (*ragservice.Service, error) {
    ragCfg := ragconfig.LoadFromViper()
    if !ragCfg.Enabled {
@@ -394,28 +409,6 @@ func buildRAGService(ctx context.Context) (*ragservice.Service, error) {
    return ragService, nil
}

func buildAgentEventBus(outboxRepo *outboxinfra.Repository) (eventsvc.OutboxBus, error) {
    // Assembly of the agent outbox consumption boundary:
    // 1. After CP1 the monolith residue consumes only the agent's own outbox;
    // 2. memory.extract.requested can still be published to memory_outbox_messages, but its consumption and worker have moved to cmd/memory;
    // 3. with kafka.enabled=false this returns nil, and business logic follows the existing synchronous degradation strategy.
    kafkaCfg := kafkabus.LoadConfig()
    bus, err := eventsvc.NewServiceOutboxBus(outboxRepo, kafkaCfg, outboxinfra.ServiceAgent)
    if err != nil {
        return nil, fmt.Errorf("failed to initialize outbox event bus for service %s: %w", outboxinfra.ServiceAgent, err)
    }
    serviceBuses := make(map[string]eventsvc.OutboxBus, 1)
    if bus != nil {
        serviceBuses[outboxinfra.ServiceAgent] = bus
    }

    eventBus := eventsvc.NewRoutedOutboxBus(serviceBuses)
    if eventBus == nil {
        log.Println("Outbox event bus is disabled")
    }
    return eventBus, nil
}

// buildCoreOutboxPublisher constructs the monolith-residual publisher.
//
// Responsibilities:
@@ -823,7 +816,7 @@ func configureAgentService(
    agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
    // 1. The agent main path reads memory uniformly over the memory zrpc, so after CP3 it no longer calls the in-process memory.Module directly;
    // 2. observer / metrics keep reusing the startup-time assembly, so injection-side observability is not lost after the RPC cutover;
    // 3. the old memoryModule stays in the startup graph as a migration-period dependency and a future fallback surface;
    // 3. the gateway no longer assembles memory.Module; the memory worker / management capabilities belong uniformly to cmd/memory;
    // 4. while the memory service is temporarily unavailable, the prefetch path only logs a warning and degrades softly without blocking the main chat flow.
    agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
@@ -834,6 +827,7 @@ func buildAPIHandlers(
    courseClient ports.CourseCommandClient,
    scheduleClient ports.ScheduleCommandClient,
    agentService *agentsv.AgentService,
    agentRPCClient *gatewayagent.Client,
    memoryClient ports.MemoryCommandClient,
    activeSchedulerClient ports.ActiveSchedulerCommandClient,
    notificationClient ports.NotificationCommandClient,
@@ -843,7 +837,7 @@ func buildAPIHandlers(
    TaskClassHandler: api.NewTaskClassHandler(taskClassClient),
    CourseHandler:    api.NewCourseHandler(courseClient),
    ScheduleHandler:  api.NewScheduleAPI(scheduleClient),
    AgentHandler:     api.NewAgentHandler(agentService),
    AgentHandler:     api.NewAgentHandlerWithRPC(agentService, agentRPCClient),
    MemoryHandler:    api.NewMemoryHandler(memoryClient),
    ActiveSchedule:   api.NewActiveScheduleAPI(activeSchedulerClient),
    Notification:     api.NewNotificationAPI(notificationClient),
@@ -855,29 +849,8 @@ func (r *appRuntime) startWorkers(ctx context.Context) {
        return
    }

    if r.eventBus != nil {
        r.eventBus.Start(ctx)
        log.Println("Outbox event bus started")
    } else {
        log.Println("Outbox event bus is disabled")
    }
    log.Println("Memory worker is managed by cmd/memory in phase 6 CP1")
}

func (r *appRuntime) registerEventHandlers() error {
    // Purpose: register only the outbox handlers that still live in the monolith-residual domain; active-scheduler / notification already manage their own consumption boundaries in standalone processes.
    if err := eventsvc.RegisterCoreOutboxHandlers(
        r.eventBus,
        r.outboxRepo,
        r.manager,
        r.agentRepo,
        r.cacheRepo,
        r.memoryModule,
        r.userAuthClient,
    ); err != nil {
        return err
    }
    return nil
    log.Println("Gateway outbox worker is disabled; agent relay/consumer is managed by cmd/agent")
    log.Println("Memory worker is managed by cmd/memory in phase 6")
}

func (r *appRuntime) startHTTP(ctx context.Context) {
@@ -889,7 +862,4 @@ func (r *appRuntime) close() {
    if r == nil {
        return
    }
    if r.eventBus != nil {
        r.eventBus.Close()
    }
}