Version: 0.9.76.dev.260505

后端:
1.阶段 6 agent / memory 服务化收口
- 新增 cmd/agent 独立进程入口,承载 agent zrpc server、agent outbox relay / consumer 和运行时依赖初始化
- 补齐 services/agent/rpc 的 Chat stream 与 conversation meta/list/timeline、schedule-preview、context-stats、schedule-state unary RPC
- 新增 gateway/client/agent 与 shared/contracts/agent,将 /api/v1/agent chat 和非 chat 门面切到 agent zrpc
- 收缩 gateway 本地 AgentService 装配,双 RPC 开关开启时不再初始化本地 agent 编排、LLM、RAG 和 memory reader fallback
- 将 backend/memory 物理迁入 services/memory,私有实现收入 internal,保留 module/model/observe 作为 memory 服务门面
- 调整 memory outbox、memory reader 和 agent 记忆渲染链路的 import 与服务边界,cmd/memory 独占 memory worker / consumer
- 关闭 gateway 侧 agent outbox worker 所有权,agent relay / consumer 由 cmd/agent 独占,gateway 仅保留 HTTP/SSE 门面与迁移期开关回退
- 更新阶段 6 文档,记录 agent / memory 当前切流点、smoke 结果,以及 backend/client 与 gateway/shared 的目录收口口径
This commit is contained in:
Losita
2026-05-05 19:31:39 +08:00
parent d7184b776b
commit 2a96f4c6f9
72 changed files with 2775 additions and 291 deletions

View File

@@ -0,0 +1,269 @@
package main
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
rootdao "github.com/LoveLosita/smartflow/backend/dao"
"github.com/LoveLosita/smartflow/backend/model"
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
)
// buildActiveSchedulePreviewConfirmService assembles the preview/confirm
// service on top of the active-schedule DAO, the dry-run service, and a
// schedule apply adapter (declared as a local interface so any implementation
// with the matching Apply method fits).
func buildActiveSchedulePreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
	ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
}) (*activesvc.PreviewConfirmService, error) {
	preview, buildErr := activepreview.NewService(activeDAO)
	if buildErr != nil {
		return nil, buildErr
	}
	return activesvc.NewPreviewConfirmService(dryRun, preview, activeDAO, scheduleApplyAdapter)
}
// buildActiveScheduleSessionRerunFunc wires the active-schedule feedback
// locator / graph / preview capabilities into a rerun closure that the chat
// entry point can call.
//
// Notes:
//  1. Minimal wiring only: it reuses the existing locator -> trigger -> graph
//     -> preview components and does not duplicate the worker/notification
//     path;
//  2. on success it returns the session state, the assistant text and the
//     business-card data;
//  3. on failure the error is handed straight back to the chat entry point,
//     which owns the failure logging and SSE error emission.
func buildActiveScheduleSessionRerunFunc(
	activeDAO *rootdao.ActiveScheduleDAO,
	graphRunner *activegraph.Runner,
	previewConfirm *activesvc.PreviewConfirmService,
	feedbackLocator *activefeedbacklocate.Service,
) agentsv.ActiveScheduleSessionRerunFunc {
	return func(
		ctx context.Context,
		session *model.ActiveScheduleSessionSnapshot,
		userMessage string,
		traceID string,
		requestStart time.Time,
	) (*agentsv.ActiveScheduleSessionRerunResult, error) {
		// Guard against a partially assembled closure; feedbackLocator alone
		// is optional and is handled with a user-facing fallback below.
		if activeDAO == nil || graphRunner == nil || previewConfirm == nil {
			return nil, fmt.Errorf("主动调度 rerun 依赖未初始化")
		}
		if session == nil {
			return nil, fmt.Errorf("主动调度 session 不能为空")
		}
		triggerRow, err := activeDAO.GetTriggerByID(ctx, session.TriggerID)
		if err != nil {
			return nil, err
		}
		resolvedTargetType := activeTrigger.TargetType(triggerRow.TargetType)
		resolvedTargetID := triggerRow.TargetID
		// Unfinished-feedback triggers may still lack a concrete target: run
		// the locator when the target id is unset or "feedback_target" was
		// previously recorded in the session's missing info.
		needsFeedbackLocate := activeTrigger.TriggerType(triggerRow.TriggerType) == activeTrigger.TriggerTypeUnfinishedFeedback &&
			(resolvedTargetID <= 0 || containsString(session.State.MissingInfo, "feedback_target"))
		if needsFeedbackLocate {
			if feedbackLocator == nil {
				// Without a locator, fall back to asking the user and park the
				// session in waiting-user-reply instead of failing the rerun.
				question := firstNonEmptyString(
					activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
					session.State.PendingQuestion,
				)
				nextState := session.State
				nextState.PendingQuestion = question
				nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
				nextState.LastCandidateID = ""
				nextState.LastNotificationID = ""
				nextState.FailedReason = ""
				nextState.ExpiresAt = nil
				return &agentsv.ActiveScheduleSessionRerunResult{
					AssistantText: question,
					SessionState:  nextState,
					SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
				}, nil
			}
			locateResult, locateErr := feedbackLocator.Resolve(ctx, activefeedbacklocate.Request{
				UserID:          triggerRow.UserID,
				UserMessage:     userMessage,
				PendingQuestion: session.State.PendingQuestion,
				MissingInfo:     cloneStringSlice(session.State.MissingInfo),
			})
			if locateErr != nil {
				return nil, locateErr
			}
			if locateResult.ShouldAskUser() {
				// The locator still cannot pin a target: surface its question
				// (with fallbacks) and wait for the user's reply.
				question := firstNonEmptyString(
					locateResult.AskUserQuestion,
					activefeedbacklocate.BuildAskUserQuestion(session.State.MissingInfo),
					session.State.PendingQuestion,
				)
				nextState := session.State
				nextState.PendingQuestion = question
				nextState.MissingInfo = appendMissingString(nextState.MissingInfo, "feedback_target")
				nextState.LastCandidateID = ""
				nextState.LastNotificationID = ""
				nextState.FailedReason = ""
				nextState.ExpiresAt = nil
				return &agentsv.ActiveScheduleSessionRerunResult{
					AssistantText: question,
					SessionState:  nextState,
					SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
				}, nil
			}
			resolvedTargetType = activeTrigger.TargetType(locateResult.TargetType)
			resolvedTargetID = locateResult.TargetID
		}
		// Rebuild the domain trigger from the stored row plus the (possibly
		// re-located) target, stamped with this request's trace id and time.
		domainTrigger := activeTrigger.ActiveScheduleTrigger{
			TriggerID:      triggerRow.ID,
			UserID:         triggerRow.UserID,
			TriggerType:    activeTrigger.TriggerType(triggerRow.TriggerType),
			Source:         activeTrigger.SourceUserFeedback,
			TargetType:     resolvedTargetType,
			TargetID:       resolvedTargetID,
			FeedbackID:     triggerRow.FeedbackID,
			IdempotencyKey: triggerRow.IdempotencyKey,
			MockNow:        nil,
			IsMockTime:     false,
			RequestedAt:    requestStart,
			TraceID:        traceID,
		}
		if err := domainTrigger.Validate(); err != nil {
			return nil, err
		}
		graphResult, err := graphRunner.Run(ctx, domainTrigger)
		if err != nil {
			return nil, err
		}
		if graphResult == nil || graphResult.DryRunData == nil || graphResult.DryRunData.Context == nil {
			return nil, fmt.Errorf("主动调度 graph 返回空结果")
		}
		selectionResult := graphResult.SelectionResult
		// Derive the next session state from the current one; candidate /
		// notification tracking is reset here and refined per branch below.
		state := session.State
		state.LastCandidateID = strings.TrimSpace(selectionResult.SelectedCandidateID)
		state.LastNotificationID = ""
		state.FailedReason = ""
		state.MissingInfo = cloneStringSlice(graphResult.DryRunData.Context.DerivedFacts.MissingInfo)
		switch selectionResult.Action {
		case activesel.ActionSelectCandidate:
			if !graphResult.DryRunData.Observation.Decision.ShouldWritePreview {
				return nil, fmt.Errorf("主动调度 graph 选择了候选,但未产出可写 preview")
			}
			previewResp, err := previewConfirm.CreatePreviewFromDryRun(ctx, activepreview.CreatePreviewRequest{
				ActiveContext:       graphResult.DryRunData.Context,
				Observation:         graphResult.DryRunData.Observation,
				Candidates:          graphResult.DryRunData.Candidates,
				TriggerID:           triggerRow.ID,
				GeneratedAt:         requestStart,
				SelectedCandidateID: selectionResult.SelectedCandidateID,
				ExplanationText:     selectionResult.ExplanationText,
				NotificationSummary: selectionResult.NotificationSummary,
				FallbackUsed:        selectionResult.FallbackUsed,
			})
			if err != nil {
				return nil, err
			}
			// A preview was written: clear pending question / missing info and
			// carry the preview expiry on the session state.
			state.PendingQuestion = ""
			state.MissingInfo = nil
			state.FailedReason = ""
			expiresAt := previewResp.Detail.ExpiresAt
			state.ExpiresAt = &expiresAt
			return &agentsv.ActiveScheduleSessionRerunResult{
				AssistantText: firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, previewResp.Detail.Explanation, previewResp.Detail.Notification, "主动调度建议已更新。"),
				BusinessCard: &agentstream.StreamBusinessCardExtra{
					CardType: "active_schedule_preview",
					Title:    "SmartFlow 日程调整建议",
					Summary:  firstNonEmptyString(selectionResult.NotificationSummary, previewResp.Detail.Notification, previewResp.Detail.Explanation),
					Data:     previewDetailToMap(previewResp.Detail),
				},
				SessionState:  state,
				SessionStatus: model.ActiveScheduleSessionStatusReadyPreview,
				PreviewID:     previewResp.Detail.PreviewID,
			}, nil
		case activesel.ActionAskUser:
			// Selection wants more input: record the question and wait.
			question := firstNonEmptyString(selectionResult.AskUserQuestion, selectionResult.ExplanationText, "请继续补充主动调度需要的信息。")
			state.PendingQuestion = question
			state.ExpiresAt = nil
			return &agentsv.ActiveScheduleSessionRerunResult{
				AssistantText: question,
				SessionState:  state,
				SessionStatus: model.ActiveScheduleSessionStatusWaitingUserReply,
			}, nil
		default:
			// Nothing actionable: clear the transient fields and mark the
			// session ignored.
			assistantText := firstNonEmptyString(selectionResult.ExplanationText, selectionResult.NotificationSummary, "当前主动调度暂时没有需要继续处理的内容。")
			state.PendingQuestion = ""
			state.MissingInfo = nil
			state.ExpiresAt = nil
			return &agentsv.ActiveScheduleSessionRerunResult{
				AssistantText: assistantText,
				SessionState:  state,
				SessionStatus: model.ActiveScheduleSessionStatusIgnored,
			}, nil
		}
	}
}
// previewDetailToMap round-trips the preview detail through JSON to produce a
// generic map payload for the business card; any marshal/unmarshal failure
// degrades to an empty map instead of surfacing an error.
func previewDetailToMap(detail activepreview.ActiveSchedulePreviewDetail) map[string]any {
	encoded, marshalErr := json.Marshal(detail)
	if marshalErr != nil {
		return map[string]any{}
	}
	var decoded map[string]any
	if unmarshalErr := json.Unmarshal(encoded, &decoded); unmarshalErr != nil {
		return map[string]any{}
	}
	return decoded
}
// firstNonEmptyString returns the first argument that is non-blank after
// whitespace trimming (already trimmed), or "" when every value is blank.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		candidate = strings.TrimSpace(candidate)
		if candidate != "" {
			return candidate
		}
	}
	return ""
}
// cloneStringSlice returns an independent copy of values; empty or nil input
// yields nil so callers can rely on nil meaning "no entries".
func cloneStringSlice(values []string) []string {
	if len(values) == 0 {
		return nil
	}
	return append(make([]string, 0, len(values)), values...)
}
// appendMissingString adds next (whitespace-trimmed) to the list unless an
// equivalent trimmed entry already exists or next is blank. It always returns
// a fresh slice (nil when the result would be empty) so the input is never
// aliased by the caller.
func appendMissingString(values []string, next string) []string {
	entry := strings.TrimSpace(next)
	skip := entry == ""
	if !skip {
		for _, existing := range values {
			if strings.TrimSpace(existing) == entry {
				skip = true
				break
			}
		}
	}
	var result []string
	if len(values) > 0 {
		result = append(make([]string, 0, len(values)+1), values...)
	}
	if skip {
		return result
	}
	return append(result, entry)
}
// containsString reports whether target (whitespace-trimmed) matches any
// trimmed entry in values; a blank target never matches.
func containsString(values []string, target string) bool {
	needle := strings.TrimSpace(target)
	if needle == "" {
		return false
	}
	for index := range values {
		if strings.TrimSpace(values[index]) == needle {
			return true
		}
	}
	return false
}

50
backend/cmd/agent/main.go Normal file
View File

@@ -0,0 +1,50 @@
package main
import (
"context"
"log"
"os"
"os/signal"
"syscall"
"github.com/LoveLosita/smartflow/backend/bootstrap"
agentrpc "github.com/LoveLosita/smartflow/backend/services/agent/rpc"
"github.com/spf13/viper"
)
// main boots the standalone agent process: load config, build the runtime
// dependency graph, start the outbox workers, then serve the agent zrpc
// server until SIGINT/SIGTERM.
func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	runtime, err := buildAgentRuntime(ctx)
	if err != nil {
		log.Fatalf("failed to initialize agent runtime: %v", err)
	}
	defer runtime.close()
	if err := runtime.startWorkers(ctx); err != nil {
		// log.Fatalf exits via os.Exit and skips deferred functions, so
		// release the runtime (redis, event bus) explicitly before the fatal.
		runtime.close()
		log.Fatalf("failed to start agent workers: %v", err)
	}
	server, listenOn, err := agentrpc.NewServer(agentrpc.ServerOptions{
		ListenOn: viper.GetString("agent.rpc.listenOn"),
		Timeout:  viper.GetDuration("agent.rpc.timeout"),
		Service:  runtime.service,
	})
	if err != nil {
		// Same reasoning as above: run cleanup before exiting fatally.
		runtime.close()
		log.Fatalf("failed to build agent zrpc server: %v", err)
	}
	defer server.Stop()
	go func() {
		// Start blocks; run it in its own goroutine while main waits for a
		// shutdown signal on ctx.
		log.Printf("agent zrpc service starting on %s", listenOn)
		server.Start()
	}()
	<-ctx.Done()
	log.Println("agent service stopping")
}

View File

@@ -0,0 +1,527 @@
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"strings"
rootdao "github.com/LoveLosita/smartflow/backend/dao"
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/inits"
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
"github.com/LoveLosita/smartflow/backend/model"
rootsvc "github.com/LoveLosita/smartflow/backend/service"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
"github.com/LoveLosita/smartflow/backend/shared/ports"
"github.com/go-redis/redis/v8"
"github.com/spf13/viper"
"gorm.io/gorm"
)
// agentRuntime carries the dependency graph assembled for one agent process
// start: shared infra handles, the DAOs the outbox handlers need, and the
// fully wired AgentService that gets exposed over zrpc.
type agentRuntime struct {
	redisClient    *redis.Client           // shared redis connection; released in close()
	eventBus       eventsvc.OutboxBus      // routed outbox bus; nil when consumption is disabled
	outboxRepo     *outboxinfra.Repository // agent outbox table access
	repoManager    *rootdao.RepoManager    // aggregate DAO manager (active-schedule and friends)
	agentRepo      *rootdao.AgentDAO       // agent chat/history persistence
	cacheRepo      *rootdao.CacheDAO       // redis-backed cache DAO
	userAuthClient *gatewayuserauth.Client // userauth zrpc client; required once the bus is enabled
	service        *agentsv.AgentService   // the service served over zrpc
	workersStarted bool                    // guards startWorkers against double start
}
// buildAgentRuntime assembles the full dependency graph for the standalone
// agent process: DB/redis/cache, LLM + RAG + memory observability, DAOs, the
// outbox bus and publishers, downstream zrpc clients, the AgentService, and
// the active-schedule rerun wiring.
//
// After redis is open, every failure path goes through `fail`, which closes
// redis before returning. NOTE(review): the gorm DB handle is not explicitly
// closed on these paths — confirm whether process exit is the intended cleanup.
//
// Notes:
//  1. This round copies the startup assembly into cmd/agent instead of
//     touching the legacy gateway-local chain in cmd/start.go.
//  2. That stabilizes the standalone process entry first and avoids crossing
//     over the rpc/pb changes being wired in parallel.
//  3. Once the phase-6 agent/memory startup boundaries are stable, revisit
//     extracting the LLM/RAG/bootstrap pieces into a shared layer.
func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
	db, err := openAgentDBFromConfig()
	if err != nil {
		return nil, fmt.Errorf("connect agent database failed: %w", err)
	}
	redisClient, err := inits.OpenRedisFromConfig()
	if err != nil {
		return nil, fmt.Errorf("connect agent redis failed: %w", err)
	}
	// fail releases redis on any construction error from here on.
	fail := func(cause error) (*agentRuntime, error) {
		_ = redisClient.Close()
		return nil, cause
	}
	cacheRepo := rootdao.NewCacheDAO(redisClient)
	if err = db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
		return fail(fmt.Errorf("initialize agent cache deleter failed: %w", err))
	}
	llmService, err := buildAgentLLMService()
	if err != nil {
		return fail(fmt.Errorf("initialize agent llm service failed: %w", err))
	}
	ragService, err := buildAgentRAGService(ctx)
	if err != nil {
		return fail(err)
	}
	ragRuntime := ragService.Runtime()
	memoryCfg := memorymodule.LoadConfigFromViper()
	memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
	memoryMetrics := memoryobserve.NewMetricsRegistry()
	// DAO layer initialization.
	manager := rootdao.NewManager(db)
	agentRepo := rootdao.NewAgentDAO(db)
	taskRepo := rootdao.NewTaskDAO(db)
	taskClassRepo := rootdao.NewTaskClassDAO(db)
	scheduleRepo := rootdao.NewScheduleDAO(db)
	agentCacheRepo := rootdao.NewAgentCache(redisClient)
	outboxRepo := outboxinfra.NewRepository(db)
	eventBus, err := buildAgentEventBus(outboxRepo)
	if err != nil {
		return fail(err)
	}
	if err = eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
		return fail(fmt.Errorf("register task outbox route failed: %w", err))
	}
	eventPublisher := buildAgentOutboxPublisher(outboxRepo)
	taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
	// The userauth client is only needed by the outbox handlers, so it is
	// built only when the event bus is enabled.
	var userAuthClient *gatewayuserauth.Client
	if eventBus != nil {
		userAuthClient, err = gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
			Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
			Target:    viper.GetString("userauth.rpc.target"),
			Timeout:   viper.GetDuration("userauth.rpc.timeout"),
		})
		if err != nil {
			return fail(fmt.Errorf("initialize userauth zrpc client failed: %w", err))
		}
	}
	// Downstream zrpc clients used by the agent facade.
	taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
		Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
		Target:    viper.GetString("task.rpc.target"),
		Timeout:   viper.GetDuration("task.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task zrpc client failed: %w", err))
	}
	taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
		Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
		Target:    viper.GetString("taskClass.rpc.target"),
		Timeout:   viper.GetDuration("taskClass.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task-class zrpc client failed: %w", err))
	}
	scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
		Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
		Target:    viper.GetString("schedule.rpc.target"),
		Timeout:   viper.GetDuration("schedule.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize schedule zrpc client failed: %w", err))
	}
	memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
		Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
		Target:    viper.GetString("memory.rpc.target"),
		Timeout:   viper.GetDuration("memory.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize memory zrpc client failed: %w", err))
	}
	// Legacy domain services that back the injected capability funcs below.
	taskService := rootsvc.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
	taskService.SetActiveScheduleDAO(manager.ActiveSchedule)
	scheduleService := rootsvc.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
	agentService := agentsv.NewAgentService(
		llmService,
		agentRepo,
		taskRepo,
		cacheRepo,
		agentCacheRepo,
		manager.ActiveSchedule,
		manager.ActiveScheduleSession,
		eventPublisher,
	)
	// 1. During migration the standalone entry still injects the legacy
	//    schedule/task domain capabilities, so agent/sv does not import the old
	//    service package and create a cyclic dependency.
	// 2. Once later phase-6 work switches these residual DAO adapters to
	//    RPC/read-models, remove the injection points here.
	agentService.SmartPlanningMultiRawFunc = scheduleService.SmartPlanningMultiRaw
	agentService.HybridScheduleWithPlanMultiFunc = scheduleService.HybridScheduleWithPlanMulti
	agentService.ResolvePlanningWindowFunc = scheduleService.ResolvePlanningWindowByTaskClasses
	agentService.GetTasksWithUrgencyPromotionFunc = taskService.GetTasksWithUrgencyPromotion
	configureAgentService(
		agentService,
		ragRuntime,
		agentRepo,
		cacheRepo,
		taskClient,
		taskClassClient,
		scheduleClient,
		memoryClient,
		memoryCfg,
		memoryObserver,
		memoryMetrics,
	)
	// Active-schedule rerun wiring: dedicated task/schedule RPC adapters feed
	// the dry-run, preview-confirm, selection and graph components.
	activeTaskAdapter, err := activeadapters.NewTaskRPCAdapter(activeadapters.TaskRPCConfig{
		Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
		Target:    viper.GetString("task.rpc.target"),
		Timeout:   viper.GetDuration("task.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize task rpc adapter for agent rerun failed: %w", err))
	}
	activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
		Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
		Target:    viper.GetString("schedule.rpc.target"),
		Timeout:   viper.GetDuration("schedule.rpc.timeout"),
	})
	if err != nil {
		return fail(fmt.Errorf("initialize schedule rpc adapter for agent rerun failed: %w", err))
	}
	activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskAdapter, activeScheduleAdapter))
	if err != nil {
		return fail(err)
	}
	activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
	if err != nil {
		return fail(err)
	}
	activeScheduleLLMClient := llmService.ProClient()
	activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
	activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
	activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
	if err != nil {
		return fail(err)
	}
	agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(
		manager.ActiveSchedule,
		activeScheduleGraphRunner,
		activeSchedulePreviewConfirm,
		activeScheduleFeedbackLocator,
	))
	return &agentRuntime{
		redisClient:    redisClient,
		eventBus:       eventBus,
		outboxRepo:     outboxRepo,
		repoManager:    manager,
		agentRepo:      agentRepo,
		cacheRepo:      cacheRepo,
		userAuthClient: userAuthClient,
		service:        agentService,
	}, nil
}
// startWorkers registers the agent's outbox handlers and then starts the event
// bus. It is a no-op when called again after a successful start or when outbox
// consumption is disabled, and it requires the userauth zrpc client whenever
// the bus is enabled.
//
// Notes:
//  1. Register the handlers the agent consumes first (this also fills in the
//     memory.extract.requested service route).
//  2. Only the agent boundary is attached here: memory consumption remains
//     with cmd/memory, and task events stay publish-only writes into the task
//     outbox.
//  3. The bus is started only after registration, so the service cannot race
//     ahead and consume messages whose handlers are not attached yet.
func (r *agentRuntime) startWorkers(ctx context.Context) error {
	if r == nil || r.workersStarted {
		return nil
	}
	if r.eventBus == nil {
		log.Println("Agent outbox consumer is disabled")
		return nil
	}
	if r.userAuthClient == nil {
		return fmt.Errorf("agent outbox consumer requires userauth zrpc client")
	}
	if err := eventsvc.RegisterCoreOutboxHandlers(
		r.eventBus,
		r.outboxRepo,
		r.repoManager,
		r.agentRepo,
		r.cacheRepo,
		nil, // NOTE(review): dependency unused at the agent boundary — confirm against RegisterCoreOutboxHandlers' signature
		r.userAuthClient,
	); err != nil {
		return fmt.Errorf("register agent outbox handlers failed: %w", err)
	}
	r.eventBus.Start(ctx)
	r.workersStarted = true
	log.Println("Agent outbox consumer started")
	return nil
}
// close releases process-wide resources; it is safe on a nil receiver. The
// event bus is shut down before the redis connection so handlers draining
// during shutdown still have their cache available.
func (r *agentRuntime) close() {
	if r == nil {
		return
	}
	if bus := r.eventBus; bus != nil {
		bus.Close()
	}
	if client := r.redisClient; client != nil {
		// Best-effort close; nothing actionable at shutdown.
		_ = client.Close()
	}
}
// openAgentDBFromConfig opens the configured database and runs the agent
// process's schema steps in order: migrate agent-owned tables, migrate the
// agent outbox table, then verify (without migrating) the tables owned by
// other services.
func openAgentDBFromConfig() (*gorm.DB, error) {
	db, err := inits.OpenDBFromConfig()
	if err != nil {
		return nil, err
	}
	steps := []func(*gorm.DB) error{
		autoMigrateAgentOwnedTables,
		autoMigrateAgentOutboxTable,
		ensureAgentRuntimeDependencyTables,
	}
	for _, step := range steps {
		if err := step(db); err != nil {
			return nil, err
		}
	}
	return db, nil
}
// autoMigrateAgentOwnedTables migrates the tables this agent process owns,
// with foreign-key creation suppressed during the migration.
//
// Notes:
//  1. On startup the standalone agent process only fills in its own schema; it
//     does not force foreign-key constraints onto a historical database.
//  2. Production/local history may contain old chat_history rows whose
//     agent_chat record is gone; hard-adding the FK would block startup.
//  3. During migration the app keeps the application-layer chat_id join
//     semantics; real orphan cleanup and FK backfill belong to a separate data
//     governance script.
func autoMigrateAgentOwnedTables(db *gorm.DB) error {
	if db == nil {
		return fmt.Errorf("agent database is not initialized")
	}
	// Temporarily disable FK creation for this AutoMigrate and restore the
	// previous setting afterwards.
	originalDisableFK := db.Config.DisableForeignKeyConstraintWhenMigrating
	db.Config.DisableForeignKeyConstraintWhenMigrating = true
	defer func() {
		db.Config.DisableForeignKeyConstraintWhenMigrating = originalDisableFK
	}()
	if err := db.AutoMigrate(
		&model.AgentChat{},
		&model.ChatHistory{},
		&model.AgentTimelineEvent{},
		&model.AgentScheduleState{},
		&model.ActiveScheduleSession{},
		&model.AgentStateSnapshotRecord{},
	); err != nil {
		return fmt.Errorf("auto migrate agent owned tables failed: %w", err)
	}
	return nil
}
// autoMigrateAgentOutboxTable migrates the agent outbox message model into the
// table name resolved from the agent service's outbox configuration.
func autoMigrateAgentOutboxTable(db *gorm.DB) error {
	cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceAgent)
	if !ok {
		return fmt.Errorf("resolve agent outbox config failed")
	}
	migrateErr := db.Table(cfg.TableName).AutoMigrate(&model.AgentOutboxMessage{})
	if migrateErr != nil {
		return fmt.Errorf("auto migrate agent outbox table failed for %s (%s): %w", cfg.Name, cfg.TableName, migrateErr)
	}
	return nil
}
// ensureAgentRuntimeDependencyTables fails fast when a table the agent process
// still reads/writes directly (task/schedule/active-scheduler ownership) is
// missing, instead of letting chat requests run against a half-initialized
// database. It only checks existence and never migrates tables the agent does
// not own; shrink this list as the remaining direct DB access moves to
// RPC/read-models.
func ensureAgentRuntimeDependencyTables(db *gorm.DB) error {
	type requiredTable struct {
		name  string
		model any
	}
	required := []requiredTable{
		{name: "tasks", model: &model.Task{}},
		{name: "task_classes", model: &model.TaskClass{}},
		// NOTE(review): "task_items" pairs with TaskClassItem — presumably the
		// physical table name; confirm against the model's TableName.
		{name: "task_items", model: &model.TaskClassItem{}},
		{name: "schedules", model: &model.Schedule{}},
		{name: "schedule_events", model: &model.ScheduleEvent{}},
		{name: "active_schedule_triggers", model: &model.ActiveScheduleTrigger{}},
		{name: "active_schedule_previews", model: &model.ActiveSchedulePreview{}},
	}
	for _, table := range required {
		if !db.Migrator().HasTable(table.model) {
			return fmt.Errorf("agent runtime dependency table missing: %s", table.name)
		}
	}
	return nil
}
// buildAgentLLMService initializes the Eino AI hub and wraps it in the shared
// LLM service, reading the API key from the ARK_API_KEY environment variable
// and the endpoint/model settings from viper.
func buildAgentLLMService() (*llmservice.Service, error) {
	hub, err := inits.InitEino()
	if err != nil {
		return nil, err
	}
	opts := llmservice.Options{
		AIHub:             hub,
		APIKey:            os.Getenv("ARK_API_KEY"),
		BaseURL:           viper.GetString("agent.baseURL"),
		CourseVisionModel: viper.GetString("courseImport.visionModel"),
	}
	return llmservice.New(opts), nil
}
// buildAgentRAGService builds the RAG service for the agent process; when RAG
// is disabled by config it returns an inert service rather than an error.
func buildAgentRAGService(ctx context.Context) (*ragservice.Service, error) {
	cfg := ragconfig.LoadFromViper()
	if !cfg.Enabled {
		log.Println("RAG service is disabled for agent")
		return ragservice.New(ragservice.Options{}), nil
	}
	logger := log.Default()
	deps := ragservice.FactoryDeps{
		Logger:   logger,
		Observer: ragservice.NewLoggerObserver(logger),
	}
	svc, err := ragservice.NewFromConfig(ctx, cfg, deps)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize agent RAG service: %w", err)
	}
	log.Printf("Agent RAG runtime initialized: store=%s embed=%s reranker=%s", cfg.Store, cfg.EmbedProvider, cfg.RerankerProvider)
	return svc, nil
}
// buildAgentEventBus assembles the routed outbox bus for the agent service. A
// nil routed bus means outbox consumption is disabled by configuration; only
// construction errors are returned as errors.
func buildAgentEventBus(outboxRepo *outboxinfra.Repository) (eventsvc.OutboxBus, error) {
	cfg := kafkabus.LoadConfig()
	agentBus, err := eventsvc.NewServiceOutboxBus(outboxRepo, cfg, outboxinfra.ServiceAgent)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize outbox event bus for service %s: %w", outboxinfra.ServiceAgent, err)
	}
	buses := make(map[string]eventsvc.OutboxBus, 1)
	if agentBus != nil {
		buses[outboxinfra.ServiceAgent] = agentBus
	}
	routed := eventsvc.NewRoutedOutboxBus(buses)
	if routed == nil {
		log.Println("Agent outbox event bus is disabled")
	}
	return routed, nil
}
// buildAgentOutboxPublisher returns an outbox-backed publisher for agent
// events, or nil when kafka is disabled or no repository is available
// (callers treat nil as "publishing off").
func buildAgentOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
	cfg := kafkabus.LoadConfig()
	if outboxRepo == nil || !cfg.Enabled {
		return nil
	}
	return &repositoryOutboxPublisher{repo: outboxRepo, maxRetry: cfg.MaxRetry}
}
// buildTaskOutboxPublisher returns the publisher used for task-domain events.
// Its wiring is currently byte-for-byte identical to the agent publisher
// (same repository, same kafka retry budget), so it delegates instead of
// duplicating the body; split the implementations again if the two domains
// ever diverge (e.g. separate repos or retry policies).
func buildTaskOutboxPublisher(outboxRepo *outboxinfra.Repository) outboxinfra.EventPublisher {
	return buildAgentOutboxPublisher(outboxRepo)
}
// repositoryOutboxPublisher implements outboxinfra.EventPublisher by writing
// events into the outbox repository (transactional-outbox pattern); a relay
// elsewhere ships the persisted messages to kafka.
type repositoryOutboxPublisher struct {
	repo     *outboxinfra.Repository // destination outbox table access
	maxRetry int                     // retry budget recorded on each message
}
// Publish validates and normalizes the request, then persists it as an outbox
// message for later relay delivery. The aggregate id falls back to the message
// key, and the event version falls back to the infra default.
func (p *repositoryOutboxPublisher) Publish(ctx context.Context, req outboxinfra.PublishRequest) error {
	if p == nil || p.repo == nil {
		return fmt.Errorf("outbox publisher is not initialized")
	}
	kind := strings.TrimSpace(req.EventType)
	if kind == "" {
		return fmt.Errorf("eventType is empty")
	}
	version := strings.TrimSpace(req.EventVersion)
	if version == "" {
		version = outboxinfra.DefaultEventVersion
	}
	key := strings.TrimSpace(req.MessageKey)
	aggregate := strings.TrimSpace(req.AggregateID)
	if aggregate == "" {
		// Fall back to the message key so downstream routing always has an id.
		aggregate = key
	}
	body, err := json.Marshal(req.Payload)
	if err != nil {
		return err
	}
	payload := outboxinfra.OutboxEventPayload{
		EventID:      strings.TrimSpace(req.EventID),
		EventType:    kind,
		EventVersion: version,
		AggregateID:  aggregate,
		Payload:      body,
	}
	_, err = p.repo.CreateMessage(ctx, kind, key, payload, p.maxRetry)
	return err
}
// configureAgentService finishes wiring the AgentService with its runtime
// collaborators: agent state store, tool registry (including the web-search
// provider chosen from config), schedule provider, compaction store,
// quick-task deps, and the memory reader. It is a no-op on a nil service.
func configureAgentService(
	agentService *agentsv.AgentService,
	ragRuntime ragservice.Runtime,
	agentRepo *rootdao.AgentDAO,
	cacheRepo *rootdao.CacheDAO,
	taskClient agentsv.TaskRPCClient,
	taskClassClient agentsv.TaskClassAgentRPCClient,
	scheduleClient agentsv.ScheduleAgentRPCClient,
	memoryReaderClient ports.MemoryReaderClient,
	memoryCfg memorymodel.Config,
	memoryObserver memoryobserve.Observer,
	memoryMetrics memoryobserve.MetricsRecorder,
) {
	if agentService == nil {
		return
	}
	agentService.SetAgentStateStore(rootdao.NewAgentStateStoreAdapter(cacheRepo))
	// Select the web-search provider from config; an unknown provider or a
	// missing API key degrades to the mock provider instead of failing startup.
	var webSearchProvider web.SearchProvider
	webProvider := viper.GetString("websearch.provider")
	switch webProvider {
	case "bocha":
		bochaKey := viper.GetString("websearch.apiKey")
		if bochaKey == "" {
			log.Println("WebSearch: 博查 API Key 为空,降级为 mock")
			webSearchProvider = &web.MockProvider{}
		} else {
			webSearchProvider = web.NewBochaProvider(bochaKey, "")
			log.Println("WebSearch provider: bocha")
		}
	case "mock", "":
		webSearchProvider = &web.MockProvider{}
		log.Println("WebSearch provider: mock模拟模式")
	default:
		log.Printf("WebSearch provider %q 未识别,降级为 mock", webProvider)
		webSearchProvider = &web.MockProvider{}
	}
	agentService.SetToolRegistry(agenttools.NewDefaultRegistryWithDeps(agenttools.DefaultRegistryDeps{
		RAGRuntime:        ragRuntime,
		WebSearchProvider: webSearchProvider,
		TaskClassWriteDeps: agenttools.TaskClassWriteDeps{
			UpsertTaskClass: agentsv.NewTaskClassRPCUpsertFunc(taskClassClient),
		},
	}))
	agentService.SetScheduleProvider(agentsv.NewScheduleRPCProvider(scheduleClient, taskClassClient))
	agentService.SetCompactionStore(agentRepo)
	agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
	agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}

View File

@@ -12,10 +12,10 @@ import (
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/inits"
memorymodule "github.com/LoveLosita/smartflow/backend/memory"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
memoryrpc "github.com/LoveLosita/smartflow/backend/services/memory/rpc"
memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"

View File

@@ -15,6 +15,7 @@ import (
"github.com/LoveLosita/smartflow/backend/dao"
"github.com/LoveLosita/smartflow/backend/gateway/api"
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
gatewaycourse "github.com/LoveLosita/smartflow/backend/gateway/client/course"
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
@@ -26,9 +27,6 @@ import (
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/inits"
"github.com/LoveLosita/smartflow/backend/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
"github.com/LoveLosita/smartflow/backend/middleware"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/pkg"
@@ -47,6 +45,9 @@ import (
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
"github.com/LoveLosita/smartflow/backend/services/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
"github.com/LoveLosita/smartflow/backend/shared/ports"
@@ -55,6 +56,11 @@ import (
"gorm.io/gorm"
)
const (
gatewayAgentRPCChatEnabledKey = "agent.rpc.chat.enabled"
gatewayAgentRPCAPIEnabledKey = "agent.rpc.api.enabled"
)
// appRuntime 承载一次进程启动所需的依赖图。
//
// 职责边界:
@@ -69,8 +75,6 @@ type appRuntime struct {
agentCache *dao.AgentCache
manager *dao.RepoManager
outboxRepo *outboxinfra.Repository
eventBus eventsvc.OutboxBus
memoryModule *memory.Module
limiter *pkg.RateLimiter
handlers *api.ApiHandlers
userAuthClient *gatewayuserauth.Client
@@ -112,8 +116,11 @@ func StartAPI() {
runtime.startHTTP(ctx)
}
// StartWorker 只启动后台异步能力,不注册 Gin 路由
// 当前只包含单体残留域 agent outbox relay / Kafka consumermemory worker 已迁到 cmd/memory。
// StartWorker 保留历史 worker 入口,但阶段 6 后不再拥有 agent / memory 消费边界
// 当前语义:
// 1. agent outbox relay / consumer 已迁到 cmd/agent
// 2. memory worker 已迁到 cmd/memory
// 3. 该入口仅用于兼容旧启动命令,后续可在 gateway 收口阶段删除。
func StartWorker() {
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop()
@@ -138,10 +145,10 @@ func mustBuildRuntime(ctx context.Context) *appRuntime {
// buildRuntime 装配应用依赖图,但不启动 HTTP 或后台循环。
//
// 步骤说明:
// 1. 先初始化配置、数据库、Redis、模型、RAG、memory 等基础设施;
// 2. 再构造 DAO / Service / agent 依赖
// 1. 先初始化配置、数据库、Redis 等 gateway 必需基础设施;
// 2. 再构造各服务 zrpc client并按开关决定是否装配 agent 本地 fallback
// 3. 最后构造 HTTP handlers供 api/all 模式按需启动;
// 4. worker 模式暂时也复用完整依赖图,避免同轮迁移拆出两套装配逻辑
// 4. worker 模式暂时也复用 gateway 依赖图,但不再启动 agent / memory worker
func buildRuntime(ctx context.Context) (*appRuntime, error) {
if err := loadConfig(); err != nil {
return nil, err
@@ -158,54 +165,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
}
limiter := pkg.NewRateLimiter(rdb)
aiHub, err := inits.InitEino()
if err != nil {
return nil, fmt.Errorf("failed to initialize Eino: %w", err)
}
llmService := llmservice.New(llmservice.Options{
AIHub: aiHub,
APIKey: os.Getenv("ARK_API_KEY"),
BaseURL: viper.GetString("agent.baseURL"),
CourseVisionModel: viper.GetString("courseImport.visionModel"),
})
ragService, err := buildRAGService(ctx)
if err != nil {
return nil, err
}
ragRuntime := ragService.Runtime()
memoryCfg := memory.LoadConfigFromViper()
memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
memoryMetrics := memoryobserve.NewMetricsRegistry()
memoryModule := memory.NewModuleWithObserve(
db,
llmService.ProClient(),
ragRuntime,
memoryCfg,
memory.ObserveDeps{
Observer: memoryObserver,
Metrics: memoryMetrics,
},
)
// DAO 层初始化。
cacheRepo := dao.NewCacheDAO(rdb)
agentCacheRepo := dao.NewAgentCache(rdb)
_ = db.Use(middleware.NewGormCachePlugin(cacheRepo))
taskRepo := dao.NewTaskDAO(db)
taskClassRepo := dao.NewTaskClassDAO(db)
scheduleRepo := dao.NewScheduleDAO(db)
manager := dao.NewManager(db)
agentRepo := dao.NewAgentDAO(db)
outboxRepo := outboxinfra.NewRepository(db)
eventBus, err := buildAgentEventBus(outboxRepo)
if err != nil {
return nil, err
}
eventPublisher := buildCoreOutboxPublisher(outboxRepo)
// Service 层初始化。
userAuthClient, err := gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
@@ -265,6 +227,14 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
if err != nil {
return nil, fmt.Errorf("failed to initialize memory zrpc client: %w", err)
}
agentRPCClient, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
Target: viper.GetString("agent.rpc.target"),
Timeout: viper.GetDuration("agent.rpc.timeout"),
})
if err != nil {
return nil, fmt.Errorf("failed to initialize agent zrpc client: %w", err)
}
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
Target: viper.GetString("activeScheduler.rpc.target"),
@@ -273,6 +243,45 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
if err != nil {
return nil, fmt.Errorf("failed to initialize active-scheduler zrpc client: %w", err)
}
var agentRepo *dao.AgentDAO
var agentCacheRepo *dao.AgentCache
var manager *dao.RepoManager
var outboxRepo *outboxinfra.Repository
var agentService *agentsv.AgentService
if shouldBuildGatewayAgentFallback() {
log.Println("Gateway agent RPC fallback is enabled; building local AgentService compatibility path")
aiHub, err := inits.InitEino()
if err != nil {
return nil, fmt.Errorf("failed to initialize Eino: %w", err)
}
llmService := llmservice.New(llmservice.Options{
AIHub: aiHub,
APIKey: os.Getenv("ARK_API_KEY"),
BaseURL: viper.GetString("agent.baseURL"),
CourseVisionModel: viper.GetString("courseImport.visionModel"),
})
ragService, err := buildRAGService(ctx)
if err != nil {
return nil, err
}
ragRuntime := ragService.Runtime()
memoryCfg := memory.LoadConfigFromViper()
memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
memoryMetrics := memoryobserve.NewMetricsRegistry()
agentCacheRepo = dao.NewAgentCache(rdb)
taskRepo := dao.NewTaskDAO(db)
taskClassRepo := dao.NewTaskClassDAO(db)
scheduleRepo := dao.NewScheduleDAO(db)
manager = dao.NewManager(db)
agentRepo = dao.NewAgentDAO(db)
outboxRepo = outboxinfra.NewRepository(db)
// 1. fallback 仅用于 RPC 开关关闭时的迁移期回退,不再启动 agent outbox event bus。
// 2. fallback 产生的事件仍写入服务级 outbox 表,由 cmd/agent / cmd/task 独立进程负责 relay / consume。
eventPublisher := buildCoreOutboxPublisher(outboxRepo)
if err := eventsvc.RegisterTaskUrgencyPromoteRoute(); err != nil {
return nil, fmt.Errorf("failed to register task outbox route: %w", err)
}
@@ -280,7 +289,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
agentService := agentsv.NewAgentService(
agentService = agentsv.NewAgentService(
llmService,
agentRepo,
taskRepo,
@@ -347,7 +356,10 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
return nil, err
}
agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
handlers := buildAPIHandlers(taskClient, taskClassClient, courseClient, scheduleClient, agentService, memoryClient, activeSchedulerClient, notificationClient)
} else {
log.Println("Gateway agent local fallback is disabled; /agent HTTP routes use cmd/agent zrpc")
}
handlers := buildAPIHandlers(taskClient, taskClassClient, courseClient, scheduleClient, agentService, agentRPCClient, memoryClient, activeSchedulerClient, notificationClient)
runtime := &appRuntime{
db: db,
@@ -358,20 +370,23 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
agentCache: agentCacheRepo,
manager: manager,
outboxRepo: outboxRepo,
eventBus: eventBus,
memoryModule: memoryModule,
limiter: limiter,
handlers: handlers,
userAuthClient: userAuthClient,
}
if runtime.eventBus != nil {
if err := runtime.registerEventHandlers(); err != nil {
return nil, err
}
}
return runtime, nil
}
// shouldBuildGatewayAgentFallback reports whether the gateway must keep the
// local AgentService compatibility path assembled.
//
// Contract:
//  1. reads startup configuration only; there is no runtime re-toggling;
//  2. if either the chat or the non-chat agent RPC switch is off, the fallback
//     is built conservatively so older environments can still boot;
//  3. with both switches on, local agent orchestration dependencies are
//     skipped and the gateway keeps only its HTTP/SSE facade.
func shouldBuildGatewayAgentFallback() bool {
	chatEnabled := viper.GetBool(gatewayAgentRPCChatEnabledKey)
	apiEnabled := viper.GetBool(gatewayAgentRPCAPIEnabledKey)
	return !(chatEnabled && apiEnabled)
}
func buildRAGService(ctx context.Context) (*ragservice.Service, error) {
ragCfg := ragconfig.LoadFromViper()
if !ragCfg.Enabled {
@@ -394,28 +409,6 @@ func buildRAGService(ctx context.Context) (*ragservice.Service, error) {
return ragService, nil
}
func buildAgentEventBus(outboxRepo *outboxinfra.Repository) (eventsvc.OutboxBus, error) {
// agent outbox 消费边界装配:
// 1. 单体残留在 CP1 后只消费 agent 自己的 outbox
// 2. memory.extract.requested 仍可被发布到 memory_outbox_messages但消费与 worker 已迁往 cmd/memory
// 3. kafka.enabled=false 时返回 nil业务按既有同步降级策略执行。
kafkaCfg := kafkabus.LoadConfig()
bus, err := eventsvc.NewServiceOutboxBus(outboxRepo, kafkaCfg, outboxinfra.ServiceAgent)
if err != nil {
return nil, fmt.Errorf("failed to initialize outbox event bus for service %s: %w", outboxinfra.ServiceAgent, err)
}
serviceBuses := make(map[string]eventsvc.OutboxBus, 1)
if bus != nil {
serviceBuses[outboxinfra.ServiceAgent] = bus
}
eventBus := eventsvc.NewRoutedOutboxBus(serviceBuses)
if eventBus == nil {
log.Println("Outbox event bus is disabled")
}
return eventBus, nil
}
// buildCoreOutboxPublisher 构造单体残留发布器。
//
// 职责边界:
@@ -823,7 +816,7 @@ func configureAgentService(
agentService.SetQuickTaskDeps(agentsv.NewTaskRPCQuickTaskDeps(taskClient))
// 1. agent 主链路读取记忆统一走 memory zrpc避免 CP3 后继续直连本进程 memory.Module
// 2. observer / metrics 继续复用启动期装配,保证注入侧观测在 RPC 切流后不丢;
// 3. memoryModule 仍保留在启动图中,作为迁移期依赖和后续回退面
// 3. gateway 不再组装 memory.Modulememory worker / 管理能力统一交给 cmd/memory
// 4. memory 服务暂不可用时,预取链路只记录警告并软降级,不阻断聊天主流程。
agentService.SetMemoryReader(agentsv.NewMemoryRPCReader(memoryReaderClient, memoryObserver, memoryMetrics), memoryCfg)
}
@@ -834,6 +827,7 @@ func buildAPIHandlers(
courseClient ports.CourseCommandClient,
scheduleClient ports.ScheduleCommandClient,
agentService *agentsv.AgentService,
agentRPCClient *gatewayagent.Client,
memoryClient ports.MemoryCommandClient,
activeSchedulerClient ports.ActiveSchedulerCommandClient,
notificationClient ports.NotificationCommandClient,
@@ -843,7 +837,7 @@ func buildAPIHandlers(
TaskClassHandler: api.NewTaskClassHandler(taskClassClient),
CourseHandler: api.NewCourseHandler(courseClient),
ScheduleHandler: api.NewScheduleAPI(scheduleClient),
AgentHandler: api.NewAgentHandler(agentService),
AgentHandler: api.NewAgentHandlerWithRPC(agentService, agentRPCClient),
MemoryHandler: api.NewMemoryHandler(memoryClient),
ActiveSchedule: api.NewActiveScheduleAPI(activeSchedulerClient),
Notification: api.NewNotificationAPI(notificationClient),
@@ -855,29 +849,8 @@ func (r *appRuntime) startWorkers(ctx context.Context) {
return
}
if r.eventBus != nil {
r.eventBus.Start(ctx)
log.Println("Outbox event bus started")
} else {
log.Println("Outbox event bus is disabled")
}
log.Println("Memory worker is managed by cmd/memory in phase 6 CP1")
}
func (r *appRuntime) registerEventHandlers() error {
// 调用目的:只注册仍留在单体残留域内的 outbox handleractive-scheduler / notification 已由各自独立进程管理消费边界。
if err := eventsvc.RegisterCoreOutboxHandlers(
r.eventBus,
r.outboxRepo,
r.manager,
r.agentRepo,
r.cacheRepo,
r.memoryModule,
r.userAuthClient,
); err != nil {
return err
}
return nil
log.Println("Gateway outbox worker is disabled; agent relay/consumer is managed by cmd/agent")
log.Println("Memory worker is managed by cmd/memory in phase 6")
}
func (r *appRuntime) startHTTP(ctx context.Context) {
@@ -889,7 +862,4 @@ func (r *appRuntime) close() {
if r == nil {
return
}
if r.eventBus != nil {
r.eventBus.Close()
}
}

View File

@@ -109,6 +109,15 @@ time:
# 智能体模型配置。
agent:
rpc:
listenOn: "0.0.0.0:9089"
endpoints:
- "127.0.0.1:9089"
timeout: 0s
chat:
enabled: true
api:
enabled: true
liteModel: "doubao-seed-2-0-code-preview-260215"
proModel: "doubao-seed-2-0-code-preview-260215"
maxModel: "doubao-seed-2-0-code-preview-260215"

View File

@@ -6,7 +6,7 @@ import (
"errors"
"fmt"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"strings"
"time"

View File

@@ -8,18 +8,30 @@ import (
"net/http"
"strconv"
"strings"
"sync"
"time"
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/spf13/viper"
"gorm.io/gorm"
)
const (
agentChatHeartbeatInterval = 5 * time.Second
agentRPCChatEnabledKey = "agent.rpc.chat.enabled"
agentRPCAPIEnabledKey = "agent.rpc.api.enabled"
)
type AgentHandler struct {
svc *agentsv.AgentService
rpcClient *gatewayagent.Client
rpcClientMu sync.Mutex
}
// NewAgentHandler 组装 AgentHandler。
@@ -29,6 +41,20 @@ func NewAgentHandler(svc *agentsv.AgentService) *AgentHandler {
}
}
// NewAgentHandlerWithRPC 组装带 agent RPC stream 适配能力的 AgentHandler。
//
// 职责边界:
// 1. HTTP / SSE 协议仍由 Gateway 持有;
// 2. agent RPC 作为 chat stream 与非 chat /agent/* 查询/命令的服务间通道;
// 3. svc 只用于 RPC 开关关闭时的迁移期 fallback当前默认可为 nil
// 4. rpcClient 为空时允许按配置懒加载,避免测试和旧装配必须提前构造 client。
func NewAgentHandlerWithRPC(svc *agentsv.AgentService, rpcClient *gatewayagent.Client) *AgentHandler {
return &AgentHandler{
svc: svc,
rpcClient: rpcClient,
}
}
func writeSSEData(w io.Writer, payload string) error {
_, err := io.WriteString(w, "data: "+payload+"\n\n")
return err
@@ -51,6 +77,13 @@ func mapResumeConfirmAction(action model.AgentResumeAction) string {
}
}
// agentChatStreamEvent is the single-value envelope the RPC receive goroutine
// hands to the SSE writer loop in streamAgentChatByRPC.
type agentChatStreamEvent struct {
	payload   string          // raw chunk payload, forwarded as one SSE data frame
	done      bool            // server signalled normal end of stream; writer emits [DONE]
	errorJSON json.RawMessage // server-side error body (already JSON); terminal when non-empty
	err       error           // gateway-side transport/receive error
}
func (api *AgentHandler) ChatAgent(c *gin.Context) {
// 1) 设置 SSE 响应头
c.Writer.Header().Set("Content-Type", "text/event-stream")
@@ -103,6 +136,16 @@ func (api *AgentHandler) ChatAgent(c *gin.Context) {
c.Writer.Header().Set("X-Conversation-ID", conversationID)
userID := c.GetInt("user_id")
if api.useAgentRPCChat() {
api.streamAgentChatByRPC(c, req, userID, conversationID)
return
}
if api.svc == nil {
writeAgentSSEError(c.Writer, errors.New("agent local fallback is disabled"))
flushSSEWriter(c.Writer)
return
}
outChan, errChan := api.svc.AgentChat(c.Request.Context(), req.Message, req.Thinking, req.Model, userID, conversationID, req.Extra)
// 4) 转发 SSE 流
@@ -115,22 +158,7 @@ func (api *AgentHandler) ChatAgent(c *gin.Context) {
select {
case err, ok := <-errChan:
if ok && err != nil {
// 4.1 统一 SSE 错误体:
// 4.1.1 默认按内部错误输出 message/type
// 4.1.2 若是 respond.Response含业务码额外透传 code便于前端识别 5xxxx 等自定义错误。
errorBody := map[string]any{
"message": err.Error(),
"type": "server_error",
}
var respErr respond.Response
if errors.As(err, &respErr) {
errorBody["code"] = respErr.Status
}
errPayload, _ := json.Marshal(map[string]any{
"error": errorBody,
})
_ = writeSSEData(w, string(errPayload))
_ = writeSSEData(w, "[DONE]")
writeAgentSSEError(w, err)
}
return false
case msg, ok := <-outChan:
@@ -152,6 +180,263 @@ func (api *AgentHandler) ChatAgent(c *gin.Context) {
})
}
// useAgentRPCChat reports whether the /agent chat SSE path should be bridged
// through the agent zrpc stream. Reads the viper key "agent.rpc.chat.enabled"
// on every call; a nil receiver is treated as disabled.
func (api *AgentHandler) useAgentRPCChat() bool {
	return api != nil && viper.GetBool(agentRPCChatEnabledKey)
}

// useAgentRPCAPI reports whether the non-chat /agent/* endpoints should call
// the agent zrpc service. Reads the viper key "agent.rpc.api.enabled" on every
// call; a nil receiver is treated as disabled.
func (api *AgentHandler) useAgentRPCAPI() bool {
	return api != nil && viper.GetBool(agentRPCAPIEnabledKey)
}
// streamAgentChatByRPC bridges the agent RPC server-stream onto the existing
// frontend SSE protocol.
//
// Responsibility split:
//  1. the gateway keeps owning the SSE headers, heartbeats and data-frame
//     writes;
//  2. the agent RPC only carries the inter-service chunk stream and never
//     exposes a Go channel across processes;
//  3. stream-setup failures and server-side error_json are still written with
//     the existing SSE error body, followed by [DONE].
func (api *AgentHandler) streamAgentChatByRPC(c *gin.Context, req model.UserSendMessageRequest, userID int, conversationID string) {
	client, err := api.getAgentRPCClient()
	if err != nil {
		writeAgentSSEError(c.Writer, err)
		flushSSEWriter(c.Writer)
		return
	}
	extraJSON, err := json.Marshal(req.Extra)
	if err != nil {
		writeAgentSSEError(c.Writer, err)
		flushSSEWriter(c.Writer)
		return
	}
	stream, err := client.Chat(c.Request.Context(), agentcontracts.ChatRequest{
		Message:        req.Message,
		Thinking:       req.Thinking,
		Model:          req.Model,
		UserID:         userID,
		ConversationID: conversationID,
		ExtraJSON:      extraJSON,
	})
	if err != nil {
		writeAgentSSEError(c.Writer, err)
		flushSSEWriter(c.Writer)
		return
	}
	// Receive goroutine pumps RPC chunks into recvCh so the writer loop below
	// can multiplex them with heartbeats and client disconnects.
	recvCh := make(chan agentChatStreamEvent, 1)
	requestCtx := c.Request.Context()
	go func() {
		defer close(recvCh)
		// sendEvent blocks until the writer consumes the event or the HTTP
		// request context is cancelled, so this goroutine cannot leak.
		sendEvent := func(event agentChatStreamEvent) bool {
			select {
			case recvCh <- event:
				return true
			case <-requestCtx.Done():
				return false
			}
		}
		for {
			chunk, recvErr := stream.Recv()
			if recvErr != nil {
				// io.EOF is a clean server-side close; anything else is
				// surfaced to the writer as a terminal error event.
				if errors.Is(recvErr, io.EOF) {
					return
				}
				sendEvent(agentChatStreamEvent{err: recvErr})
				return
			}
			if !sendEvent(agentChatStreamEvent{
				payload: chunk.Payload,
				done:    chunk.Done,
				// Defensive copy: ErrorJSON crosses a goroutine boundary.
				errorJSON: append(json.RawMessage(nil), chunk.ErrorJSON...),
			}) {
				return
			}
			// done / error_json are terminal frames; stop receiving after them.
			if chunk.Done || len(chunk.ErrorJSON) > 0 {
				return
			}
		}
	}()
	heartbeat := time.NewTicker(agentChatHeartbeatInterval)
	defer heartbeat.Stop()
	c.Stream(func(w io.Writer) bool {
		select {
		case event, ok := <-recvCh:
			if !ok {
				// Channel closed on clean EOF without a done frame: stop the stream.
				return false
			}
			if event.err != nil {
				writeAgentSSEError(w, event.err)
				return false
			}
			if event.payload != "" {
				if err := writeSSEData(w, event.payload); err != nil {
					return false
				}
			}
			if len(event.errorJSON) > 0 {
				// Normalize whatever the service streamed into the frontend's
				// {"error":{...}} envelope before terminating.
				_ = writeSSEData(w, string(normalizeAgentRPCErrorJSON(event.errorJSON)))
				_ = writeSSEData(w, "[DONE]")
				return false
			}
			if event.done {
				_ = writeSSEData(w, "[DONE]")
				return false
			}
			return true
		case <-c.Request.Context().Done():
			return false
		case <-heartbeat.C:
			// Comment-only ping keeps proxies from idling out the SSE socket.
			_, _ = io.WriteString(w, ": ping\n\n")
			flushSSEWriter(c.Writer)
			return true
		}
	})
}
// writeAgentSSEError writes the unified SSE error envelope followed by [DONE].
// A nil err is a no-op so callers may pass through optional failures.
func writeAgentSSEError(w io.Writer, err error) {
	if err == nil {
		return
	}
	_ = writeSSEData(w, string(buildAgentErrorEnvelopeJSON(errorCodeFromError(err), err.Error(), "server_error")))
	_ = writeSSEData(w, "[DONE]")
}

// getAgentRPCClient returns the shared agent zrpc client, lazily building it
// from viper config ("agent.rpc.*") on first use. The mutex makes the lazy
// init safe under concurrent handler invocations.
func (api *AgentHandler) getAgentRPCClient() (*gatewayagent.Client, error) {
	if api == nil {
		return nil, errors.New("agent handler is not initialized")
	}
	api.rpcClientMu.Lock()
	defer api.rpcClientMu.Unlock()
	if api.rpcClient != nil {
		return api.rpcClient, nil
	}
	client, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
		Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
		Target:    viper.GetString("agent.rpc.target"),
		Timeout:   viper.GetDuration("agent.rpc.timeout"),
	})
	if err != nil {
		return nil, err
	}
	api.rpcClient = client
	return api.rpcClient, nil
}

// normalizeAgentRPCErrorJSON coerces whatever error payload the agent service
// streamed back into the frontend's {"error":{code,message,type}} envelope.
// Non-JSON payloads are wrapped verbatim as the message; a nested "error"
// object is flattened field by field, tolerating both code/status and
// message/info key spellings.
func normalizeAgentRPCErrorJSON(raw json.RawMessage) json.RawMessage {
	trimmed := strings.TrimSpace(string(raw))
	if trimmed == "" {
		return buildAgentErrorEnvelopeJSON("", "agent rpc service returned empty error payload", "server_error")
	}
	var payload map[string]any
	if err := json.Unmarshal([]byte(trimmed), &payload); err != nil {
		// Not JSON at all: treat the raw text as the error message.
		return buildAgentErrorEnvelopeJSON("", trimmed, "server_error")
	}
	if nested, ok := payload["error"].(map[string]any); ok {
		return buildAgentErrorEnvelopeJSON(
			firstNonEmptyString(stringFromAny(nested["code"]), stringFromAny(nested["status"])),
			firstNonEmptyString(stringFromAny(nested["message"]), stringFromAny(nested["info"]), "agent rpc service returned error"),
			firstNonEmptyString(stringFromAny(nested["type"]), "server_error"),
		)
	}
	return buildAgentErrorEnvelopeJSON(
		firstNonEmptyString(stringFromAny(payload["code"]), stringFromAny(payload["status"])),
		firstNonEmptyString(stringFromAny(payload["message"]), stringFromAny(payload["info"]), trimmed),
		firstNonEmptyString(stringFromAny(payload["type"]), "server_error"),
	)
}
// buildAgentErrorEnvelopeJSON renders the frontend-facing SSE error envelope
// {"error":{code?,message,type}}. Blank message/type fall back to defaults and
// a blank code is omitted entirely; a marshal failure (not expected for this
// shape) degrades to a fixed generic envelope.
func buildAgentErrorEnvelopeJSON(code string, message string, errorType string) json.RawMessage {
	msg := strings.TrimSpace(message)
	if msg == "" {
		msg = "agent stream error"
	}
	typ := strings.TrimSpace(errorType)
	if typ == "" {
		typ = "server_error"
	}
	body := map[string]any{
		"message": msg,
		"type":    typ,
	}
	if trimmed := strings.TrimSpace(code); trimmed != "" {
		body["code"] = trimmed
	}
	encoded, err := json.Marshal(map[string]any{"error": body})
	if err != nil {
		return json.RawMessage(`{"error":{"message":"agent stream error","type":"server_error"}}`)
	}
	return encoded
}
// errorCodeFromError extracts the project business status code when err wraps
// a respond.Response; any other error yields "".
func errorCodeFromError(err error) string {
	var resp respond.Response
	if !errors.As(err, &resp) {
		return ""
	}
	return strings.TrimSpace(resp.Status)
}
func stringFromAny(value any) string {
switch typed := value.(type) {
case string:
return strings.TrimSpace(typed)
case json.Number:
return strings.TrimSpace(typed.String())
case float64:
return strings.TrimSpace(strconv.FormatFloat(typed, 'f', -1, 64))
case float32:
return strings.TrimSpace(strconv.FormatFloat(float64(typed), 'f', -1, 32))
case int:
return strconv.Itoa(typed)
case int32:
return strconv.FormatInt(int64(typed), 10)
case int64:
return strconv.FormatInt(typed, 10)
case uint:
return strconv.FormatUint(uint64(typed), 10)
case uint32:
return strconv.FormatUint(uint64(typed), 10)
case uint64:
return strconv.FormatUint(typed, 10)
default:
return ""
}
}
// firstNonEmptyString returns the first candidate that is non-blank after
// trimming, or "" when all candidates are blank.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		candidate = strings.TrimSpace(candidate)
		if candidate != "" {
			return candidate
		}
	}
	return ""
}
func flushSSEWriter(w io.Writer) {
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
}
// writeAgentHTTPError maps service errors onto HTTP responses for the non-chat
// /agent/* endpoints. ConversationNotFound keeps its dedicated 404 body so the
// frontend can distinguish it from parameter errors; everything else defers to
// respond.DealWithError. A nil err is a no-op.
func writeAgentHTTPError(c *gin.Context, err error) {
	if err == nil {
		return
	}
	var respErr respond.Response
	if errors.As(err, &respErr) && respErr.Status == respond.ConversationNotFound.Status {
		c.JSON(http.StatusNotFound, respErr)
		return
	}
	respond.DealWithError(c, err)
}
// GetConversationMeta 返回单个会话的元信息(标题、消息数、最近消息时间等)。
// 设计说明:
// 1) 该接口用于配合 SSE 聊天链路:标题异步生成后,前端可通过 conversation_id 拉取;
@@ -172,8 +457,31 @@ func (api *AgentHandler) GetConversationMeta(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
defer cancel()
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
meta, err := client.GetConversationMeta(ctx, agentcontracts.ConversationQueryRequest{
UserID: userID,
ConversationID: conversationID,
})
if err != nil {
writeAgentHTTPError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, meta))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
// 4. 调 service 查询会话元信息。
meta, err := api.svc.GetConversationMeta(ctx, userID, conversationID)
meta, err := localSvc.GetConversationMeta(ctx, userID, conversationID)
if err != nil {
// 会话不存在或越权访问时返回 404让前端能和“参数格式错误”区分开。
if errors.Is(err, gorm.ErrRecordNotFound) {
@@ -241,8 +549,33 @@ func (api *AgentHandler) GetConversationList(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
defer cancel()
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
resp, err := client.GetConversationList(ctx, agentcontracts.ConversationListRequest{
UserID: userID,
Page: page,
PageSize: pageSize,
Status: status,
})
if err != nil {
writeAgentHTTPError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, resp))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
// 5. 调 service 查询并返回统一响应结构。
resp, err := api.svc.GetConversationList(ctx, userID, page, pageSize, status)
resp, err := localSvc.GetConversationList(ctx, userID, page, pageSize, status)
if err != nil {
respond.DealWithError(c, err)
return
@@ -268,7 +601,30 @@ func (api *AgentHandler) GetConversationTimeline(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
defer cancel()
timeline, err := api.svc.GetConversationTimeline(ctx, userID, conversationID)
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
timeline, err := client.GetConversationTimeline(ctx, agentcontracts.ConversationQueryRequest{
UserID: userID,
ConversationID: conversationID,
})
if err != nil {
writeAgentHTTPError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, timeline))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
timeline, err := localSvc.GetConversationTimeline(ctx, userID, conversationID)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
c.JSON(http.StatusNotFound, respond.ConversationNotFound)
@@ -302,8 +658,31 @@ func (api *AgentHandler) GetSchedulePlanPreview(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
defer cancel()
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
preview, err := client.GetSchedulePlanPreview(ctx, agentcontracts.ConversationQueryRequest{
UserID: userID,
ConversationID: conversationID,
})
if err != nil {
writeAgentHTTPError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, preview))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
// 4. 调 service 查询并返回统一响应结构。
preview, err := api.svc.GetSchedulePlanPreview(ctx, userID, conversationID)
preview, err := localSvc.GetSchedulePlanPreview(ctx, userID, conversationID)
if err != nil {
respond.DealWithError(c, err)
return
@@ -324,7 +703,34 @@ func (api *AgentHandler) GetContextStats(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
defer cancel()
statsJSON, err := api.svc.GetContextStats(ctx, userID, conversationID)
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
statsJSON, err := client.GetContextStats(ctx, agentcontracts.ConversationQueryRequest{
UserID: userID,
ConversationID: conversationID,
})
if err != nil {
writeAgentHTTPError(c, err)
return
}
if strings.TrimSpace(statsJSON) == "" {
statsJSON = "null"
}
var raw json.RawMessage = json.RawMessage(statsJSON)
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, raw))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
statsJSON, err := localSvc.GetContextStats(ctx, userID, conversationID)
if err != nil {
respond.DealWithError(c, err)
return
@@ -373,10 +779,65 @@ func (api *AgentHandler) SaveScheduleState(c *gin.Context) {
ctx, cancel := context.WithTimeout(c.Request.Context(), 3*time.Second)
defer cancel()
if api.useAgentRPCAPI() {
client, err := api.getAgentRPCClient()
if err != nil {
writeAgentHTTPError(c, err)
return
}
if err := client.SaveScheduleState(ctx, agentcontracts.SaveScheduleStateRequest{
UserID: userID,
ConversationID: conversationID,
Items: toAgentContractScheduleStateItems(req.Items),
}); err != nil {
writeAgentHTTPError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, nil))
return
}
localSvc, ok := api.localAgentService(c)
if !ok {
return
}
// 5. 调用 service 层执行 Load → 应用放置项 → Save。
if err := api.svc.SaveScheduleState(ctx, userID, conversationID, req.Items); err != nil {
if err := localSvc.SaveScheduleState(ctx, userID, conversationID, req.Items); err != nil {
respond.DealWithError(c, err)
return
}
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, nil))
}
// localAgentService returns the migration-period local fallback service.
//
// Contract:
//  1. only serves the path taken when the agent RPC switches are off;
//  2. svc may legitimately be nil once RPC traffic is fully switched over, so
//     every local call must pass through this check first;
//  3. a missing fallback reports an error (startup config and runtime wiring
//     disagree) instead of letting the handler panic on a nil service.
func (api *AgentHandler) localAgentService(c *gin.Context) (*agentsv.AgentService, bool) {
	if api == nil || api.svc == nil {
		respond.DealWithError(c, errors.New("agent local fallback is disabled"))
		return nil, false
	}
	return api.svc, true
}
// toAgentContractScheduleStateItems converts the HTTP-layer placed items into
// the agent contract shape. Empty input maps to nil so the RPC payload omits
// the field.
func toAgentContractScheduleStateItems(items []model.SaveScheduleStatePlacedItem) []agentcontracts.SaveScheduleStatePlacedItem {
	if len(items) == 0 {
		return nil
	}
	converted := make([]agentcontracts.SaveScheduleStatePlacedItem, len(items))
	for i := range items {
		src := &items[i]
		converted[i] = agentcontracts.SaveScheduleStatePlacedItem{
			TaskItemID:         src.TaskItemID,
			Week:               src.Week,
			DayOfWeek:          src.DayOfWeek,
			StartSection:       src.StartSection,
			EndSection:         src.EndSection,
			EmbedCourseEventID: src.EmbedCourseEventID,
		}
	}
	return converted
}

View File

@@ -0,0 +1,192 @@
package agent
import (
"context"
"encoding/json"
"errors"
"io"
"strings"
"time"
agentpb "github.com/LoveLosita/smartflow/backend/services/agent/rpc/pb"
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
const (
	// defaultEndpoint is the fallback agent zrpc address used when the config
	// supplies neither endpoints nor a target.
	defaultEndpoint = "127.0.0.1:9089"
	// defaultTimeout (0) leaves timeout handling to zrpc; negative configured
	// values are normalized to it in NewClient.
	defaultTimeout = 0
)

// ClientConfig carries the startup configuration for the agent zrpc client.
type ClientConfig struct {
	Endpoints []string      // direct endpoint list; blank entries are dropped
	Target    string        // resolver target string, passed straight to zrpc
	Timeout   time.Duration // per-call timeout; negative values are treated as defaultTimeout
}

// Client is the gateway-side streaming adapter over the agent zrpc service.
//
// Responsibility split:
//  1. it only performs cross-process gRPC stream calls and knows nothing about
//     Gin or SSE;
//  2. ChatChunk payloads stay exactly as the agent service emits them; the
//     gateway API layer turns them into SSE data frames;
//  3. when agent.rpc.chat.enabled is off, callers can still take the local
//     AgentService fallback path.
type Client struct {
	rpc agentpb.AgentClient
}
// NewClient builds the agent zrpc client from cfg.
//
// Normalization applied here:
//  1. negative timeouts collapse to defaultTimeout (0);
//  2. endpoint entries are trimmed and blanks dropped;
//  3. with neither endpoints nor a target configured, defaultEndpoint is used;
//  4. NonBlock dialing keeps gateway startup from hanging when the agent
//     service is unreachable.
func NewClient(cfg ClientConfig) (*Client, error) {
	timeout := cfg.Timeout
	if timeout < 0 {
		timeout = defaultTimeout
	}
	endpoints := normalizeEndpoints(cfg.Endpoints)
	target := strings.TrimSpace(cfg.Target)
	if len(endpoints) == 0 && target == "" {
		endpoints = []string{defaultEndpoint}
	}
	zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
		Endpoints: endpoints,
		Target:    target,
		NonBlock:  true,
		// zrpc expects the timeout in milliseconds.
		Timeout: int64(timeout / time.Millisecond),
	})
	if err != nil {
		return nil, err
	}
	return &Client{rpc: agentpb.NewAgentClient(zclient.Conn())}, nil
}
// Ping performs a liveness probe against the agent zrpc service and maps any
// transport error back to an in-project error.
// NOTE(review): the request argument is &agentpb.StatusResponse{} — a response
// message type. Confirm against the generated pb that Ping really accepts
// StatusResponse and not a dedicated request/empty message.
func (c *Client) Ping(ctx context.Context) error {
	if err := c.ensureReady(); err != nil {
		return err
	}
	_, err := c.rpc.Ping(ctx, &agentpb.StatusResponse{})
	return responseFromRPCError(err)
}
// Chat opens the agent chat server-stream.
//
// Contract:
//  1. req.ExtraJSON is defensively copied so the caller may reuse its buffer;
//  2. stream-open errors are mapped back via responseFromRPCError;
//  3. the returned ChatStream is read chunk-by-chunk via Recv.
func (c *Client) Chat(ctx context.Context, req agentcontracts.ChatRequest) (*ChatStream, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	stream, err := c.rpc.Chat(ctx, &agentpb.ChatRequest{
		Message:        req.Message,
		Thinking:       req.Thinking,
		Model:          req.Model,
		UserId:         int32(req.UserID),
		ConversationId: req.ConversationID,
		ExtraJson:      append([]byte(nil), req.ExtraJSON...),
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	return &ChatStream{stream: stream}, nil
}
// GetConversationMeta fetches one conversation's metadata as raw JSON.
func (c *Client) GetConversationMeta(ctx context.Context, req agentcontracts.ConversationQueryRequest) (json.RawMessage, error) {
	resp, err := c.callJSON(ctx, c.rpc.GetConversationMeta, req)
	return jsonFromResponse(resp, err)
}

// GetConversationList fetches a paginated conversation list as raw JSON.
func (c *Client) GetConversationList(ctx context.Context, req agentcontracts.ConversationListRequest) (json.RawMessage, error) {
	resp, err := c.callJSON(ctx, c.rpc.GetConversationList, req)
	return jsonFromResponse(resp, err)
}

// GetConversationTimeline fetches one conversation's timeline as raw JSON.
func (c *Client) GetConversationTimeline(ctx context.Context, req agentcontracts.ConversationQueryRequest) (json.RawMessage, error) {
	resp, err := c.callJSON(ctx, c.rpc.GetConversationTimeline, req)
	return jsonFromResponse(resp, err)
}

// GetSchedulePlanPreview fetches the schedule plan preview as raw JSON.
func (c *Client) GetSchedulePlanPreview(ctx context.Context, req agentcontracts.ConversationQueryRequest) (json.RawMessage, error) {
	resp, err := c.callJSON(ctx, c.rpc.GetSchedulePlanPreview, req)
	return jsonFromResponse(resp, err)
}

// GetContextStats fetches context statistics and returns them as a JSON string
// ("null" when the service sends no data).
func (c *Client) GetContextStats(ctx context.Context, req agentcontracts.ConversationQueryRequest) (string, error) {
	resp, err := c.callJSON(ctx, c.rpc.GetContextStats, req)
	raw, err := jsonFromResponse(resp, err)
	if err != nil {
		return "", err
	}
	return string(raw), nil
}

// SaveScheduleState pushes placed schedule items to the agent service; only
// the mapped error is surfaced, the response body is ignored.
func (c *Client) SaveScheduleState(ctx context.Context, req agentcontracts.SaveScheduleStateRequest) error {
	_, err := c.callJSON(ctx, c.rpc.SaveScheduleState, req)
	return responseFromRPCError(err)
}
// ChatStream wraps the generated gRPC chat stream so callers depend on the
// shared contracts package instead of the pb types.
type ChatStream struct {
	stream agentpb.Agent_ChatClient // underlying server-stream; nil means not initialized
}
// Recv reads the next chunk from the agent RPC chat stream.
//
// Return semantics:
//  1. io.EOF means the server closed the stream normally;
//  2. any other error has already been mapped back to an in-project error;
//  3. chunk.Done is passed through as-is — the caller decides whether to emit
//     [DONE].
func (s *ChatStream) Recv() (agentcontracts.ChatChunk, error) {
	if s == nil || s.stream == nil {
		return agentcontracts.ChatChunk{}, errors.New("agent zrpc stream is not initialized")
	}
	raw, err := s.stream.Recv()
	switch {
	case errors.Is(err, io.EOF):
		return agentcontracts.ChatChunk{}, io.EOF
	case err != nil:
		return agentcontracts.ChatChunk{}, responseFromRPCError(err)
	case raw == nil:
		return agentcontracts.ChatChunk{}, errors.New("agent zrpc service returned empty chunk")
	}
	return agentcontracts.ChatChunk{
		Payload:   raw.Payload,
		Done:      raw.Done,
		ErrorJSON: append([]byte(nil), raw.ErrorJson...),
	}, nil
}
// ensureReady guards every RPC entry point against a zero-value or half-built
// client.
func (c *Client) ensureReady() error {
	if c != nil && c.rpc != nil {
		return nil
	}
	return errors.New("agent zrpc client is not initialized")
}
// callJSON marshals payload into the shared JSONRequest envelope and invokes
// the given unary RPC method. Running ensureReady first gives every exported
// wrapper the same nil-safety.
func (c *Client) callJSON(ctx context.Context, fn func(context.Context, *agentpb.JSONRequest, ...grpc.CallOption) (*agentpb.JSONResponse, error), payload any) (*agentpb.JSONResponse, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	raw, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	return fn(ctx, &agentpb.JSONRequest{PayloadJson: raw})
}
// jsonFromResponse unwraps a unary JSONResponse: RPC errors are mapped back to
// in-project errors, a nil response is reported explicitly, and an empty
// DataJson is normalized to the JSON literal "null".
func jsonFromResponse(resp *agentpb.JSONResponse, rpcErr error) (json.RawMessage, error) {
	switch {
	case rpcErr != nil:
		return nil, responseFromRPCError(rpcErr)
	case resp == nil:
		return nil, errors.New("agent zrpc service returned empty JSON response")
	case len(resp.DataJson) == 0:
		return json.RawMessage("null"), nil
	}
	return json.RawMessage(resp.DataJson), nil
}
func normalizeEndpoints(values []string) []string {
endpoints := make([]string, 0, len(values))
for _, value := range values {
trimmed := strings.TrimSpace(value)
if trimmed != "" {
endpoints = append(endpoints, trimmed)
}
}
return endpoints
}

View File

@@ -0,0 +1,94 @@
package agent
import (
"errors"
"fmt"
"strings"
"github.com/LoveLosita/smartflow/backend/respond"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// responseFromRPCError maps a gRPC error coming back from the agent service
// into a project-internal error.
//
// Scope:
//  1. used only at the gateway edge;
//  2. business errors are restored to respond.Response where possible so the
//     API layer can keep reusing DealWithError;
//  3. transport/internal failures are wrapped as plain errors instead of
//     being misreported as user-correctable parameter problems.
func responseFromRPCError(err error) error {
	if err == nil {
		return nil
	}
	st, ok := status.FromError(err)
	if !ok {
		return wrapRPCError(err)
	}
	if resp, matched := responseFromStatus(st); matched {
		return resp
	}
	message := strings.TrimSpace(st.Message())
	switch st.Code() {
	case codes.Internal, codes.Unknown, codes.Unavailable, codes.DeadlineExceeded, codes.DataLoss, codes.Unimplemented:
		if message == "" {
			message = "agent zrpc service internal error"
		}
		return wrapRPCError(errors.New(message))
	default:
		if message == "" {
			message = "agent zrpc service rejected request"
		}
		return respond.Response{Status: grpcCodeToRespondStatus(st.Code()), Info: message}
	}
}
// responseFromStatus tries to rebuild a respond.Response from the ErrorInfo
// detail the agent server attaches. It reports false when no usable detail
// is present, letting the caller fall back to code-based mapping.
func responseFromStatus(st *status.Status) (respond.Response, bool) {
	if st == nil {
		return respond.Response{}, false
	}
	for _, detail := range st.Details() {
		info, isErrorInfo := detail.(*errdetails.ErrorInfo)
		if !isErrorInfo {
			continue
		}
		statusValue := strings.TrimSpace(info.Reason)
		if statusValue == "" {
			statusValue = grpcCodeToRespondStatus(st.Code())
		}
		message := strings.TrimSpace(st.Message())
		if message == "" && info.Metadata != nil {
			// The server mirrors respond.Info into metadata["info"].
			message = strings.TrimSpace(info.Metadata["info"])
		}
		if message == "" {
			message = statusValue
		}
		return respond.Response{Status: statusValue, Info: message}, true
	}
	return respond.Response{}, false
}
// grpcCodeToRespondStatus maps a gRPC status code onto the project's respond
// status string; unrecognized client-side codes collapse to "400".
func grpcCodeToRespondStatus(code codes.Code) string {
	switch code {
	case codes.Internal, codes.Unknown, codes.DataLoss:
		return "500"
	case codes.Unauthenticated:
		return respond.ErrUnauthorized.Status
	case codes.NotFound:
		return respond.ConversationNotFound.Status
	case codes.InvalidArgument:
		return respond.MissingParam.Status
	default:
		return "400"
	}
}
// wrapRPCError decorates a transport-level failure with the agent-call
// context while keeping the original error reachable via errors.Is/As.
func wrapRPCError(err error) error {
	if err == nil {
		return nil
	}
	wrapped := fmt.Errorf("调用 agent zrpc 服务失败: %w", err)
	return wrapped
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/LoveLosita/smartflow/backend/dao"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/memory"
"github.com/LoveLosita/smartflow/backend/services/memory"
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
"github.com/LoveLosita/smartflow/backend/shared/ports"
)

View File

@@ -13,9 +13,9 @@ import (
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
"github.com/LoveLosita/smartflow/backend/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/services/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"github.com/spf13/viper"
"gorm.io/gorm"
)

View File

@@ -0,0 +1,42 @@
// Wire contract between the gateway and the standalone agent zrpc service.
syntax = "proto3";
package smartflow.agent;
option go_package = "github.com/LoveLosita/smartflow/backend/services/agent/rpc/pb";
// Agent exposes the streaming chat RPC plus JSON-envelope facade calls.
service Agent {
// Ping is a startup reachability check.
// NOTE(review): it reuses StatusResponse (an empty message) as its request
// type — confirm a dedicated request message is not wanted.
rpc Ping(StatusResponse) returns (StatusResponse);
// Chat streams chunks of an agent chat turn back to the gateway.
rpc Chat(ChatRequest) returns (stream ChatChunk);
// The following unary RPCs share a raw-JSON request/response envelope.
rpc GetConversationMeta(JSONRequest) returns (JSONResponse);
rpc GetConversationList(JSONRequest) returns (JSONResponse);
rpc GetConversationTimeline(JSONRequest) returns (JSONResponse);
rpc GetSchedulePlanPreview(JSONRequest) returns (JSONResponse);
rpc GetContextStats(JSONRequest) returns (JSONResponse);
rpc SaveScheduleState(JSONRequest) returns (JSONResponse);
}
// ChatRequest carries one user chat turn.
message ChatRequest {
string message = 1;
string thinking = 2;
string model = 3;
int32 user_id = 4;
string conversation_id = 5;
// Optional JSON-encoded extras; empty or "null" means none.
bytes extra_json = 6;
}
// ChatChunk is one streamed unit of the chat reply.
message ChatChunk {
string payload = 1;
bool done = 2;
// JSON-encoded error body when the turn fails mid-stream.
bytes error_json = 3;
}
// StatusResponse is an intentionally empty health-check message.
message StatusResponse {
}
// JSONRequest wraps an arbitrary JSON request payload.
message JSONRequest {
bytes payload_json = 1;
}
// JSONResponse wraps an arbitrary JSON response payload.
message JSONResponse {
bytes data_json = 1;
}

View File

@@ -0,0 +1,76 @@
package rpc
import (
"errors"
"log"
"strings"
"github.com/LoveLosita/smartflow/backend/respond"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"gorm.io/gorm"
)
// errAgentServiceNotReady is returned when the handler was wired without its
// AgentService dependency.
var errAgentServiceNotReady = errors.New("agent service dependency not initialized")

// agentErrorDomain tags ErrorInfo details so the gateway can recognize
// agent-originated business statuses.
const agentErrorDomain = "smartflow.agent"
// grpcErrorFromServiceError converts an agent-internal error into a gRPC
// status.
//
// Scope:
//  1. respond.Response keeps its project status/info so the gateway can
//     reconstruct it;
//  2. unclassified errors expose only a generic internal message — details
//     stay in the service log;
//  3. business rules are not re-evaluated here; semantics belong to agent/sv.
func grpcErrorFromServiceError(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, gorm.ErrRecordNotFound):
		return grpcErrorFromResponse(respond.ConversationNotFound)
	}
	var businessResp respond.Response
	if errors.As(err, &businessResp) {
		return grpcErrorFromResponse(businessResp)
	}
	log.Printf("agent rpc internal error: %v", err)
	return status.Error(codes.Internal, "agent service internal error")
}
// grpcErrorFromResponse encodes a respond.Response as a gRPC status with an
// ErrorInfo detail carrying the original status/info for the gateway.
func grpcErrorFromResponse(resp respond.Response) error {
	message := strings.TrimSpace(resp.Info)
	if message == "" {
		message = strings.TrimSpace(resp.Status)
	}
	base := status.New(grpcCodeFromRespondStatus(resp.Status), message)
	enriched, detailErr := base.WithDetails(&errdetails.ErrorInfo{
		Domain: agentErrorDomain,
		Reason: resp.Status,
		Metadata: map[string]string{
			"info": resp.Info,
		},
	})
	if detailErr != nil {
		// Could not attach details; the bare status still carries code+message.
		return base.Err()
	}
	return enriched.Err()
}
// grpcCodeFromRespondStatus picks the gRPC code for a respond status string.
// Unknown 5xx-style statuses become Internal; everything else defaults to
// InvalidArgument.
func grpcCodeFromRespondStatus(statusValue string) codes.Code {
	trimmed := strings.TrimSpace(statusValue)
	switch trimmed {
	case respond.MissingToken.Status, respond.InvalidToken.Status, respond.InvalidClaims.Status,
		respond.ErrUnauthorized.Status, respond.WrongTokenType.Status, respond.UserLoggedOut.Status:
		return codes.Unauthenticated
	case respond.ConversationNotFound.Status:
		return codes.NotFound
	case respond.MissingParam.Status, respond.WrongParamType.Status, respond.ParamTooLong.Status,
		respond.WrongUserID.Status, respond.MissingConversationID.Status:
		return codes.InvalidArgument
	}
	if strings.HasPrefix(trimmed, "5") {
		return codes.Internal
	}
	return codes.InvalidArgument
}

View File

@@ -0,0 +1,256 @@
package rpc
import (
"context"
"encoding/json"
"errors"
"strings"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
"github.com/LoveLosita/smartflow/backend/services/agent/rpc/pb"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
)
// Handler adapts AgentService to the generated gRPC Agent server interface.
type Handler struct {
	pb.UnimplementedAgentServer
	// svc is the business-layer agent service; nil until NewHandler wires it,
	// which ensureReady guards against.
	svc *agentsv.AgentService
}
// NewHandler wires an AgentService into a gRPC handler.
func NewHandler(svc *agentsv.AgentService) *Handler {
	h := &Handler{svc: svc}
	return h
}
// Ping lets callers confirm during startup that the agent zrpc endpoint is
// wired and reachable.
func (h *Handler) Ping(ctx context.Context, req *pb.StatusResponse) (*pb.StatusResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	resp := &pb.StatusResponse{}
	return resp, nil
}
// Chat adapts the agent's internal channel output into a gRPC server stream.
//
// Scope:
//  1. the RPC layer only does protocol conversion — graph orchestration, tool
//     calls and persistence semantics stay inside agent/sv;
//  2. AgentService keeps using channels to decouple node output; across the
//     process boundary everything is funneled through stream.Send;
//  3. business errors travel to the gateway as an error_json chunk so the
//     gateway can keep its original SSE error body.
func (h *Handler) Chat(req *pb.ChatRequest, stream pb.Agent_ChatServer) error {
	if err := h.ensureReady(req); err != nil {
		return err
	}
	extra, err := decodeExtra(req.ExtraJson)
	if err != nil {
		// Malformed extra_json is a caller problem, not an internal failure.
		return grpcErrorFromServiceError(respond.WrongParamType)
	}
	outChan, errChan := h.svc.AgentChat(
		stream.Context(),
		req.Message,
		req.Thinking,
		req.Model,
		int(req.UserId),
		req.ConversationId,
		extra,
	)
	// Loop until both channels are drained; each closed channel is nil-ed so
	// its select case stops firing.
	for outChan != nil || errChan != nil {
		select {
		case err, ok := <-errChan:
			if !ok {
				// A closed errChan means no more async errors; nil it so this
				// case no longer matches. Reading a closed channel repeatedly
				// would busy-spin and slow the long-lived stream.
				errChan = nil
				continue
			}
			if err == nil {
				continue
			}
			// First real error ends the turn: emit a Done chunk carrying the
			// JSON error body and stop streaming.
			errorJSON := buildStreamErrorJSON(err)
			return stream.Send(&pb.ChatChunk{Done: true, ErrorJson: errorJSON})
		case payload, ok := <-outChan:
			if !ok {
				// NOTE(review): this assignment is dead — the function returns
				// on the next line — kept as-is for the nil-channel pattern.
				outChan = nil
				return stream.Send(&pb.ChatChunk{Done: true})
			}
			if err := stream.Send(&pb.ChatChunk{Payload: payload}); err != nil {
				return err
			}
			if strings.TrimSpace(payload) == "[DONE]" {
				// The legacy path already pushes the OpenAI-compatible [DONE]
				// marker as an ordinary payload. The RPC layer just forwards
				// it and ends the stream here so the gateway does not append
				// a duplicate [DONE] frame.
				return nil
			}
		case <-stream.Context().Done():
			return stream.Context().Err()
		}
	}
	return stream.Send(&pb.ChatChunk{Done: true})
}
// GetConversationMeta fetches metadata for one conversation.
//
// The RPC layer only handles JSON contract (de)serialization; ownership
// checks, 404 semantics and DTO assembly stay in AgentService, while the
// gateway keeps HTTP query binding and final response wrapping.
func (h *Handler) GetConversationMeta(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var query agentcontracts.ConversationQueryRequest
	if err := h.decodeJSONRequest(req, &query); err != nil {
		return nil, err
	}
	meta, svcErr := h.svc.GetConversationMeta(ctx, query.UserID, query.ConversationID)
	if svcErr != nil {
		return nil, grpcErrorFromServiceError(svcErr)
	}
	return jsonResponseFromPayload(meta)
}
// GetConversationList returns the caller's paginated conversation list.
func (h *Handler) GetConversationList(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var query agentcontracts.ConversationListRequest
	if err := h.decodeJSONRequest(req, &query); err != nil {
		return nil, err
	}
	list, svcErr := h.svc.GetConversationList(ctx, query.UserID, query.Page, query.PageSize, query.Status)
	if svcErr != nil {
		return nil, grpcErrorFromServiceError(svcErr)
	}
	return jsonResponseFromPayload(list)
}
// GetConversationTimeline returns the event timeline of one conversation.
func (h *Handler) GetConversationTimeline(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var query agentcontracts.ConversationQueryRequest
	if err := h.decodeJSONRequest(req, &query); err != nil {
		return nil, err
	}
	timeline, svcErr := h.svc.GetConversationTimeline(ctx, query.UserID, query.ConversationID)
	if svcErr != nil {
		return nil, grpcErrorFromServiceError(svcErr)
	}
	return jsonResponseFromPayload(timeline)
}
// GetSchedulePlanPreview returns the schedule preview inside a conversation.
func (h *Handler) GetSchedulePlanPreview(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var query agentcontracts.ConversationQueryRequest
	if err := h.decodeJSONRequest(req, &query); err != nil {
		return nil, err
	}
	preview, svcErr := h.svc.GetSchedulePlanPreview(ctx, query.UserID, query.ConversationID)
	if svcErr != nil {
		return nil, grpcErrorFromServiceError(svcErr)
	}
	return jsonResponseFromPayload(preview)
}
// GetContextStats returns the conversation's token-statistics JSON. The
// service already produces a JSON string, so it is forwarded as raw bytes
// rather than re-marshalled.
func (h *Handler) GetContextStats(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var query agentcontracts.ConversationQueryRequest
	if err := h.decodeJSONRequest(req, &query); err != nil {
		return nil, err
	}
	statsJSON, svcErr := h.svc.GetContextStats(ctx, query.UserID, query.ConversationID)
	if svcErr != nil {
		return nil, grpcErrorFromServiceError(svcErr)
	}
	trimmed := strings.TrimSpace(statsJSON)
	return &pb.JSONResponse{DataJson: []byte(trimmed)}, nil
}
// SaveScheduleState persists the drag-and-drop schedule layout of a
// conversation.
//
// Scope:
//  1. the RPC layer only converts the cross-process contract into the
//     AgentService model;
//  2. snapshot reads, ownership checks, coordinate conversion and the Redis
//     write-back all stay in AgentService;
//  3. success returns an empty JSON response so the gateway keeps its
//     data=null HTTP semantics.
func (h *Handler) SaveScheduleState(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
	var body agentcontracts.SaveScheduleStateRequest
	if err := h.decodeJSONRequest(req, &body); err != nil {
		return nil, err
	}
	items := toModelScheduleStateItems(body.Items)
	if err := h.svc.SaveScheduleState(ctx, body.UserID, body.ConversationID, items); err != nil {
		return nil, grpcErrorFromServiceError(err)
	}
	return &pb.JSONResponse{}, nil
}
// ensureReady rejects calls when the handler lacks its service dependency or
// the request message is absent.
func (h *Handler) ensureReady(req any) error {
	switch {
	case h == nil || h.svc == nil:
		return grpcErrorFromServiceError(errAgentServiceNotReady)
	case req == nil:
		return grpcErrorFromServiceError(respond.MissingParam)
	default:
		return nil
	}
}
// decodeJSONRequest validates readiness and unmarshals the JSON envelope into
// out, mapping empty payloads to MissingParam and bad JSON to WrongParamType.
func (h *Handler) decodeJSONRequest(req *pb.JSONRequest, out any) error {
	if err := h.ensureReady(req); err != nil {
		return err
	}
	raw := req.PayloadJson
	if len(raw) == 0 {
		return grpcErrorFromServiceError(respond.MissingParam)
	}
	if unmarshalErr := json.Unmarshal(raw, out); unmarshalErr != nil {
		return grpcErrorFromServiceError(respond.WrongParamType)
	}
	return nil
}
// jsonResponseFromPayload marshals payload into the shared JSONResponse
// envelope; marshal failures surface as internal gRPC errors.
func jsonResponseFromPayload(payload any) (*pb.JSONResponse, error) {
	encoded, marshalErr := json.Marshal(payload)
	if marshalErr != nil {
		return nil, grpcErrorFromServiceError(marshalErr)
	}
	return &pb.JSONResponse{DataJson: encoded}, nil
}
// toModelScheduleStateItems converts contract-level placed items into the
// AgentService model; empty input stays nil so no-op saves carry no slice.
func toModelScheduleStateItems(items []agentcontracts.SaveScheduleStatePlacedItem) []model.SaveScheduleStatePlacedItem {
	if len(items) == 0 {
		return nil
	}
	converted := make([]model.SaveScheduleStatePlacedItem, len(items))
	for i, src := range items {
		converted[i] = model.SaveScheduleStatePlacedItem{
			TaskItemID:         src.TaskItemID,
			Week:               src.Week,
			DayOfWeek:          src.DayOfWeek,
			StartSection:       src.StartSection,
			EndSection:         src.EndSection,
			EmbedCourseEventID: src.EmbedCourseEventID,
		}
	}
	return converted
}
// decodeExtra parses the optional extra_json payload of a ChatRequest.
//
// nil, blank, and JSON "null" payloads (with or without surrounding
// whitespace) all mean "no extras" and yield a nil map with no error; any
// other content must be a JSON object. Backward compatible: the previous
// exact-match "null" check rejected whitespace-padded payloads that carry no
// data.
func decodeExtra(raw []byte) (map[string]any, error) {
	trimmed := strings.TrimSpace(string(raw))
	if trimmed == "" || trimmed == "null" {
		return nil, nil
	}
	var extra map[string]any
	if err := json.Unmarshal([]byte(trimmed), &extra); err != nil {
		return nil, err
	}
	return extra, nil
}
// buildStreamErrorJSON renders err as the JSON error body streamed to the
// gateway; respond.Response errors contribute their status code and info.
// A hard-coded fallback body guards against marshal failure.
func buildStreamErrorJSON(err error) []byte {
	body := map[string]any{
		"message": err.Error(),
		"type":    "server_error",
	}
	var respErr respond.Response
	if errors.As(err, &respErr) {
		body["code"] = respErr.Status
		if respErr.Info != "" {
			body["message"] = respErr.Info
		}
	}
	encoded, marshalErr := json.Marshal(map[string]any{"error": body})
	if marshalErr != nil {
		return []byte(`{"error":{"message":"agent stream error","type":"server_error"}}`)
	}
	return encoded
}

View File

@@ -0,0 +1,68 @@
package pb
import proto "github.com/golang/protobuf/proto"
// Keeps the legacy golang/protobuf import referenced.
var _ = proto.Marshal

// Compile-time assertion that this file targets proto package version 3.
const _ = proto.ProtoPackageIsVersion3
// ChatRequest carries one user chat turn across the agent zrpc boundary.
type ChatRequest struct {
	Message        string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
	Thinking       string `protobuf:"bytes,2,opt,name=thinking,proto3" json:"thinking,omitempty"`
	Model          string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
	UserId         int32  `protobuf:"varint,4,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	ConversationId string `protobuf:"bytes,5,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
	// ExtraJson holds optional JSON-encoded extras; empty or "null" means none.
	ExtraJson []byte `protobuf:"bytes,6,opt,name=extra_json,json=extraJson,proto3" json:"extra_json,omitempty"`
	// XXX_* fields mirror legacy golang/protobuf wire bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ChatRequest) Reset()         { *m = ChatRequest{} }
func (m *ChatRequest) String() string { return proto.CompactTextString(m) }
func (*ChatRequest) ProtoMessage()    {}
// ChatChunk is one streamed unit of a chat reply: payload text, a done flag,
// and — on failure — a JSON-encoded error body.
type ChatChunk struct {
	Payload   string `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
	Done      bool   `protobuf:"varint,2,opt,name=done,proto3" json:"done,omitempty"`
	ErrorJson []byte `protobuf:"bytes,3,opt,name=error_json,json=errorJson,proto3" json:"error_json,omitempty"`
	// XXX_* fields mirror legacy golang/protobuf wire bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ChatChunk) Reset()         { *m = ChatChunk{} }
func (m *ChatChunk) String() string { return proto.CompactTextString(m) }
func (*ChatChunk) ProtoMessage()    {}
// StatusResponse is the intentionally empty message used by the Ping
// health-check RPC (as both request and response).
type StatusResponse struct {
	// XXX_* fields mirror legacy golang/protobuf wire bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
func (*StatusResponse) ProtoMessage()    {}
// JSONRequest wraps an arbitrary JSON request payload for the unary facade
// RPCs.
type JSONRequest struct {
	PayloadJson []byte `protobuf:"bytes,1,opt,name=payload_json,json=payloadJson,proto3" json:"payload_json,omitempty"`
	// XXX_* fields mirror legacy golang/protobuf wire bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *JSONRequest) Reset()         { *m = JSONRequest{} }
func (m *JSONRequest) String() string { return proto.CompactTextString(m) }
func (*JSONRequest) ProtoMessage()    {}
// JSONResponse wraps an arbitrary JSON response payload; an empty DataJson is
// interpreted by the gateway as JSON null.
type JSONResponse struct {
	DataJson []byte `protobuf:"bytes,1,opt,name=data_json,json=dataJson,proto3" json:"data_json,omitempty"`
	// XXX_* fields mirror legacy golang/protobuf wire bookkeeping.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *JSONResponse) Reset()         { *m = JSONResponse{} }
func (m *JSONResponse) String() string { return proto.CompactTextString(m) }
func (*JSONResponse) ProtoMessage()    {}

View File

@@ -0,0 +1,313 @@
package pb
import (
context "context"
io "io"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// Fully qualified method names of the smartflow.agent.Agent service, used by
// both the client invocations and the server handler descriptors below.
const (
	Agent_Ping_FullMethodName                    = "/smartflow.agent.Agent/Ping"
	Agent_Chat_FullMethodName                    = "/smartflow.agent.Agent/Chat"
	Agent_GetConversationMeta_FullMethodName     = "/smartflow.agent.Agent/GetConversationMeta"
	Agent_GetConversationList_FullMethodName     = "/smartflow.agent.Agent/GetConversationList"
	Agent_GetConversationTimeline_FullMethodName = "/smartflow.agent.Agent/GetConversationTimeline"
	Agent_GetSchedulePlanPreview_FullMethodName  = "/smartflow.agent.Agent/GetSchedulePlanPreview"
	Agent_GetContextStats_FullMethodName         = "/smartflow.agent.Agent/GetContextStats"
	Agent_SaveScheduleState_FullMethodName       = "/smartflow.agent.Agent/SaveScheduleState"
)
// AgentClient is the client-side contract of the Agent service: a Ping
// health check, the server-streaming Chat, and six unary JSON-facade calls.
type AgentClient interface {
	Ping(ctx context.Context, in *StatusResponse, opts ...grpc.CallOption) (*StatusResponse, error)
	Chat(ctx context.Context, in *ChatRequest, opts ...grpc.CallOption) (Agent_ChatClient, error)
	GetConversationMeta(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetConversationList(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetConversationTimeline(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetSchedulePlanPreview(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetContextStats(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	SaveScheduleState(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
}
// agentClient is the concrete AgentClient backed by a grpc.ClientConnInterface.
type agentClient struct {
	cc grpc.ClientConnInterface
}

// NewAgentClient wraps cc in the generated Agent client.
func NewAgentClient(cc grpc.ClientConnInterface) AgentClient {
	return &agentClient{cc: cc}
}

// invokeJSON shares the unary call pattern of all JSON facade methods.
// On error it returns a nil message: a partially populated response is
// meaningless to callers (this matches standard protoc-gen-go-grpc output;
// the previous code returned `out, err` together).
func (c *agentClient) invokeJSON(ctx context.Context, method string, in *JSONRequest, opts []grpc.CallOption) (*JSONResponse, error) {
	out := new(JSONResponse)
	if err := c.cc.Invoke(ctx, method, in, out, opts...); err != nil {
		return nil, err
	}
	return out, nil
}

// Ping issues the health-check unary RPC.
func (c *agentClient) Ping(ctx context.Context, in *StatusResponse, opts ...grpc.CallOption) (*StatusResponse, error) {
	out := new(StatusResponse)
	if err := c.cc.Invoke(ctx, Agent_Ping_FullMethodName, in, out, opts...); err != nil {
		return nil, err
	}
	return out, nil
}

// Chat opens the server-streaming chat RPC, sends the single request message,
// then half-closes the send side.
func (c *agentClient) Chat(ctx context.Context, in *ChatRequest, opts ...grpc.CallOption) (Agent_ChatClient, error) {
	stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_Chat_FullMethodName, opts...)
	if err != nil {
		return nil, err
	}
	client := &agentChatClient{stream}
	if err := client.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := client.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return client, nil
}

func (c *agentClient) GetConversationMeta(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_GetConversationMeta_FullMethodName, in, opts)
}

func (c *agentClient) GetConversationList(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_GetConversationList_FullMethodName, in, opts)
}

func (c *agentClient) GetConversationTimeline(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_GetConversationTimeline_FullMethodName, in, opts)
}

func (c *agentClient) GetSchedulePlanPreview(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_GetSchedulePlanPreview_FullMethodName, in, opts)
}

func (c *agentClient) GetContextStats(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_GetContextStats_FullMethodName, in, opts)
}

func (c *agentClient) SaveScheduleState(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	return c.invokeJSON(ctx, Agent_SaveScheduleState_FullMethodName, in, opts)
}
// Agent_ChatClient is the typed receive side of the Chat stream.
type Agent_ChatClient interface {
	Recv() (*ChatChunk, error)
	grpc.ClientStream
}
// agentChatClient adapts a raw grpc.ClientStream to the typed Chat receive
// API.
type agentChatClient struct {
	grpc.ClientStream
}

// Recv reads the next ChatChunk; io.EOF signals normal end of stream.
func (x *agentChatClient) Recv() (*ChatChunk, error) {
	chunk := new(ChatChunk)
	err := x.ClientStream.RecvMsg(chunk)
	if err == nil {
		return chunk, nil
	}
	if err == io.EOF {
		// Normal termination: surface io.EOF unchanged so callers can detect
		// end-of-stream with a plain comparison.
		return nil, io.EOF
	}
	return nil, err
}
// AgentServer is the server-side contract implementations must satisfy;
// embed UnimplementedAgentServer for forward compatibility.
type AgentServer interface {
	Ping(context.Context, *StatusResponse) (*StatusResponse, error)
	Chat(*ChatRequest, Agent_ChatServer) error
	GetConversationMeta(context.Context, *JSONRequest) (*JSONResponse, error)
	GetConversationList(context.Context, *JSONRequest) (*JSONResponse, error)
	GetConversationTimeline(context.Context, *JSONRequest) (*JSONResponse, error)
	GetSchedulePlanPreview(context.Context, *JSONRequest) (*JSONResponse, error)
	GetContextStats(context.Context, *JSONRequest) (*JSONResponse, error)
	SaveScheduleState(context.Context, *JSONRequest) (*JSONResponse, error)
}
// UnimplementedAgentServer provides forward-compatible defaults: every method
// rejects with codes.Unimplemented. Embed it in real servers so adding RPCs
// to the service does not break compilation.
type UnimplementedAgentServer struct{}

func (UnimplementedAgentServer) Ping(context.Context, *StatusResponse) (*StatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
}
func (UnimplementedAgentServer) Chat(*ChatRequest, Agent_ChatServer) error {
	return status.Errorf(codes.Unimplemented, "method Chat not implemented")
}
func (UnimplementedAgentServer) GetConversationMeta(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetConversationMeta not implemented")
}
func (UnimplementedAgentServer) GetConversationList(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetConversationList not implemented")
}
func (UnimplementedAgentServer) GetConversationTimeline(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetConversationTimeline not implemented")
}
func (UnimplementedAgentServer) GetSchedulePlanPreview(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSchedulePlanPreview not implemented")
}
func (UnimplementedAgentServer) GetContextStats(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetContextStats not implemented")
}
func (UnimplementedAgentServer) SaveScheduleState(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SaveScheduleState not implemented")
}
// RegisterAgentServer registers srv under the Agent service descriptor.
func RegisterAgentServer(s grpc.ServiceRegistrar, srv AgentServer) {
	desc := &Agent_ServiceDesc
	s.RegisterService(desc, srv)
}
// Agent_ChatServer is the typed send side of the Chat stream.
type Agent_ChatServer interface {
	Send(*ChatChunk) error
	grpc.ServerStream
}
// agentChatServer adapts a raw grpc.ServerStream to the typed Send API.
type agentChatServer struct {
	grpc.ServerStream
}

// Send writes one ChatChunk onto the underlying stream.
func (s *agentChatServer) Send(chunk *ChatChunk) error {
	return s.ServerStream.SendMsg(chunk)
}
// _Agent_Ping_Handler dispatches the Ping unary RPC, routing through the
// interceptor chain when one is installed.
func _Agent_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(StatusResponse)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).Ping(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_Ping_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).Ping(ctx, r.(*StatusResponse))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_GetConversationMeta_Handler dispatches the unary RPC, routing
// through the interceptor chain when one is installed.
func _Agent_GetConversationMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).GetConversationMeta(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_GetConversationMeta_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).GetConversationMeta(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_GetConversationList_Handler dispatches the unary RPC, routing
// through the interceptor chain when one is installed.
func _Agent_GetConversationList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).GetConversationList(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_GetConversationList_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).GetConversationList(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_GetConversationTimeline_Handler dispatches the unary RPC, routing
// through the interceptor chain when one is installed.
func _Agent_GetConversationTimeline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).GetConversationTimeline(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_GetConversationTimeline_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).GetConversationTimeline(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_GetSchedulePlanPreview_Handler dispatches the unary RPC, routing
// through the interceptor chain when one is installed.
func _Agent_GetSchedulePlanPreview_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).GetSchedulePlanPreview(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_GetSchedulePlanPreview_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).GetSchedulePlanPreview(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_GetContextStats_Handler dispatches the unary RPC, routing through
// the interceptor chain when one is installed.
func _Agent_GetContextStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).GetContextStats(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_GetContextStats_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).GetContextStats(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_SaveScheduleState_Handler dispatches the unary RPC, routing through
// the interceptor chain when one is installed.
func _Agent_SaveScheduleState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(JSONRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AgentServer).SaveScheduleState(ctx, req)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Agent_SaveScheduleState_FullMethodName}
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(AgentServer).SaveScheduleState(ctx, r.(*JSONRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
// _Agent_Chat_Handler receives the single ChatRequest then hands the raw
// stream, wrapped as a typed server stream, to the Chat implementation.
func _Agent_Chat_Handler(srv interface{}, stream grpc.ServerStream) error {
	req := new(ChatRequest)
	if err := stream.RecvMsg(req); err != nil {
		return err
	}
	return srv.(AgentServer).Chat(req, &agentChatServer{stream})
}
// Agent_ServiceDesc binds the Agent service's method and stream handlers for
// registration via RegisterAgentServer.
var Agent_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "smartflow.agent.Agent",
	HandlerType: (*AgentServer)(nil),
	Methods: []grpc.MethodDesc{
		{MethodName: "Ping", Handler: _Agent_Ping_Handler},
		{MethodName: "GetConversationMeta", Handler: _Agent_GetConversationMeta_Handler},
		{MethodName: "GetConversationList", Handler: _Agent_GetConversationList_Handler},
		{MethodName: "GetConversationTimeline", Handler: _Agent_GetConversationTimeline_Handler},
		{MethodName: "GetSchedulePlanPreview", Handler: _Agent_GetSchedulePlanPreview_Handler},
		{MethodName: "GetContextStats", Handler: _Agent_GetContextStats_Handler},
		{MethodName: "SaveScheduleState", Handler: _Agent_SaveScheduleState_Handler},
	},
	Streams: []grpc.StreamDesc{
		// Chat is server-streaming only.
		{StreamName: "Chat", Handler: _Agent_Chat_Handler, ServerStreams: true},
	},
	Metadata: "services/agent/rpc/agent.proto",
}

View File

@@ -0,0 +1,60 @@
package rpc
import (
"errors"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/agent/rpc/pb"
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
"github.com/zeromicro/go-zero/core/service"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
const (
	// defaultListenOn is the bind address used when ServerOptions.ListenOn is
	// blank.
	defaultListenOn = "0.0.0.0:9089"
	// defaultTimeout of 0 means no RPC timeout, so the long-lived Chat stream
	// is never truncated.
	defaultTimeout = 0
)
// ServerOptions configures the agent zrpc server.
type ServerOptions struct {
	// ListenOn is the bind address; defaults to defaultListenOn when blank.
	ListenOn string
	// Timeout is the RPC timeout; negative values fall back to defaultTimeout
	// (no timeout).
	Timeout time.Duration
	// Service is the required business-layer dependency; NewServer rejects a
	// nil value.
	Service *agentsv.AgentService
}
// NewServer builds the agent zrpc server and reports the resolved listen
// address.
//
// Scope:
//  1. only zrpc server configuration and gRPC handler registration happen
//     here;
//  2. no database, Redis, LLM or business services are created — cmd/agent
//     owns those;
//  3. Chat is a long-lived server stream, so no RPC timeout is applied by
//     default to avoid truncating SSE forwarding.
func NewServer(opts ServerOptions) (*zrpc.RpcServer, string, error) {
	if opts.Service == nil {
		return nil, "", errors.New("agent service dependency not initialized")
	}
	addr := strings.TrimSpace(opts.ListenOn)
	if addr == "" {
		addr = defaultListenOn
	}
	timeout := opts.Timeout
	if timeout < 0 {
		timeout = defaultTimeout
	}
	conf := zrpc.RpcServerConf{
		ServiceConf: service.ServiceConf{
			Name: "agent.rpc",
			Mode: service.DevMode,
		},
		ListenOn: addr,
		Timeout:  int64(timeout / time.Millisecond),
	}
	server, err := zrpc.NewServer(conf, func(grpcServer *grpc.Server) {
		pb.RegisterAgentServer(grpcServer, NewHandler(opts.Service))
	})
	if err != nil {
		return nil, "", err
	}
	return server, addr, nil
}

View File

@@ -12,8 +12,6 @@ import (
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/dao"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/pkg"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
@@ -21,6 +19,8 @@ import (
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)

View File

@@ -6,9 +6,9 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
)
const (

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// renderMemoryPinnedContentByMode 根据配置选择记忆渲染方式。

View File

@@ -4,8 +4,8 @@ import (
"context"
"errors"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
memorycontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/memory"
)

View File

@@ -7,11 +7,11 @@ import (
"strings"
"time"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/memory/vectorsync"
"github.com/LoveLosita/smartflow/backend/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/services/memory/internal/vectorsync"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
"gorm.io/gorm"
)

View File

@@ -6,8 +6,8 @@ import (
"log"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
const defaultDecisionCompareMaxTokens = 600

View File

@@ -7,9 +7,9 @@ import (
"log"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
const (

View File

@@ -4,8 +4,8 @@ import (
"context"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// WriteOrchestrator 是 Day1 的本地回退版本。

View File

@@ -6,8 +6,8 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
"github.com/LoveLosita/smartflow/backend/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"gorm.io/gorm"
)

View File

@@ -6,8 +6,8 @@ import (
"errors"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
"github.com/LoveLosita/smartflow/backend/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)

View File

@@ -3,9 +3,9 @@ package service
import (
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
"github.com/LoveLosita/smartflow/backend/model"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
func toItemDTO(item model.MemoryItem) memorymodel.ItemDTO {

View File

@@ -3,7 +3,7 @@ package service
import (
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"github.com/spf13/viper"
)

View File

@@ -4,8 +4,8 @@ import (
"context"
"errors"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// EnqueueService 是 Day1 的“任务入队门面”。

View File

@@ -6,13 +6,13 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/memory/vectorsync"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/LoveLosita/smartflow/backend/respond"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/services/memory/internal/vectorsync"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
"gorm.io/gorm"
)

View File

@@ -3,7 +3,7 @@ package service
import (
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
)

View File

@@ -8,11 +8,11 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
"github.com/LoveLosita/smartflow/backend/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
)

View File

@@ -5,9 +5,9 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
"github.com/LoveLosita/smartflow/backend/model"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// HybridRetrieve 统一承接读取侧 RAG-first 召回链路。

View File

@@ -4,7 +4,7 @@ import (
"sort"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// RankItems 对读取结果做统一重排。

View File

@@ -3,7 +3,7 @@ package utils
import (
"fmt"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// AggregateComparisons 把一轮 LLM 比对结果汇总为最终动作。

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// 合法关系类型集合,用于校验 LLM 输出的 relation 字段。

View File

@@ -6,7 +6,7 @@ import (
"fmt"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
const (

View File

@@ -1,8 +1,8 @@
package utils
import (
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
"github.com/LoveLosita/smartflow/backend/model"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// EffectiveUserSetting 返回用户记忆设置的生效值。

View File

@@ -6,9 +6,9 @@ import (
"log"
"strings"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
"github.com/LoveLosita/smartflow/backend/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
)

View File

@@ -5,10 +5,10 @@ import (
"fmt"
"strings"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
"github.com/LoveLosita/smartflow/backend/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// ApplyActionOutcome 是单个决策动作的执行结果。

View File

@@ -4,10 +4,10 @@ import (
"context"
"fmt"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
"github.com/LoveLosita/smartflow/backend/model"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
"gorm.io/gorm"
)

View File

@@ -3,8 +3,8 @@ package worker
import (
"context"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/memory/orchestrator"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/services/memory/internal/orchestrator"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
)
// Extractor 是 worker 抽取依赖接口。

View File

@@ -9,13 +9,13 @@ import (
"strings"
"time"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/memory/orchestrator"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/memory/vectorsync"
"github.com/LoveLosita/smartflow/backend/model"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/services/memory/internal/orchestrator"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryutils "github.com/LoveLosita/smartflow/backend/services/memory/internal/utils"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/services/memory/internal/vectorsync"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
"gorm.io/gorm"
)

View File

@@ -5,16 +5,16 @@ import (
"errors"
"log"
memorycleanup "github.com/LoveLosita/smartflow/backend/memory/cleanup"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/memory/observe"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/memory/orchestrator"
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
memoryservice "github.com/LoveLosita/smartflow/backend/memory/service"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/memory/vectorsync"
memoryworker "github.com/LoveLosita/smartflow/backend/memory/worker"
"github.com/LoveLosita/smartflow/backend/model"
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
memorycleanup "github.com/LoveLosita/smartflow/backend/services/memory/internal/cleanup"
memoryorchestrator "github.com/LoveLosita/smartflow/backend/services/memory/internal/orchestrator"
memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
memoryservice "github.com/LoveLosita/smartflow/backend/services/memory/internal/service"
memoryvectorsync "github.com/LoveLosita/smartflow/backend/services/memory/internal/vectorsync"
memoryworker "github.com/LoveLosita/smartflow/backend/services/memory/internal/worker"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
"gorm.io/gorm"
)

View File

@@ -7,10 +7,10 @@ import (
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
memorymodule "github.com/LoveLosita/smartflow/backend/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
coremodel "github.com/LoveLosita/smartflow/backend/model"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
memorycontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/memory"
)

View File

@@ -0,0 +1,52 @@
package agent
// ConversationQueryRequest 描述 gateway 查询单个 agent 会话资源的最小跨进程参数。
//
// 职责边界:
// 1. UserID 由 gateway 鉴权后填充,不信任前端传入;
// 2. ConversationID 只表达会话归属,不承载 HTTP query 或 SSE 细节;
// 3. 会话是否存在、是否属于当前用户仍由 agent 服务内部校验。
type ConversationQueryRequest struct {
UserID int `json:"user_id"`
ConversationID string `json:"conversation_id"`
}
// ConversationListRequest 描述 gateway 拉取当前用户会话列表的最小查询条件。
//
// 职责边界:
// 1. Page/PageSize 允许为 0默认值和上限由 agent 服务统一兜底;
// 2. Status 只透传过滤条件,合法值仍由 agent 服务决定;
// 3. 不包含消息正文,避免列表接口被扩成重查询。
type ConversationListRequest struct {
UserID int `json:"user_id"`
Page int `json:"page,omitempty"`
PageSize int `json:"page_size,omitempty"`
Status string `json:"status,omitempty"`
}
// SaveScheduleStatePlacedItem 描述前端拖拽后的单个 task_item 绝对时间位置。
//
// 职责边界:
// 1. 字段形状与 HTTP 入参保持一致,避免 gateway 做额外翻译;
// 2. 这里只承载跨进程 JSON 契约,不判断节次、周次或 task_item 是否有效;
// 3. agent 服务会把绝对坐标转换为内部 ScheduleState 相对坐标。
type SaveScheduleStatePlacedItem struct {
TaskItemID int `json:"task_item_id"`
Week int `json:"week"`
DayOfWeek int `json:"day_of_week"`
StartSection int `json:"start_section"`
EndSection int `json:"end_section"`
EmbedCourseEventID int `json:"embed_course_event_id,omitempty"`
}
// SaveScheduleStateRequest 描述 gateway 暂存会话内排程拖拽状态的跨进程命令。
//
// 职责边界:
// 1. UserID 由 gateway 注入,用于 agent 服务做会话归属校验;
// 2. ConversationID 指向当前会话快照;
// 3. Items 只包含用户拖拽后的 task_item 位置,不承载课程写入或正式确认语义。
type SaveScheduleStateRequest struct {
UserID int `json:"user_id"`
ConversationID string `json:"conversation_id"`
Items []SaveScheduleStatePlacedItem `json:"items"`
}

View File

@@ -0,0 +1,30 @@
package agent
import "encoding/json"
// ChatRequest 是 Gateway 调用 agent RPC Chat 流时使用的最小跨进程契约。
//
// 职责边界:
// 1. 只承载 Gateway 已完成鉴权与会话归一化后的字段;
// 2. 不承载 HTTP header、SSE 细节或 Gin 上下文;
// 3. extra_json 只负责透传 extra 的 JSON 快照,不在契约层解释业务语义。
type ChatRequest struct {
Message string `json:"message"`
Thinking string `json:"thinking,omitempty"`
Model string `json:"model,omitempty"`
UserID int `json:"user_id"`
ConversationID string `json:"conversation_id"`
ExtraJSON json.RawMessage `json:"extra_json,omitempty"`
}
// ChatChunk 是 agent RPC Chat 流回传给 Gateway 的最小分块契约。
//
// 职责边界:
// 1. payload 保持前端现有 SSE data 负载格式,不在 Gateway 二次改写;
// 2. done 只表达“流是否结束”,不包含额外业务语义;
// 3. error_json 只表达服务端错误快照,最终由 Gateway 转换成既有 SSE 错误形态。
type ChatChunk struct {
Payload string `json:"payload,omitempty"`
Done bool `json:"done,omitempty"`
ErrorJSON json.RawMessage `json:"error_json,omitempty"`
}

View File

@@ -71,10 +71,11 @@ Gin Gateway 只做边缘层职责:
当前 gateway 切流点:
1. `/api/v1/user/*``backend/gateway/api/userauth` 承载 HTTP 入口,核心能力通过 `backend/gateway/client/userauth``cmd/userauth` zrpc。
1. `/api/v1/user/*``backend/gateway/api/userauth` 承载 HTTP 入口,核心能力通过 zrpc client`cmd/userauth` zrpc。
2. `gateway/middleware` 的 JWT 鉴权和 token quota guard 只调 `userauth`,不直接读写 `users`、Redis 黑名单或额度缓存。
3. `notification``active-scheduler` 等跨服务 zrpc client 统一放在 `backend/gateway/client/<service>`HTTP 门面统一放在 `backend/gateway/api`
3. `notification``active-scheduler``agent``memory` 等跨服务 zrpc client 终态统一放在 `backend/client/<service>`;当前 `backend/gateway/client/<service>` 是迁移期旧位置,下一轮目录收口应机械迁出
4. zrpc client 不放进 `cmd``cmd` 只负责进程入口和装配,不承载跨服务 client 语义。
5. HTTP 门面统一放在 `backend/gateway/api`gateway 内部可新增 `backend/gateway/shared`,只放 HTTP/SSE/bind/multipart/respond 等门面复用能力,禁止服务层 import。
### 3.2 服务层
@@ -134,6 +135,8 @@ gozero 服务负责领域能力:
6. 换句话说,`shared` 是“跨进程契约层 + 少量跨服务底座”,不是“公共业务层”。
7. 阶段 2 已经新增 `backend/shared/contracts/userauth``backend/shared/ports`只承载跨层契约和端口接口user/auth 的 JWT、DAO、额度治理、黑名单实现不进入 `shared`
8. 阶段 3 已经新增 `backend/shared/contracts/notification`,只承载 notification 跨层 DTO通知通道 DAO、投递状态机、provider、重试策略和 outbox handler 都留在 `backend/services/notification`
9. `backend/client` 是跨进程调用客户端层,不属于 gateway 私有目录gateway 和服务进程都可以 import但 client 只能依赖 `shared/contracts``shared/ports`、rpc/pb 和通用错误适配,不反向依赖 `gateway/api` 或服务私有 `sv/dao/model`
10. `backend/gateway/shared` 与根 `backend/shared` 分层:前者只服务 gateway 门面内部复用,后者服务全后端跨服务契约。服务代码禁止 import `gateway/shared`,避免形成“服务依赖网关”的反向边界。
---
@@ -503,7 +506,16 @@ flowchart LR
1. `backend/newAgent/*` 已按机械搬迁方式迁入 `backend/services/agent/*`Go import 路径和 `agent*` 包名前缀已切到新位置。
2. `backend/service/agentsvc/*` 已继续机械迁入 `backend/services/agent/sv/*`,启动装配和 Agent HTTP handler 直接依赖 `agent/sv.AgentService`
3. 历史 timeline payload key`newagent_history_kind`)暂不改名,避免破坏旧会话兼容
3. `backend/cmd/agent/main.go` 已补齐独立进程入口:负责 DB / Redis / LLM / RAG 初始化、agent outbox consumer 启停和 agent zrpc server 生命周期;旧 `backend/cmd/start.go` 的 gateway 本地链路继续保留
4. agent 事件归属继续复用 `backend/service/events` 与服务级 outbox 路由:`chat.*` / `agent.*` 事件归 `ServiceAgent``memory.extract.requested` 只登记路由不再由 agent 进程消费,`task.urgency.promote.requested` 仍是 publish-only 写入 `task_outbox_messages`
5. `backend/services/agent/rpc` 已补齐 `Ping``Chat` server-stream 以及 conversation meta/list/timeline、schedule-preview、context-stats、schedule-state 6 个 unary JSON 透传 RPC跨进程 chat 边界传 `ChatChunk`,不传 Go channelGateway 继续对前端输出原 SSE 协议。
6. `backend/gateway/client/agent``gateway/api/agent.go` 已接入 `agent.rpc.chat.enabled``agent.rpc.api.enabled` 两个开关;本地 `config.yaml``config.example.yaml` 当前默认 `true`,真实 UTF-8 中文 SSE smoke 已通过chat 主链路走 `agent RPC Chat(stream)` 再转 SSE非 chat `/agent/*` 走 agent unary RPC。
7. 历史 timeline payload key`newagent_history_kind`)暂不改名,避免破坏旧会话兼容。
8. `backend/memory/*` 已物理迁入 `backend/services/memory/*``module.go``model/``observe/` 作为公共门面保留,`cleanup/``orchestrator/``repo/``service/``utils/``vectorsync/``worker/` 收入 `internal/`,旧 `backend/memory` 目录已删除。
9. `cmd/start.go` 不再创建/注册/启动 agent outbox event busagent relay / consumer 由 `cmd/agent` 独占memory worker / 管理能力由 `cmd/memory` 承担。
10. `cmd/start.go` 已收缩 gateway 本地 `AgentService` 构建:当 `agent.rpc.chat.enabled=true``agent.rpc.api.enabled=true`gateway 不再初始化 agent 本地编排、LLM、RAG、memory reader fallback只有任一 RPC 开关关闭时才保守装配本地 fallback。
11. 最新验证:重建并重启 `api` / `agent`UTF-8 中文 SSE smoke 通过且只有单个 `[DONE]`6 个非 chat `/agent/*` HTTP smoke 中 meta/list/timeline/context-stats 返回 200schedule-preview / schedule-state 在无快照场景返回预期业务 400。
12. 下一轮目录收口按新口径推进:把 `backend/gateway/client/*` 机械迁到 `backend/client/*`,让 gateway 和服务进程共同复用 zrpc client保留根 `backend/shared` 承载跨服务契约,同时新增 `backend/gateway/shared` 承载 HTTP/SSE/bind/respond 等 gateway 门面复用。
建议提交点:
@@ -552,18 +564,18 @@ flowchart LR
当前建议按这个顺序推进:
注:阶段 1.5 / 1.6 / 2 / 3 / 4 已完成首轮收口;`notification``active-scheduler` 都不再作为“未拆服务”待办
注:阶段 1.5 / 1.6 / 2 / 3 / 4 已完成首轮收口;阶段 5 的 schedule / task / course / task-class 已完成首轮服务化和 HTTP 切流;当前推进点是阶段 6 的 agent / memory 收口
1. 以阶段 1 的服务级 outbox 为当前基线,不再回头做共享 outbox 方案。
2. 保持 `backend/services/llm``backend/services/rag` 为 canonical 入口,不再把它们写成待办。
3. 保持 `backend/services/userauth` + `cmd/userauth` 为阶段 2 样板,不再回头恢复 Gin 单体 user/auth。
4. 下一步进入阶段 5优先切 schedule / task / course / task-class,逐步替换 active-scheduler 当前的跨域 DB 依赖
5. 再切 agent / memory把聊天编排、主动调度会话复跑和记忆链路独立出去。
4. 保持 schedule / task / course / task-class 的 zrpc 边界为阶段 5 当前基线,不再把它们重新列为未拆服务
5. 继续收 agent / memory把聊天编排、主动调度会话复跑和记忆链路独立出去。
6. 最后把 Gin 收口成纯 Gateway。
一句话总结:
> outbox 的服务级基础设施、llm-service、rag-service、user/auth 样板服务和 notification 阶段 3 都已经完成;下一步让 active-scheduler、schedule、task、course、task-class 按稳定边界逐步独立;再把 agent / memory 独立出来,完成聊天编排和记忆链路的服务化;最后把 Gin 收口成真正的 Gateway。
> outbox 的服务级基础设施、llm-service、rag-service、user/authnotificationactive-scheduler、schedule、task、course、task-class 都已经完成首轮服务化基线;当前继续把 agent / memory 独立出来,完成聊天编排和记忆链路的服务化;最后把 Gin 收口成真正的 Gateway。
---
@@ -749,7 +761,7 @@ SmartFlow-Agent/
> 4. `backend/services/agent/*` 已承接原 `backend/newAgent/*` 内核,`backend/services/agent/sv/*` 已承接原 `backend/service/agentsvc/*` 编排层;后面再按风险拆到 `internal/{prompt,graph,stream,tool,session,router}`。
> 5. `backend/services/notification/*` 已经是阶段 3 终态样板;`backend/cmd/notification` 是独立进程入口,`backend/gateway/client/notification` 是 gateway 侧 zrpc client`backend/shared/contracts/notification` 只放跨层契约;旧 `backend/notification/*`、旧 DAO/model 和旧 `service/events/notification_feishu.go` 不再作为活跃实现。
> 6. `backend/services/active_scheduler/*` 已经是阶段 4 当前样板;`backend/cmd/active-scheduler` 是独立进程入口,`backend/gateway/client/activescheduler` 是 gateway 侧 zrpc client`backend/services/active_scheduler/core` 承载迁移期领域核心;旧 `backend/active_scheduler/*` 不再作为活跃实现。
> 7. `backend/memory/*`,后面要收束到 `backend/services/memory/`;当前 `memory/service/*` 只是迁移过渡态,终态还是按 `sv/` 或 `internal/` 拆开
> 7. `backend/services/memory/*` 已成为 memory 当前 canonical 入口;`module.go`、`model/`、`observe/` 是对外可见门面,服务私有实现已收入 `internal/`,旧 `backend/memory/*` 只在 legacy 文档中作为历史路径出现
>
> 说明 4`shared` 先保留 `events` 和少量跨服务底座型 `infra`。以后如果真的出现跨服务 DTO / 枚举 / 常量,再新增 `contracts` 一类目录,但不要把 `dao`、`model`、`sv`、`handler` 这类服务私有层塞进去。
@@ -759,7 +771,7 @@ SmartFlow-Agent/
>
> 说明 7目录树里如果暂时写成 `backend/services/llm/` 和 `backend/services/rag/`,那只是目录名写法;后文所有职责判断都以 `llm-service` / `rag-service` 这两个逻辑服务名为准。
>
> 说明 8阶段 2 已经采用 `backend/services/userauth/` 作为实际目录名,不再使用 `user-auth`。阶段 3 已经采用 `backend/services/notification/` 作为实际目录名。gateway 侧 zrpc client 放在 `backend/gateway/{userauth,notification}/`进程入口放在 `backend/cmd/{userauth,notification}/`不要把 rpc client 放进 `cmd`。
> 说明 8阶段 2 已经采用 `backend/services/userauth/` 作为实际目录名,不再使用 `user-auth`。阶段 3 已经采用 `backend/services/notification/` 作为实际目录名。zrpc client 终态放在 `backend/client/<service>/`,迁移期旧 `backend/gateway/client/<service>/` 要按机械迁移逐步挪出;进程入口放在 `backend/cmd/<service>/`不要把 rpc client 放进 `cmd`。
### 6.3 哪些可以不用变
@@ -910,19 +922,22 @@ graph TD
5. 阶段 3 `notification` 已完成实现、code review 修复和真实 smoke`llm-service``rag-service` 也已完成,不要重新当成待办。
6. 阶段 4 `active-scheduler` 已完成首轮收口;后续不要再把它当成“未拆服务”,除非是在补契约测试或继续替换跨域 DB 访问。
7. `shared` 只保留跨进程契约和少量跨服务底座不承载业务逻辑、DAO、模型或状态机。
8. 如果后续要改目录,必须先回答“这个文件属于哪一个典型用例”,回答不清楚就先别动结构
9. 当前文档已经可以作为切对话基线;后续代理默认按本文件推进。现阶段的迁移基线入口是 `backend/cmd/api``backend/cmd/worker``backend/cmd/all`,它们只是当前仓库的启动壳,不是终态。`backend/cmd/userauth` 是阶段 2 的独立服务入口,`backend/cmd/notification` 是阶段 3 的独立服务入口,`backend/cmd/active-scheduler` 是阶段 4 的独立服务入口,`backend/cmd/schedule``backend/cmd/task``backend/cmd/task-class``backend/cmd/course` 是阶段 5 已落地的独立服务入口。终态仍然是“一个服务一个独立 `main.go`”,只在出现新的契约风险、边界变化或业务语义变化时再重新讨论架构
8. `backend/client` 是 zrpc client 的终态共享位置,`backend/gateway/client` 只是迁移期旧位置;后续服务或 gateway 需要复用 client 时,优先推进机械搬迁到根 `client`
9. `backend/gateway/shared` 只放 gateway 门面复用,服务代码禁止 import跨服务契约仍留在根 `backend/shared`
10. 如果后续要改目录,必须先回答“这个文件属于哪一个典型用例”,回答不清楚就先别动结构。
11. 当前文档已经可以作为切对话基线;后续代理默认按本文件推进。现阶段的迁移基线入口是 `backend/cmd/api``backend/cmd/worker``backend/cmd/all`,它们只是当前仓库的启动壳,不是终态。`backend/cmd/userauth` 是阶段 2 的独立服务入口,`backend/cmd/notification` 是阶段 3 的独立服务入口,`backend/cmd/active-scheduler` 是阶段 4 的独立服务入口,`backend/cmd/schedule``backend/cmd/task``backend/cmd/task-class``backend/cmd/course` 是阶段 5 已落地的独立服务入口。终态仍然是“一个服务一个独立 `main.go`”,只在出现新的契约风险、边界变化或业务语义变化时再重新讨论架构。
### 6.10 启动方式与进程模型
1. 终态里每个 gozero 服务都应当是独立进程:一个服务一个 `main.go`,一份配置,一组日志,一套端口和资源连接。
2. 目录上可以继续采用 `backend/cmd/<service>/main.go` 作为可执行入口,`backend/services/<service>/` 负责 `sv/``dao/``model/``internal/``rpc/`gateway 自己的 HTTP 适配 zrpc client 放在 `backend/gateway/...`,不要把 rpc client 放进 `cmd`
2. 目录上可以继续采用 `backend/cmd/<service>/main.go` 作为可执行入口,`backend/services/<service>/` 负责 `sv/``dao/``model/``internal/``rpc/`gateway 自己的 HTTP 适配放在 `backend/gateway/...`,跨服务 zrpc client 放在 `backend/client/<service>/`,不要把 rpc client 放进 `cmd` 或长期留在 `gateway/client`
3. 本地开发为了方便,可以保留 `backend/cmd/all``make dev` 或类似聚合启动器,但它只负责拉起多个独立进程,不在同一个 Go 进程里把所有服务 `startXXX()` 混着跑。
4. `go startxxx()` 这种“一个进程里同时起多个服务”的方式只适合作为过渡调试壳,不作为最终部署形态。
5. 如果某些服务需要联动启动应通过脚本、Makefile、docker compose 或开发编排器去启动多个二进制,而不是把进程边界打穿。
6. 带 worker 的服务可以继续保留多入口角色,例如 `api` / `worker` / `all`,但它们仍然是同一服务的不同可执行角色,不是把多个服务硬塞进一个进程。
7. MySQL / Redis 容器的启动归 `docker compose` 或运维层Go 服务只负责在自己的进程里建立连接、做自己的 AutoMigrate 和连通性检查。
8. 阶段 5 后,旧 `cmd/start.go` / `cmd/all` 只是 gateway 和迁移期组合壳;本地完整 smoke 必须额外启动 `cmd/userauth``cmd/notification``cmd/active-scheduler``cmd/schedule``cmd/task``cmd/task-class``cmd/course`。如果同机已有另一条线占用默认端口,应复制临时配置,把 HTTP / zrpc 端口整体平移后再启动服务。
9. 阶段 6 后,`cmd/agent``cmd/memory` 也应纳入完整本地 smoke目录收口时优先把服务与 gateway 共同使用的 zrpc client 从 `gateway/client` 挪到根 `client`,再清理 gateway 门面复用到 `gateway/shared`
### 6.11 测试自动化与 smoke 权限边界