Version: 0.9.70.dev.260504
后端:
1. 阶段 5 schedule 首刀服务化落地,新增 `cmd/schedule`、`services/schedule/{dao,rpc,sv,core}`、`gateway/client/schedule`、`shared/contracts/schedule` 和 schedule port
2. gateway `/api/v1/schedule/*` 切到 schedule zrpc client,HTTP 门面只保留鉴权、参数绑定、超时和轻量转发
3. active-scheduler 的 schedule facts、feedback 和 confirm apply 改为调用 schedule RPC adapter,减少对 `schedule_events`、`schedules`、`task_classes`、`task_items` 的跨域 DB 依赖
4. 单体聊天主动调度 rerun 的 schedule 读写链路切到 schedule RPC,迁移期仅保留 task facts 直读 Gorm
5. 为 schedule zrpc 补充 `Ping` 启动健康检查,并在 gateway client 与 active-scheduler adapter 初始化时校验服务可用
6. `cmd/schedule` 独立初始化 DB / Redis,只 AutoMigrate schedule 自有表,并显式检查迁移期 task / task-class 依赖表
7. 更新 active-scheduler 依赖表检查和 preview confirm apply 抽象,保留旧 Gorm 实现作为迁移期回退路径
8. 补充 `schedule.rpc` 示例配置和 schedule HTTP RPC 超时配置
文档:
1. 更新微服务迁移计划,将阶段 5 schedule 首刀进展、当前切流点、旧实现保留范围和 active-scheduler DB 依赖收缩情况写入基线
This commit is contained in:
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
||||
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
@@ -45,6 +46,11 @@ func main() {
|
||||
JobScanEvery: viper.GetDuration("activeScheduler.jobScanEvery"),
|
||||
JobScanLimit: viper.GetInt("activeScheduler.jobScanLimit"),
|
||||
KafkaConfig: kafkabus.LoadConfig(),
|
||||
ScheduleRPC: activeadapters.ScheduleRPCConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize active-scheduler service: %v", err)
|
||||
|
||||
68
backend/cmd/schedule/main.go
Normal file
68
backend/cmd/schedule/main.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Package main is the standalone entry point for the schedule zrpc service
// (backend/cmd/schedule), split out of the monolith in migration phase 5.
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/LoveLosita/smartflow/backend/bootstrap"
	rootdao "github.com/LoveLosita/smartflow/backend/dao"
	rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
	"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
	scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
	schedulerpc "github.com/LoveLosita/smartflow/backend/services/schedule/rpc"
	schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
	"github.com/spf13/viper"
)

// main boots the schedule service: it loads configuration, opens the
// service-owned DB and Redis connections, wires the schedule service with its
// Gorm apply adapter, starts the zrpc server in a goroutine, and blocks until
// SIGINT/SIGTERM requests shutdown.
func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	// Cancelled on Ctrl-C or SIGTERM; used below as the shutdown signal.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	// The schedule service opens its own DB/Redis from config rather than
	// sharing the monolith's connections.
	db, err := scheduledao.OpenDBFromConfig()
	if err != nil {
		log.Fatalf("failed to connect schedule database: %v", err)
	}
	redisClient, err := scheduledao.OpenRedisFromConfig()
	if err != nil {
		log.Fatalf("failed to connect schedule redis: %v", err)
	}
	defer redisClient.Close()

	// Install the Gorm cache-invalidation plugin so writes through db also
	// evict the Redis-backed cache.
	cacheRepo := rootdao.NewCacheDAO(redisClient)
	if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
		log.Fatalf("failed to initialize schedule cache deleter: %v", err)
	}

	// NOTE(review): TaskClassDAO comes from the root (monolith) dao package —
	// presumably a migration-period dependency on task-class tables; confirm
	// it is removed once the task service is split out.
	svc := schedulesv.NewScheduleService(
		scheduledao.NewScheduleDAO(db),
		rootdao.NewTaskClassDAO(db),
		rootdao.NewManager(db),
		cacheRepo,
	)
	// Confirm-apply writes go through the Gorm adapter in this process.
	svc.SetApplyAdapter(applyadapter.NewGormApplyAdapter(db))

	// Build the zrpc server from the schedule.rpc.* config keys; listenOn is
	// echoed back for logging.
	server, listenOn, err := schedulerpc.NewServer(schedulerpc.ServerOptions{
		ListenOn: viper.GetString("schedule.rpc.listenOn"),
		Timeout:  viper.GetDuration("schedule.rpc.timeout"),
		Service:  svc,
	})
	if err != nil {
		log.Fatalf("failed to build schedule zrpc server: %v", err)
	}
	defer server.Stop()

	// Serve in the background; the main goroutine blocks on the signal
	// context below. server.Start() is expected to block until Stop().
	go func() {
		log.Printf("schedule zrpc service starting on %s", listenOn)
		server.Start()
	}()

	<-ctx.Done()
	log.Println("schedule service stopping")
}
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
|
||||
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
gatewayrouter "github.com/LoveLosita/smartflow/backend/gateway/router"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
@@ -36,7 +37,7 @@ import (
|
||||
agentsvcsvc "github.com/LoveLosita/smartflow/backend/service/agentsvc"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
@@ -221,6 +222,14 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize notification zrpc client: %w", err)
|
||||
}
|
||||
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize schedule zrpc client: %w", err)
|
||||
}
|
||||
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
|
||||
Target: viper.GetString("activeScheduler.rpc.target"),
|
||||
@@ -259,12 +268,22 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
memoryCfg,
|
||||
)
|
||||
|
||||
activeReaders := activeadapters.NewGormReaders(db)
|
||||
activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersFromGorm(activeReaders))
|
||||
// 1. 迁移期 task_pool 事实仍由单体 task 表读取,下一轮切 task 服务后替换为 task RPC;
|
||||
// 2. schedule facts / feedback / apply 已统一走 schedule RPC,避免聊天 rerun 继续直连 schedule 表。
|
||||
activeTaskReader := activeadapters.NewGormReaders(db)
|
||||
activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize schedule rpc adapter for active-scheduler rerun: %w", err)
|
||||
}
|
||||
activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskReader, activeScheduleAdapter))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(db, manager.ActiveSchedule, activeScheduleDryRun)
|
||||
activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -272,13 +291,13 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
// 2. dry-run 与 selection 通过 graph runner 串起来,避免 trigger_pipeline 再拼第二套候选逻辑。
|
||||
activeScheduleLLMClient := llmService.ProClient()
|
||||
activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
|
||||
activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeReaders, activeScheduleLLMClient)
|
||||
activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
|
||||
activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
|
||||
handlers := buildAPIHandlers(taskSv, taskClassService, courseService, scheduleService, agentService, memoryModule, activeSchedulerClient, notificationClient)
|
||||
handlers := buildAPIHandlers(taskSv, taskClassService, courseService, scheduleClient, agentService, memoryModule, activeSchedulerClient, notificationClient)
|
||||
|
||||
runtime := &appRuntime{
|
||||
db: db,
|
||||
@@ -363,17 +382,14 @@ func buildCourseService(llmService *llmservice.Service, courseRepo *dao.CourseDA
|
||||
)
|
||||
}
|
||||
|
||||
func buildActiveScheduleDryRunService(db *gorm.DB) (*activesvc.DryRunService, error) {
|
||||
readers := activeadapters.NewGormReaders(db)
|
||||
return activesvc.NewDryRunService(activeadapters.ReadersFromGorm(readers))
|
||||
}
|
||||
|
||||
func buildActiveSchedulePreviewConfirmService(db *gorm.DB, activeDAO *dao.ActiveScheduleDAO, dryRun *activesvc.DryRunService) (*activesvc.PreviewConfirmService, error) {
|
||||
func buildActiveSchedulePreviewConfirmService(activeDAO *dao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||
ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
|
||||
}) (*activesvc.PreviewConfirmService, error) {
|
||||
previewService, err := activepreview.NewService(activeDAO)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, applyadapter.NewGormApplyAdapter(db))
|
||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, scheduleApplyAdapter)
|
||||
}
|
||||
|
||||
// buildActiveScheduleSessionRerunFunc 把主动调度定位器 / graph / preview 能力装成聊天入口可调用的 rerun 闭包。
|
||||
@@ -810,7 +826,7 @@ func buildAPIHandlers(
|
||||
taskService *service.TaskService,
|
||||
taskClassService *service.TaskClassService,
|
||||
courseService *service.CourseService,
|
||||
scheduleService *service.ScheduleService,
|
||||
scheduleClient ports.ScheduleCommandClient,
|
||||
agentService *service.AgentService,
|
||||
memoryModule *memory.Module,
|
||||
activeSchedulerClient ports.ActiveSchedulerCommandClient,
|
||||
@@ -820,7 +836,7 @@ func buildAPIHandlers(
|
||||
TaskHandler: api.NewTaskHandler(taskService),
|
||||
TaskClassHandler: api.NewTaskClassHandler(taskClassService),
|
||||
CourseHandler: api.NewCourseHandler(courseService),
|
||||
ScheduleHandler: api.NewScheduleAPI(scheduleService),
|
||||
ScheduleHandler: api.NewScheduleAPI(scheduleClient),
|
||||
AgentHandler: api.NewAgentHandler(agentService),
|
||||
MemoryHandler: api.NewMemoryHandler(memoryModule),
|
||||
ActiveSchedule: api.NewActiveScheduleAPI(activeSchedulerClient),
|
||||
|
||||
Reference in New Issue
Block a user