后端:
1. 阶段 5 schedule 首刀服务化落地,新增 `cmd/schedule`、`services/schedule/{dao,rpc,sv,core}`、`gateway/client/schedule`、`shared/contracts/schedule` 和 schedule port
2. gateway `/api/v1/schedule/*` 切到 schedule zrpc client,HTTP 门面只保留鉴权、参数绑定、超时和轻量转发
3. active-scheduler 的 schedule facts、feedback 和 confirm apply 改为调用 schedule RPC adapter,减少对 `schedule_events`、`schedules`、`task_classes`、`task_items` 的跨域 DB 依赖
4. 单体聊天主动调度 rerun 的 schedule 读写链路切到 schedule RPC,迁移期仅保留 task facts 直读 Gorm
5. 为 schedule zrpc 补充 `Ping` 启动健康检查,并在 gateway client 与 active-scheduler adapter 初始化时校验服务可用
6. `cmd/schedule` 独立初始化 DB / Redis,只 AutoMigrate schedule 自有表,并显式检查迁移期 task / task-class 依赖表
7. 更新 active-scheduler 依赖表检查和 preview confirm apply 抽象,保留旧 Gorm 实现作为迁移期回退路径
8. 补充 `schedule.rpc` 示例配置和 schedule HTTP RPC 超时配置
文档:
1. 更新微服务迁移计划,将阶段 5 schedule 首刀进展、当前切流点、旧实现保留范围和 active-scheduler DB 依赖收缩情况写入基线
80 lines · 2.6 KiB · Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"log"
|
|
"os"
|
|
"os/signal"
|
|
"syscall"
|
|
|
|
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
|
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
|
"github.com/LoveLosita/smartflow/backend/inits"
|
|
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
|
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
|
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
|
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
|
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
|
"github.com/spf13/viper"
|
|
)
|
|
|
|
func main() {
|
|
if err := bootstrap.LoadConfig(); err != nil {
|
|
log.Fatalf("failed to load config: %v", err)
|
|
}
|
|
|
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
|
defer stop()
|
|
|
|
db, err := activeschedulerdao.OpenDBFromConfig()
|
|
if err != nil {
|
|
log.Fatalf("failed to connect active-scheduler database: %v", err)
|
|
}
|
|
|
|
aiHub, err := inits.InitEino()
|
|
if err != nil {
|
|
log.Fatalf("failed to initialize active-scheduler Eino runtime: %v", err)
|
|
}
|
|
llmService := llmservice.New(llmservice.Options{
|
|
AIHub: aiHub,
|
|
APIKey: os.Getenv("ARK_API_KEY"),
|
|
BaseURL: viper.GetString("agent.baseURL"),
|
|
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
|
})
|
|
|
|
svc, err := activeschedulersv.New(db, llmService, activeschedulersv.Options{
|
|
JobScanEvery: viper.GetDuration("activeScheduler.jobScanEvery"),
|
|
JobScanLimit: viper.GetInt("activeScheduler.jobScanLimit"),
|
|
KafkaConfig: kafkabus.LoadConfig(),
|
|
ScheduleRPC: activeadapters.ScheduleRPCConfig{
|
|
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
|
Target: viper.GetString("schedule.rpc.target"),
|
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
|
},
|
|
})
|
|
if err != nil {
|
|
log.Fatalf("failed to initialize active-scheduler service: %v", err)
|
|
}
|
|
defer svc.Close()
|
|
svc.StartWorkers(ctx)
|
|
log.Println("Active-scheduler outbox consumer and due job scanner started")
|
|
|
|
server, listenOn, err := activeschedulerrpc.NewServer(activeschedulerrpc.ServerOptions{
|
|
ListenOn: viper.GetString("activeScheduler.rpc.listenOn"),
|
|
Timeout: viper.GetDuration("activeScheduler.rpc.timeout"),
|
|
Service: svc,
|
|
})
|
|
if err != nil {
|
|
log.Fatalf("failed to build active-scheduler zrpc server: %v", err)
|
|
}
|
|
defer server.Stop()
|
|
|
|
go func() {
|
|
log.Printf("active-scheduler zrpc service starting on %s", listenOn)
|
|
server.Start()
|
|
}()
|
|
|
|
<-ctx.Done()
|
|
log.Println("active-scheduler service stopping")
|
|
}
|