后端:
1.阶段 6 CP4/CP5 目录收口与共享边界纯化
- 将 backend 根目录收口为 services、client、gateway、cmd、shared 五个一级目录
- 收拢 bootstrap、inits、infra/kafka、infra/outbox、conv、respond、pkg、middleware,移除根目录旧实现与空目录
- 将 utils 下沉到 services/userauth/internal/auth,将 logic 下沉到 services/schedule/core/planning
- 将迁移期 runtime 桥接实现统一收拢到 services/runtime/{conv,dao,eventsvc,model},删除 shared/legacy 与未再被 import 的旧 service 实现
- 将 gateway/shared/respond 收口为 HTTP/Gin 错误写回适配,shared/respond 仅保留共享错误语义与状态映射
- 将 HTTP IdempotencyMiddleware 与 RateLimitMiddleware 收口到 gateway/middleware
- 将 GormCachePlugin 下沉到 shared/infra/gormcache,将共享 RateLimiter 下沉到 shared/infra/ratelimit,将 agent token budget 下沉到 services/agent/shared
- 删除 InitEino 兼容壳,收缩 cmd/internal/coreinit 仅保留旧组合壳残留域初始化语义
- 更新微服务迁移计划与桌面 checklist,补齐 CP4/CP5 当前切流点、目录终态与验证结果
- 完成 go test ./...、git diff --check 与最终真实 smoke;health、register/login、task/create+get、schedule/today、task-class/list、memory/items、agent chat/meta/timeline/context-stats 全部 200,SSE 合并结果为 CP5_OK 且 [DONE] 只有 1 个
69 lines
1.9 KiB
Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"log"
|
|
"os"
|
|
"os/signal"
|
|
"syscall"
|
|
|
|
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
|
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
|
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
|
schedulerpc "github.com/LoveLosita/smartflow/backend/services/schedule/rpc"
|
|
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
|
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
|
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
|
"github.com/spf13/viper"
|
|
)
|
|
|
|
func main() {
|
|
if err := bootstrap.LoadConfig(); err != nil {
|
|
log.Fatalf("failed to load config: %v", err)
|
|
}
|
|
|
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
|
defer stop()
|
|
|
|
db, err := scheduledao.OpenDBFromConfig()
|
|
if err != nil {
|
|
log.Fatalf("failed to connect schedule database: %v", err)
|
|
}
|
|
redisClient, err := scheduledao.OpenRedisFromConfig()
|
|
if err != nil {
|
|
log.Fatalf("failed to connect schedule redis: %v", err)
|
|
}
|
|
defer redisClient.Close()
|
|
|
|
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
|
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
|
log.Fatalf("failed to initialize schedule cache deleter: %v", err)
|
|
}
|
|
|
|
svc := schedulesv.NewScheduleService(
|
|
scheduledao.NewScheduleDAO(db),
|
|
rootdao.NewTaskClassDAO(db),
|
|
rootdao.NewManager(db),
|
|
cacheRepo,
|
|
)
|
|
svc.SetApplyAdapter(applyadapter.NewGormApplyAdapter(db))
|
|
|
|
server, listenOn, err := schedulerpc.NewServer(schedulerpc.ServerOptions{
|
|
ListenOn: viper.GetString("schedule.rpc.listenOn"),
|
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
|
Service: svc,
|
|
})
|
|
if err != nil {
|
|
log.Fatalf("failed to build schedule zrpc server: %v", err)
|
|
}
|
|
defer server.Stop()
|
|
|
|
go func() {
|
|
log.Printf("schedule zrpc service starting on %s", listenOn)
|
|
server.Start()
|
|
}()
|
|
|
|
<-ctx.Done()
|
|
log.Println("schedule service stopping")
|
|
}
|