Backend:
1. Phase 6 CP4/CP5: directory consolidation and shared-boundary cleanup
- Consolidated the backend root into five top-level directories: services, client, gateway, cmd, and shared
- Folded bootstrap, inits, infra/kafka, infra/outbox, conv, respond, pkg, and middleware into the new layout; removed the old root-level implementations and empty directories
- Moved utils down into services/userauth/internal/auth and logic down into services/schedule/core/planning
- Consolidated the migration-period runtime bridge implementations into services/runtime/{conv,dao,eventsvc,model}; deleted shared/legacy and the old service implementations that are no longer imported
- Narrowed gateway/shared/respond to an HTTP/Gin error write-back adapter, while shared/respond keeps only the shared error semantics and status mapping (a sketch of this split follows the list)
- Moved the HTTP IdempotencyMiddleware and RateLimitMiddleware into gateway/middleware (see the middleware sketch below)
- Pushed GormCachePlugin down into shared/infra/gormcache, the shared RateLimiter into shared/infra/ratelimit, and the agent token budget into services/agent/shared
- Deleted the InitEino compatibility shim and shrank cmd/internal/coreinit to keep only the residual domain-initialization semantics of the old composite shell
- Updated the microservice migration plan and desktop checklist with the current CP4/CP5 cutover points, the final directory layout, and the verification results
- Ran go test ./..., git diff --check, and a final real smoke pass (a minimal probe is sketched after the list): health, register/login, task/create+get, schedule/today, task-class/list, memory/items, and agent chat/meta/timeline/context-stats all returned 200, and the merged SSE result was CP5_OK with exactly one [DONE]
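As a minimal sketch of the respond split, assuming a hypothetical AppError type carrying the shared error semantics (the real types in shared/respond will differ): shared code only maps errors to codes and statuses, and only the gateway adapter knows about Gin.

// Hypothetical sketch of the gateway-side error write-back adapter.
// shared/respond keeps only error semantics and status mapping; the
// Gin-specific write-back lives in gateway/shared/respond.
package respond

import (
	"errors"
	"net/http"

	"github.com/gin-gonic/gin"
)

// AppError is a stand-in for the shared error-semantics type.
type AppError struct {
	Code    string
	Status  int
	Message string
}

func (e *AppError) Error() string { return e.Message }

// WriteError adapts a shared error into a Gin JSON response.
func WriteError(c *gin.Context, err error) {
	var appErr *AppError
	if errors.As(err, &appErr) {
		c.JSON(appErr.Status, gin.H{"code": appErr.Code, "message": appErr.Message})
		return
	}
	c.JSON(http.StatusInternalServerError, gin.H{"code": "internal", "message": "internal error"})
}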
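The idempotency middleware now in gateway/middleware can be sketched the same way; this version uses an in-memory map as a stand-in for whatever backing store the real implementation uses, and the header name is an assumption.

// Hedged sketch of an idempotency middleware: reject a request whose
// Idempotency-Key was already seen, so retried writes are not applied twice.
package middleware

import (
	"net/http"
	"sync"

	"github.com/gin-gonic/gin"
)

func IdempotencyMiddleware() gin.HandlerFunc {
	var (
		mu   sync.Mutex
		seen = make(map[string]bool) // stand-in for e.g. a Redis SETNX with TTL
	)
	return func(c *gin.Context) {
		key := c.GetHeader("Idempotency-Key")
		if key == "" {
			c.Next() // no key supplied: pass through
			return
		}
		mu.Lock()
		dup := seen[key]
		seen[key] = true
		mu.Unlock()
		if dup {
			c.AbortWithStatusJSON(http.StatusConflict, gin.H{"code": "duplicate_request"})
			return
		}
		c.Next()
	}
}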
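A minimal sketch of the kind of smoke probe run at the end, assuming the gateway listens on localhost:8080; the authenticated endpoints (register/login, task/create+get, agent chat) are omitted here because they need request bodies and tokens.

// Hypothetical smoke probe: GET a few public routes and print the status.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:8080" // assumed gateway address
	paths := []string{"/health", "/schedule/today", "/task-class/list", "/memory/items"}
	for _, p := range paths {
		resp, err := http.Get(base + p)
		if err != nil {
			fmt.Printf("%s: %v\n", p, err)
			continue
		}
		resp.Body.Close()
		fmt.Printf("%s: %d\n", p, resp.StatusCode) // expect 200 on every route
	}
}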
main.go (84 lines, 2.6 KiB, Go):
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
	taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
	taskrpc "github.com/LoveLosita/smartflow/backend/services/task/rpc"
	tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
	"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
	gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
	kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
	outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
	"github.com/spf13/viper"
)

func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	// Stop on Ctrl-C or SIGTERM; this context drives graceful shutdown below.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	db, err := taskdao.OpenDBFromConfig()
	if err != nil {
		log.Fatalf("failed to connect task database: %v", err)
	}
	redisClient, err := taskdao.OpenRedisFromConfig()
	if err != nil {
		log.Fatalf("failed to connect task redis: %v", err)
	}
	defer redisClient.Close()

	// Register the shared GORM cache plugin so writes invalidate cached entries.
	cacheRepo := rootdao.NewCacheDAO(redisClient)
	if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
		log.Fatalf("failed to initialize task cache deleter: %v", err)
	}

	taskRepo := taskdao.NewTaskDAO(db)
	outboxRepo := outboxinfra.NewRepository(db)
	eventBus, err := outboxinfra.NewEventBus(outboxRepo, kafkabus.LoadConfig())
	if err != nil {
		log.Fatalf("failed to initialize task outbox bus: %v", err)
	}

	svc := tasksv.NewTaskService(taskRepo, cacheRepo, eventBus)
	// During migration the task service still maintains the active-scheduler
	// due job on a best-effort basis; this cross-domain DAO will be removed
	// once the interaction moves to RPC/events.
	svc.SetActiveScheduleDAO(rootdao.NewActiveScheduleDAO(db))

	if eventBus != nil {
		if err := tasksv.RegisterTaskUrgencyPromoteHandler(eventBus, outboxRepo, taskRepo); err != nil {
			log.Fatalf("failed to register task outbox handler: %v", err)
		}
		eventBus.Start(ctx)
		defer eventBus.Close()
		log.Println("Task outbox consumer started")
	} else {
		log.Println("Task outbox consumer is disabled")
	}

	server, listenOn, err := taskrpc.NewServer(taskrpc.ServerOptions{
		ListenOn: viper.GetString("task.rpc.listenOn"),
		Timeout:  viper.GetDuration("task.rpc.timeout"),
		Service:  svc,
	})
	if err != nil {
		log.Fatalf("failed to build task zrpc server: %v", err)
	}
	defer server.Stop()

	go func() {
		log.Printf("task zrpc service starting on %s", listenOn)
		server.Start()
	}()

	<-ctx.Done()
	log.Println("task service stopping")
}
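The eventBus built from outboxinfra.NewEventBus above follows the transactional-outbox pattern: events are persisted alongside the business write and relayed to the broker by a background consumer. Below is a self-contained, in-memory sketch of that relay loop with hypothetical types; the real repository and bus live in shared/infra/outbox and publish to Kafka.

// Minimal illustration of an outbox relay: rows are appended with the
// business write, then a poller publishes unsent rows and marks them sent.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Event is a minimal outbox row.
type Event struct {
	ID      int64
	Topic   string
	Payload string
	Sent    bool
}

// Outbox is an in-memory stand-in for the outbox table.
type Outbox struct {
	mu   sync.Mutex
	rows []*Event
}

func (o *Outbox) Append(e *Event) {
	o.mu.Lock()
	defer o.mu.Unlock()
	e.ID = int64(len(o.rows) + 1)
	o.rows = append(o.rows, e)
}

// Relay polls unsent rows and publishes them until the context is done.
// In the real service the publish step is a Kafka produce.
func (o *Outbox) Relay(ctx context.Context, publish func(Event)) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			o.mu.Lock()
			for _, e := range o.rows {
				if !e.Sent {
					publish(*e)
					e.Sent = true
				}
			}
			o.mu.Unlock()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ob := &Outbox{}
	ob.Append(&Event{Topic: "task.urgency.promote", Payload: `{"task_id":1}`})

	ob.Relay(ctx, func(e Event) {
		fmt.Printf("published %s -> %s\n", e.Topic, e.Payload)
	})
}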