后端:
1.阶段 6 CP4/CP5 目录收口与共享边界纯化
- 将 backend 根目录收口为 services、client、gateway、cmd、shared 五个一级目录
- 收拢 bootstrap、inits、infra/kafka、infra/outbox、conv、respond、pkg、middleware,移除根目录旧实现与空目录
- 将 utils 下沉到 services/userauth/internal/auth,将 logic 下沉到 services/schedule/core/planning
- 将迁移期 runtime 桥接实现统一收拢到 services/runtime/{conv,dao,eventsvc,model},删除 shared/legacy 与未再被 import 的旧 service 实现
- 将 gateway/shared/respond 收口为 HTTP/Gin 错误写回适配,shared/respond 仅保留共享错误语义与状态映射
- 将 HTTP IdempotencyMiddleware 与 RateLimitMiddleware 收口到 gateway/middleware
- 将 GormCachePlugin 下沉到 shared/infra/gormcache,将共享 RateLimiter 下沉到 shared/infra/ratelimit,将 agent token budget 下沉到 services/agent/shared
- 删除 InitEino 兼容壳,收缩 cmd/internal/coreinit 仅保留旧组合壳残留域初始化语义
- 更新微服务迁移计划与桌面 checklist,补齐 CP4/CP5 当前切流点、目录终态与验证结果
- 完成 go test ./...、git diff --check 与最终真实 smoke;health、register/login、task/create+get、schedule/today、task-class/list、memory/items、agent chat/meta/timeline/context-stats 全部 200,SSE 合并结果为 CP5_OK 且 [DONE] 只有 1 个
78 lines · 2.5 KiB · Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"log"
|
|
"os"
|
|
"os/signal"
|
|
"syscall"
|
|
|
|
notificationdao "github.com/LoveLosita/smartflow/backend/services/notification/dao"
|
|
notificationrpc "github.com/LoveLosita/smartflow/backend/services/notification/rpc"
|
|
notificationsv "github.com/LoveLosita/smartflow/backend/services/notification/sv"
|
|
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
|
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
|
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
|
"github.com/spf13/viper"
|
|
)
|
|
|
|
func main() {
|
|
if err := bootstrap.LoadConfig(); err != nil {
|
|
log.Fatalf("failed to load config: %v", err)
|
|
}
|
|
|
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
|
defer stop()
|
|
|
|
db, err := notificationdao.OpenDBFromConfig()
|
|
if err != nil {
|
|
log.Fatalf("failed to connect notification database: %v", err)
|
|
}
|
|
|
|
channelDAO := notificationdao.NewChannelDAO(db)
|
|
recordDAO := notificationdao.NewRecordDAO(db)
|
|
svc, err := notificationsv.NewNotificationServiceWithFeishuWebhook(recordDAO, channelDAO, notificationsv.FeishuWebhookProviderOptions{
|
|
FrontendBaseURL: viper.GetString("notification.frontendBaseURL"),
|
|
}, notificationsv.ServiceOptions{})
|
|
if err != nil {
|
|
log.Fatalf("failed to initialize notification service: %v", err)
|
|
}
|
|
|
|
outboxRepo := outboxinfra.NewRepository(db)
|
|
eventBus, err := outboxinfra.NewEventBus(outboxRepo, kafkabus.LoadConfig())
|
|
if err != nil {
|
|
log.Fatalf("failed to initialize notification outbox bus: %v", err)
|
|
}
|
|
if eventBus != nil {
|
|
if err := notificationsv.RegisterFeishuRequestedHandler(eventBus, outboxRepo, svc); err != nil {
|
|
log.Fatalf("failed to register notification outbox handler: %v", err)
|
|
}
|
|
eventBus.Start(ctx)
|
|
defer eventBus.Close()
|
|
log.Println("Notification outbox consumer started")
|
|
} else {
|
|
log.Println("Notification outbox consumer is disabled")
|
|
}
|
|
|
|
svc.StartRetryLoop(ctx, viper.GetDuration("notification.retryScanEvery"), viper.GetInt("notification.retryBatchSize"))
|
|
log.Println("Notification retry scanner started")
|
|
|
|
server, listenOn, err := notificationrpc.NewServer(notificationrpc.ServerOptions{
|
|
ListenOn: viper.GetString("notification.rpc.listenOn"),
|
|
Timeout: viper.GetDuration("notification.rpc.timeout"),
|
|
Service: svc,
|
|
})
|
|
if err != nil {
|
|
log.Fatalf("failed to build notification zrpc server: %v", err)
|
|
}
|
|
defer server.Stop()
|
|
|
|
go func() {
|
|
log.Printf("notification zrpc service starting on %s", listenOn)
|
|
server.Start()
|
|
}()
|
|
|
|
<-ctx.Done()
|
|
log.Println("notification service stopping")
|
|
}
|