Files
smartmate/backend/cmd/start.go
Losita 959049db42 Version: 0.4.9.dev.260309
feat: 🗄️ 新增自动建表功能

* 新增项目启动时自动建表能力,减少手动初始化数据库步骤
* 解决 `agent_chat` 与 `chat_history` 结构体互相持有对方结构体用于 `preload` 导致的循环依赖问题
* 修复因结构体互相依赖引发的建表失败问题,保证数据库初始化流程稳定

feat: 🐳 Docker Compose 引入 Kafka 分区自动初始化

* 更新 `docker-compose` 配置,引入 Kafka partition 自动初始化脚本
* 保证服务启动后 Topic 即具备可用 partition,实现开箱即用
* 修复转移环境后 MySQL 等容器数据无法持久化的问题,统一改为使用命名卷进行数据持久化

docs: 📚 补充 Outbox + Kafka 持久化链路注释

* 为 Outbox + Kafka 消息持久化链路补充详细代码注释
* 提升异步消息链路的可读性与维护性
* 当前代码 Review 进度约 50%

undo: ⚠️ Kafka 初始化阶段出现消息短暂堆积

* 初次初始化项目时观察到消息在 Kafka 中短暂堆积现象
* 后续被消费者一次性消费且未再次复现
* 已在生产者启动、消费者启动以及消息消费流程中增加控制台日志输出,降低系统黑箱程度
* 后续若条件允许将进一步排查该现象的触发原因
2026-03-09 23:25:25 +08:00

107 lines
3.2 KiB
Go

package cmd
import (
"context"
"fmt"
"log"
"github.com/LoveLosita/smartflow/backend/api"
"github.com/LoveLosita/smartflow/backend/dao"
"github.com/LoveLosita/smartflow/backend/inits"
kafkabus "github.com/LoveLosita/smartflow/backend/kafka"
"github.com/LoveLosita/smartflow/backend/middleware"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/routers"
"github.com/LoveLosita/smartflow/backend/service"
"github.com/spf13/viper"
)
// loadConfig 加载应用配置。
// loadConfig reads the application's YAML configuration ("config.yaml")
// from the current working directory into viper. It returns a wrapped
// error when the file cannot be read, and logs a confirmation on success.
func loadConfig() error {
	viper.SetConfigName("config")
	viper.SetConfigType("yaml")
	viper.AddConfigPath(".")

	err := viper.ReadInConfig()
	if err != nil {
		return fmt.Errorf("failed to read config file: %w", err)
	}

	log.Println("Config loaded successfully")
	return nil
}
// Start 是应用启动入口。
// Start is the application entry point. It loads configuration, connects the
// infrastructure (MySQL, Redis, Eino, Kafka), wires the DAO → Service → API
// layers, then registers the routers and starts the HTTP engine. Any failure
// during initialization aborts the process via log.Fatalf.
func Start() {
	if err := loadConfig(); err != nil {
		log.Fatalf("Failed to load config: %v", err)
	}

	// Infrastructure: database, Redis (also backing the rate limiter), and
	// the Eino AI hub.
	db, err := inits.ConnectDB()
	if err != nil {
		log.Fatalf("Failed to connect to database: %v", err)
	}
	rdb := inits.InitRedis()
	limiter := pkg.NewRateLimiter(rdb)
	aiHub, err := inits.InitEino()
	if err != nil {
		log.Fatalf("Failed to initialize Eino: %v", err)
	}

	// DAO layer.
	cacheRepo := dao.NewCacheDAO(rdb)
	agentCacheRepo := dao.NewAgentCache(rdb)
	// Register the GORM cache plugin. Previously the error was discarded
	// (`_ = db.Use(...)`); a failed registration silently disables query
	// caching, so surface it. The app still starts without the cache,
	// matching the original continue-on-failure behavior.
	if err := db.Use(middleware.NewGormCachePlugin(cacheRepo)); err != nil {
		log.Printf("Warning: failed to register GORM cache plugin, query cache disabled: %v", err)
	}
	userRepo := dao.NewUserDAO(db)
	taskRepo := dao.NewTaskDAO(db)
	courseRepo := dao.NewCourseDAO(db)
	taskClassRepo := dao.NewTaskClassDAO(db)
	scheduleRepo := dao.NewScheduleDAO(db)
	manager := dao.NewManager(db)
	agentRepo := dao.NewAgentDAO(db)
	outboxRepo := dao.NewOutboxDAO(db)

	// Outbox async pipeline wiring:
	// - load the Kafka configuration
	// - build the producer/consumer pipeline
	// - start the dispatch/consume background loops
	// A nil pipeline means Kafka is disabled by configuration.
	kafkaCfg := kafkabus.LoadConfig()
	asyncPipeline, err := service.NewAgentAsyncPipeline(outboxRepo, kafkaCfg)
	if err != nil {
		log.Fatalf("Failed to initialize Kafka async pipeline: %v", err)
	}
	if asyncPipeline != nil {
		asyncPipeline.Start(context.Background())
		// Close runs when Start returns, i.e. when StartEngine exits.
		defer asyncPipeline.Close()
		log.Println("Kafka async pipeline started")
	} else {
		log.Println("Kafka async pipeline is disabled")
	}

	// Service layer.
	userService := service.NewUserService(userRepo, cacheRepo)
	taskSv := service.NewTaskService(taskRepo, cacheRepo)
	courseService := service.NewCourseService(courseRepo, scheduleRepo)
	taskClassService := service.NewTaskClassService(taskClassRepo, cacheRepo, scheduleRepo, manager)
	scheduleService := service.NewScheduleService(scheduleRepo, userRepo, taskClassRepo, manager, cacheRepo)
	agentService := service.NewAgentService(aiHub, agentRepo, agentCacheRepo, asyncPipeline)

	// API layer.
	userApi := api.NewUserHandler(userService)
	taskApi := api.NewTaskHandler(taskSv)
	courseApi := api.NewCourseHandler(courseService)
	taskClassApi := api.NewTaskClassHandler(taskClassService)
	scheduleApi := api.NewScheduleAPI(scheduleService)
	agentApi := api.NewAgentHandler(agentService)
	handlers := &api.ApiHandlers{
		UserHandler:      userApi,
		TaskHandler:      taskApi,
		TaskClassHandler: taskClassApi,
		CourseHandler:    courseApi,
		ScheduleHandler:  scheduleApi,
		AgentHandler:     agentApi,
	}

	// Register routes and block serving HTTP traffic.
	r := routers.RegisterRouters(handlers, cacheRepo, limiter)
	routers.StartEngine(r)
}