后端:
1. LLM 客户端从 newAgent/llm 提升为 infra/llm 基础设施层
- 删除 backend/newAgent/llm/(ark.go / ark_adapter.go / client.go / json.go)
- 等价迁移至 backend/infra/llm/,所有 newAgent node 与 service 统一改引用 infrallm
- 消除 newAgent 对模型客户端的私有依赖,为 memory / websearch 等多模块复用铺路
2. RAG 基础设施完成可运行态接入(factory / runtime / observer / service 四层成型)
- 新建 backend/infra/rag/factory.go / runtime.go / observe.go / observer.go /
service.go:工厂创建、运行时生命周期、轻量观测接口、检索服务门面
- 更新 infra/rag/config/config.go:补齐 Milvus / Embed / Reranker 全部配置项与默认值
- 更新 infra/rag/embed/eino_embedder.go:增强 Eino embedding 适配,支持 BaseURL / APIKey 环境变量 / 超时 /
维度等参数
- 更新 infra/rag/store/milvus_store.go:完整实现 Milvus 向量存储(建集合 / 建 Index / Upsert / Search /
Delete),支持 COSINE / L2 / IP 度量
- 更新 infra/rag/core/pipeline.go:适配 Runtime 接口,Pipeline 由 factory 注入而非手动拼装
- 更新 infra/rag/corpus/memory_corpus.go / vector_store.go:对接 Memory 模块数据源与 Store 接口扩展
3. Memory 模块从 Day1 骨架升级为 Day2 完整可运行态
- 新建 memory/module.go:统一门面 Module,对外封装 EnqueueExtract / ReadService / ManageService / WithTx /
StartWorker,启动层只依赖这一个入口
- 新建 memory/orchestrator/llm_write_orchestrator.go:LLM 驱动的记忆抽取编排器,替代原 mock 抽取
- 新建 memory/service/read_service.go:按用户开关过滤 + 轻量重排 + 访问时间刷新的读取链路
- 新建 memory/service/manage_service.go:记忆管理面能力(列出 / 软删除 / 开关读写),删除同步写审计日志
- 新建 memory/service/common.go:服务层公共工具
- 新建 memory/worker/loop.go:后台轮询循环 RunPollingLoop,定时抢占 pending 任务并推进
- 新建 memory/utils/audit.go / settings.go:审计日志构造、用户设置过滤等纯函数
- 更新 memory/model/item.go / job.go / settings.go / config.go / status.go:补齐 DTO 字段与状态常量
- 更新 memory/repo/item_repo.go / job_repo.go / audit_repo.go / settings_repo.go:补齐 CRUD 与查询能力
- 更新 memory/worker/runner.go:Runner 对接 Module 与 LLM 抽取器,任务状态机完整化
- 更新 memory/README.md:同步模块现状说明
4. newAgent 接入 Memory 读取注入与工具注册依赖预埋
- 新建 service/agentsvc/agent_memory.go:定义 MemoryReader 接口 + injectMemoryContext,在 graph
执行前统一补充记忆上下文
- 更新 service/agentsvc/agent.go:新增 memoryReader 字段与 SetMemoryReader 方法
- 更新 service/agentsvc/agent_newagent.go:调用 injectMemoryContext 注入 pinned block,检索失败仅降级不阻断主链路
- 更新 newAgent/tools/registry.go:新增 DefaultRegistryDeps(含 RAGRuntime),工具注册表支持依赖注入
5. 启动流程与事件处理器接线更新
- 更新 cmd/start.go:初始化 RAG Runtime → Memory Module → 注册事件处理器 → 启动 Worker 后台轮询
- 更新 service/events/memory_extract_requested.go:改用 memory.Module.WithTx(tx) 统一门面,事件处理器不再直接依赖
repo/service 内部包
6. 缓存插件与配置同步
- 更新 middleware/cache_deleter.go:静默忽略 MemoryJob / MemoryItem / MemoryAuditLog / MemoryUserSetting
等新模型,避免日志刷屏;清理冗余注释
- 更新 config.example.yaml:补齐 rag / memory / websearch 配置段及默认值
- 更新 go.mod / go.sum:新增 eino-ext/openai / json-patch / go-openai 依赖
前端:无改动
仓库:无改动
168 lines · 6.6 KiB · Go
package cmd
|
||
|
||
import (
|
||
"context"
|
||
"fmt"
|
||
"log"
|
||
|
||
"github.com/LoveLosita/smartflow/backend/api"
|
||
"github.com/LoveLosita/smartflow/backend/dao"
|
||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||
infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
|
||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||
infrarag "github.com/LoveLosita/smartflow/backend/infra/rag"
|
||
ragconfig "github.com/LoveLosita/smartflow/backend/infra/rag/config"
|
||
"github.com/LoveLosita/smartflow/backend/inits"
|
||
"github.com/LoveLosita/smartflow/backend/memory"
|
||
"github.com/LoveLosita/smartflow/backend/middleware"
|
||
newagentconv "github.com/LoveLosita/smartflow/backend/newAgent/conv"
|
||
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
|
||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||
"github.com/LoveLosita/smartflow/backend/routers"
|
||
"github.com/LoveLosita/smartflow/backend/service"
|
||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||
"github.com/spf13/viper"
|
||
)
|
||
|
||
// loadConfig 加载应用配置。
|
||
func loadConfig() error {
|
||
viper.SetConfigName("config")
|
||
viper.SetConfigType("yaml")
|
||
viper.AddConfigPath(".")
|
||
if err := viper.ReadInConfig(); err != nil {
|
||
return fmt.Errorf("failed to read config file: %w", err)
|
||
}
|
||
log.Println("Config loaded successfully")
|
||
return nil
|
||
}
|
||
|
||
// Start is the application entry point. It loads configuration, wires up
// every layer (infra -> DAO -> service -> API -> routers) in dependency
// order, and finally blocks in routers.StartEngine. Any initialization
// failure is fatal: the process exits via log.Fatalf.
func Start() {
	if err := loadConfig(); err != nil {
		log.Fatalf("Failed to load config: %v", err)
	}

	// Core infrastructure: database, Redis, rate limiter.
	db, err := inits.ConnectDB()
	if err != nil {
		log.Fatalf("Failed to connect to database: %v", err)
	}

	rdb := inits.InitRedis()
	limiter := pkg.NewRateLimiter(rdb)

	// Eino model hub; aiHub.Worker is later wrapped as the LLM client for
	// the memory module.
	aiHub, err := inits.InitEino()
	if err != nil {
		log.Fatalf("Failed to initialize Eino: %v", err)
	}

	// RAG runtime is optional; when disabled ragRuntime stays nil and is
	// passed through to downstream consumers as-is (they are expected to
	// tolerate a nil runtime — TODO confirm in memory.NewModule and the
	// tool registry).
	ragCfg := ragconfig.LoadFromViper()
	var ragRuntime infrarag.Runtime
	if ragCfg.Enabled {
		// 1. No project-wide observability platform exists yet, so a
		//    lightweight Observer is injected here.
		// 2. RAG internals depend only on the Observer interface; once a
		//    unified logging/metrics system lands, only this spot changes.
		// 3. This avoids RAG growing its own disconnected logging stack.
		ragLogger := log.Default()
		ragRuntime, err = infrarag.NewRuntimeFromConfig(context.Background(), ragCfg, infrarag.FactoryDeps{
			Logger:   ragLogger,
			Observer: infrarag.NewLoggerObserver(ragLogger),
		})
		if err != nil {
			log.Fatalf("Failed to initialize RAG runtime: %v", err)
		}
		log.Printf("RAG runtime initialized: store=%s embed=%s reranker=%s", ragCfg.Store, ragCfg.EmbedProvider, ragCfg.RerankerProvider)
	} else {
		log.Println("RAG runtime is disabled")
	}

	// 1. The memory module exposes a single facade to the startup layer.
	// 2. If a unified DI container is introduced later, inject this facade
	//    rather than exposing internal repos/services.
	memoryCfg := memory.LoadConfigFromViper()
	memoryModule := memory.NewModule(db, infrallm.WrapArkClient(aiHub.Worker), ragRuntime, memoryCfg)

	// DAO layer initialization.
	cacheRepo := dao.NewCacheDAO(rdb)
	agentCacheRepo := dao.NewAgentCache(rdb)
	// Cache plugin registration error is deliberately ignored; the app can
	// run without the GORM cache plugin — TODO confirm this is intentional.
	_ = db.Use(middleware.NewGormCachePlugin(cacheRepo))
	userRepo := dao.NewUserDAO(db)
	taskRepo := dao.NewTaskDAO(db)
	courseRepo := dao.NewCourseDAO(db)
	taskClassRepo := dao.NewTaskClassDAO(db)
	scheduleRepo := dao.NewScheduleDAO(db)
	manager := dao.NewManager(db)
	agentRepo := dao.NewAgentDAO(db)
	outboxRepo := outboxinfra.NewRepository(db)

	// Outbox generic event bus wiring (phase two):
	// 1. load Kafka config;
	// 2. create the infra-level EventBus;
	// 3. explicitly register business event handlers;
	// 4. start the bus's background dispatch/consume loops.
	kafkaCfg := kafkabus.LoadConfig()
	eventBus, err := outboxinfra.NewEventBus(outboxRepo, kafkaCfg)
	if err != nil {
		log.Fatalf("Failed to initialize outbox event bus: %v", err)
	}
	if eventBus != nil {
		// 1. All business event handlers are registered before the bus starts.
		// 2. The memory handler is wired through memoryModule as well, so the
		//    startup layer never touches memory internals.
		if err = eventsvc.RegisterChatHistoryPersistHandler(eventBus, outboxRepo, manager); err != nil {
			log.Fatalf("Failed to register chat history event handler: %v", err)
		}
		if err = eventsvc.RegisterTaskUrgencyPromoteHandler(eventBus, outboxRepo, manager); err != nil {
			log.Fatalf("Failed to register task urgency promote event handler: %v", err)
		}
		if err = eventsvc.RegisterChatTokenUsageAdjustHandler(eventBus, outboxRepo, manager); err != nil {
			log.Fatalf("Failed to register chat token usage adjust event handler: %v", err)
		}
		if err = eventsvc.RegisterAgentStateSnapshotHandler(eventBus, outboxRepo, manager); err != nil {
			log.Fatalf("Failed to register agent state snapshot event handler: %v", err)
		}
		if err = eventsvc.RegisterMemoryExtractRequestedHandler(eventBus, outboxRepo, memoryModule); err != nil {
			log.Fatalf("Failed to register memory extract event handler: %v", err)
		}
		eventBus.Start(context.Background())
		// Runs when Start returns, i.e. when StartEngine stops serving.
		defer eventBus.Close()
		log.Println("Outbox event bus started")
	} else {
		// NewEventBus may return (nil, nil) when the bus is disabled by
		// config — TODO confirm against outboxinfra.NewEventBus.
		log.Println("Outbox event bus is disabled")
	}

	// Background polling worker for pending memory-extract jobs.
	memoryModule.StartWorker(context.Background())

	// Service layer initialization.
	userService := service.NewUserService(userRepo, cacheRepo)
	taskSv := service.NewTaskService(taskRepo, cacheRepo, eventBus)
	courseService := service.NewCourseService(courseRepo, scheduleRepo)
	taskClassService := service.NewTaskClassService(taskClassRepo, cacheRepo, scheduleRepo, manager)
	scheduleService := service.NewScheduleService(scheduleRepo, userRepo, taskClassRepo, manager, cacheRepo)
	agentService := service.NewAgentServiceWithSchedule(aiHub, agentRepo, taskRepo, cacheRepo, agentCacheRepo, eventBus, scheduleService)

	// newAgent dependency wiring: state store, tool registry (with the
	// optional RAG runtime), schedule provider/persistor, and the memory
	// reader used to inject memory context before graph execution.
	agentService.SetAgentStateStore(dao.NewAgentStateStoreAdapter(cacheRepo))
	agentService.SetToolRegistry(newagenttools.NewDefaultRegistryWithDeps(newagenttools.DefaultRegistryDeps{
		RAGRuntime: ragRuntime,
	}))
	agentService.SetScheduleProvider(newagentconv.NewScheduleProvider(scheduleRepo, taskClassRepo))
	agentService.SetSchedulePersistor(newagentconv.NewSchedulePersistorAdapter(manager))
	agentService.SetMemoryReader(memoryModule)

	// API layer initialization.
	userApi := api.NewUserHandler(userService)
	taskApi := api.NewTaskHandler(taskSv)
	courseApi := api.NewCourseHandler(courseService)
	taskClassApi := api.NewTaskClassHandler(taskClassService)
	scheduleApi := api.NewScheduleAPI(scheduleService)
	agentApi := api.NewAgentHandler(agentService)
	handlers := &api.ApiHandlers{
		UserHandler:      userApi,
		TaskHandler:      taskApi,
		TaskClassHandler: taskClassApi,
		CourseHandler:    courseApi,
		ScheduleHandler:  scheduleApi,
		AgentHandler:     agentApi,
	}

	// Router registration and blocking HTTP server start.
	r := routers.RegisterRouters(handlers, cacheRepo, userRepo, limiter)
	routers.StartEngine(r)
}
|