Backend:
1. Phase 6 CP4/CP5: directory consolidation and shared-boundary purification
- Consolidated the backend root into five top-level directories: services, client, gateway, cmd, and shared (see the layout sketch after this list)
- Folded bootstrap, inits, infra/kafka, infra/outbox, conv, respond, pkg, and middleware into the new structure; removed the old root-level implementations and empty directories
- Pushed utils down into services/userauth/internal/auth and logic down into services/schedule/core/planning
- Consolidated the migration-period runtime bridge implementations into services/runtime/{conv,dao,eventsvc,model}; deleted shared/legacy and the old service implementations that are no longer imported anywhere
- Narrowed gateway/shared/respond to an HTTP/Gin error write-back adapter; shared/respond now keeps only the shared error semantics and status-code mapping
- Moved the HTTP IdempotencyMiddleware and RateLimitMiddleware into gateway/middleware
- Pushed GormCachePlugin down into shared/infra/gormcache, the shared RateLimiter into shared/infra/ratelimit, and the agent token budget into services/agent/shared
- Deleted the InitEino compatibility shim; shrank cmd/internal/coreinit to only the residual domain-initialization semantics left over from the old composite shell
- Updated the microservice migration plan and the desktop checklist, filling in the current CP4/CP5 cutover points, the final directory layout, and the verification results
- Completed go test ./..., git diff --check, and a final real smoke run: health, register/login, task/create+get, schedule/today, task-class/list, memory/items, and agent chat/meta/timeline/context-stats all returned 200; the merged SSE result was CP5_OK with exactly one [DONE] (a verification sketch follows the attached file)
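
A rough sketch of the consolidated layout, assembled only from the paths named above (children are illustrative, not exhaustive):

backend/
├── services/
│   ├── agent/shared/                        # agent token budget
│   ├── memory/                              # service shown in the attached entry file
│   ├── runtime/{conv,dao,eventsvc,model}/   # migration-period bridges
│   ├── schedule/core/planning/              # former logic
│   └── userauth/internal/auth/              # former utils
├── client/
├── gateway/
│   └── middleware/                          # IdempotencyMiddleware, RateLimitMiddleware
├── cmd/
│   └── internal/coreinit/                   # residual domain init
└── shared/
    ├── respond/                             # shared error semantics + status mapping
    └── infra/{kafka,outbox,gormcache,ratelimit}/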
Attached file (Go, 139 lines, 4.6 KiB): the startup entry of the migrated memory service.
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"os/signal"
	"syscall"

	llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
	memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
	memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
	memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
	memoryrpc "github.com/LoveLosita/smartflow/backend/services/memory/rpc"
	memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
	ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
	ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
	"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
	einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
	kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
	outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
	"github.com/spf13/viper"
)

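// main wires the memory service together end to end: shared config, database,
// LLM client, optional RAG runtime, outbox repository, Kafka config, and the
// zrpc server, then runs until interrupted.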
func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

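	// Dependencies are built in order: DB first, then the LLM client and the
	// (optional) RAG runtime that the memory module consumes.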
	db, err := memorydao.OpenDBFromConfig()
	if err != nil {
		log.Fatalf("failed to connect memory database: %v", err)
	}

	llmClient, err := buildMemoryLLMClient()
	if err != nil {
		log.Fatalf("failed to initialize memory LLM client: %v", err)
	}

	ragRuntime, err := buildMemoryRAGRuntime(ctx)
	if err != nil {
		log.Fatalf("failed to initialize memory RAG runtime: %v", err)
	}

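	// Assemble the memory module with its logger-based observer and metrics registry.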
	memoryCfg := memorymodule.LoadConfigFromViper()
	memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
	memoryMetrics := memoryobserve.NewMetricsRegistry()
	module := memorymodule.NewModuleWithObserve(
		db,
		llmClient,
		ragRuntime,
		memoryCfg,
		memorymodule.ObserveDeps{
			Observer: memoryObserver,
			Metrics:  memoryMetrics,
		},
	)

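	// The outbox repository and Kafka config pair up for event publishing
	// (a transactional-outbox arrangement, judging by the wiring here).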
	outboxRepo := outboxinfra.NewRepository(db)
	svc, err := memorysv.NewService(memorysv.Options{
		Module:      module,
		OutboxRepo:  outboxRepo,
		KafkaConfig: kafkabus.LoadConfig(),
	})
	if err != nil {
		log.Fatalf("failed to initialize memory service: %v", err)
	}
	defer svc.Close()

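	// Background workers are bound to the signal-aware ctx and stop on shutdown.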
	svc.StartWorkers(ctx)

	server, listenOn, err := memoryrpc.NewServer(memoryrpc.ServerOptions{
		ListenOn: viper.GetString("memory.rpc.listenOn"),
		Timeout:  viper.GetDuration("memory.rpc.timeout"),
		Service:  svc,
	})
	if err != nil {
		log.Fatalf("failed to build memory zrpc server: %v", err)
	}
	defer server.Stop()

	go func() {
		log.Printf("memory zrpc service starting on %s", listenOn)
		server.Start()
	}()

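	// Block until SIGINT/SIGTERM; the deferred Stop/Close calls then unwind in reverse order.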
	<-ctx.Done()
	log.Println("memory service stopping")
}

// buildMemoryLLMClient initializes the model client used by the memory extraction pipeline.
//
// Notes:
// 1. CP1 reuses the existing llm-service canonical entry point rather than rebuilding a model-call wrapper inside the memory service;
// 2. This startup entry, cmd/start.go, and cmd/active-scheduler all need Eino initialization; if a third duplicated assembly appears later, extract a common bootstrap;
// 3. ProClient is returned because the existing memory.Module only needs an llmservice.Client, not the full Service.
func buildMemoryLLMClient() (*llmservice.Client, error) {
	aiHub, err := einoinfra.InitEino()
	if err != nil {
		return nil, err
	}
	llmService := llmservice.New(llmservice.Options{
		AIHub:             aiHub,
		APIKey:            os.Getenv("ARK_API_KEY"),
		BaseURL:           viper.GetString("agent.baseURL"),
		CourseVisionModel: viper.GetString("courseImport.visionModel"),
	})
	return llmService.ProClient(), nil
}

// buildMemoryRAGRuntime initializes the RAG runtime used by memory retrieval and vector sync.
//
// Why no common layer has been extracted yet:
// 1. This round migrates only the memory capability domain, so the existing assembly path in cmd/start.go is left untouched;
// 2. The canonical RAG entry point already lives in services/rag; this function only does startup-level config reading and log wrapping;
// 3. Once the agent service has also been migrated out, a common bootstrap for llm/rag startup assembly will be evaluated.
func buildMemoryRAGRuntime(ctx context.Context) (ragservice.Runtime, error) {
	ragCfg := ragconfig.LoadFromViper()
	if !ragCfg.Enabled {
		log.Println("RAG service is disabled for memory")
		return nil, nil
	}

	ragLogger := log.Default()
	ragService, err := ragservice.NewFromConfig(ctx, ragCfg, ragservice.FactoryDeps{
		Logger:   ragLogger,
		Observer: ragservice.NewLoggerObserver(ragLogger),
	})
	if err != nil {
		return nil, fmt.Errorf("build memory RAG service failed: %w", err)
	}
	log.Printf("Memory RAG runtime initialized: store=%s embed=%s reranker=%s", ragCfg.Store, ragCfg.EmbedProvider, ragCfg.RerankerProvider)
	return ragService.Runtime(), nil
}
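
For reference, a minimal sketch of the CP5 smoke pass described in the changelog above. The gateway address, endpoint paths, and use of plain GETs are illustrative assumptions (the real routes, methods, and auth come from the desktop checklist); it only mirrors the two acceptance criteria: every listed endpoint answers 200, and the agent SSE stream carries exactly one [DONE].

// smoke sketch (hypothetical address and paths; see the note above)
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:8080" // hypothetical gateway address

	// Status checks: each endpoint in the smoke list must answer 200.
	for _, path := range []string{"/health", "/schedule/today", "/task-class/list", "/memory/items"} {
		resp, err := http.Get(base + path)
		if err != nil {
			log.Fatalf("GET %s: %v", path, err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			log.Fatalf("GET %s: want 200, got %d", path, resp.StatusCode)
		}
	}

	// SSE check: stream the agent chat response and require exactly one [DONE].
	resp, err := http.Get(base + "/agent/chat") // hypothetical SSE endpoint
	if err != nil {
		log.Fatalf("agent chat: %v", err)
	}
	defer resp.Body.Close()

	done := 0
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if strings.TrimSpace(strings.TrimPrefix(scanner.Text(), "data:")) == "[DONE]" {
			done++
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("read SSE: %v", err)
	}
	if done != 1 {
		log.Fatalf("SSE: want exactly one [DONE], got %d", done)
	}
	fmt.Println("CP5_OK")
}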