Backend:
1. Standalone LLM service and unified billing egress: added `cmd/llm`, `client/llm`, and `services/llm/rpc`; filled in BillingContext, CreditBalanceGuard, price-rule resolution, stream usage aggregation, and `credit.charge.requested` outbox publishing. active-scheduler / agent / course / memory / gateway fallback now all call models through the llm zrpc service instead of each initializing models locally.
2. TokenStore consolidated into the authoritative Credit ledger: added credit account / ledger / product / order / price-rule / reward-rule capabilities plus a Redis snapshot cache; extended the tokenstore rpc/client to cover balance snapshots, consumption dashboards, products, orders, transaction records, price rules, and reward rules; wired up consumption of LLM charge events so Credit deductions are posted to the ledger (a hedged sketch of this event flow follows this list).
3. Old billing path retired and gateway cutover: the `/token-store` surface moved wholesale to `/credit-store`; agent chat dropped the legacy TokenQuotaGuard; userauth's CheckTokenQuota / AdjustTokenUsage are now deprecated; persisting chat history no longer syncs the old token quota ledger; course image-parsing requests now carry user_id so they fall under the new billing scope.

Frontend:
4. Plan Plaza switched from mock data to real APIs: added forum api/types; the home page supports real listings, tags, debounced search, likes, plan import, and plan publishing; the detail page fills in post details, the comment tree, replies, and comment deletion; an "at least one tag" constraint is enforced on both frontend and backend, with a default-tag fallback.
5. Store page moved to the Credit system with a redesigned layout: the header now shows the balance plus a Credit/Token consumption dashboard with 24h/7d/30d/all period switching; the package section shows both original and current prices; the history section lists the current user's Credit transactions with a "view more" option; visuals and interactions were tightened up accordingly.

Repository:
6. Config and local startup now cover llm / outbox orchestration: `config.example.yaml` adds llm rpc and unified outbox service configuration; `dev-common.ps1` includes llm in the multi-service dependencies and auto-creates Kafka topics; `docker-compose.yml` initializes the full set of outbox topics for agent/task/memory/active-scheduler/notification/taskclass-forum/llm/token-store.
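As a rough illustration of the llm → token-store billing handoff described in items 1-2, the sketch below shows one way a `credit.charge.requested` outbox message could be consumed and posted to the Credit ledger. All type, field, and function names here (ChargeRequestedEvent, CreditLedger, ApplyCharge, HandleChargeRequested) are assumptions for illustration only, not the actual definitions in `services/llm` or the token-store service.

```go
// Package creditcharge is a minimal sketch, assuming hypothetical types,
// of how a token-store consumer might turn a `credit.charge.requested`
// outbox event into a Credit ledger entry.
package creditcharge

import (
	"context"
	"encoding/json"
	"time"
)

// ChargeRequestedEvent is an assumed payload shape for the message the llm
// service publishes after aggregating stream usage and pricing it.
type ChargeRequestedEvent struct {
	EventID      string    `json:"event_id"`      // idempotency key for the ledger write
	UserID       string    `json:"user_id"`       // billing subject carried in BillingContext
	Model        string    `json:"model"`         // model that produced the usage
	PromptTokens int64     `json:"prompt_tokens"` // aggregated prompt tokens
	OutputTokens int64     `json:"output_tokens"` // aggregated completion tokens
	CreditCost   int64     `json:"credit_cost"`   // cost already resolved via price rules
	OccurredAt   time.Time `json:"occurred_at"`
}

// CreditLedger abstracts the token-store side that records Credit deductions;
// ApplyCharge is expected to be idempotent on EventID.
type CreditLedger interface {
	ApplyCharge(ctx context.Context, ev ChargeRequestedEvent) error
}

// HandleChargeRequested is what a Kafka consumer handler might do with one
// `credit.charge.requested` message: decode it, then post it to the ledger.
func HandleChargeRequested(ctx context.Context, ledger CreditLedger, payload []byte) error {
	var ev ChargeRequestedEvent
	if err := json.Unmarshal(payload, &ev); err != nil {
		return err
	}
	return ledger.ApplyCharge(ctx, ev)
}
```

Keeping the ledger write idempotent on the event ID matters because the outbox/Kafka path delivers at least once, so the same charge event may be consumed more than once.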
140 lines · 4.6 KiB · Go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"os/signal"
	"syscall"

	llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
	llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
	memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
	memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
	memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
	memoryrpc "github.com/LoveLosita/smartflow/backend/services/memory/rpc"
	memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
	ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
	ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
	"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
	kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
	outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
	"github.com/spf13/viper"
)

func main() {
	if err := bootstrap.LoadConfig(); err != nil {
		log.Fatalf("failed to load config: %v", err)
	}

	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	db, err := memorydao.OpenDBFromConfig()
	if err != nil {
		log.Fatalf("failed to connect memory database: %v", err)
	}

	llmClient, err := buildMemoryLLMClient()
	if err != nil {
		log.Fatalf("failed to initialize memory LLM client: %v", err)
	}

	ragRuntime, err := buildMemoryRAGRuntime(ctx)
	if err != nil {
		log.Fatalf("failed to initialize memory RAG runtime: %v", err)
	}

	memoryCfg := memorymodule.LoadConfigFromViper()
	memoryObserver := memoryobserve.NewLoggerObserver(log.Default())
	memoryMetrics := memoryobserve.NewMetricsRegistry()
	module := memorymodule.NewModuleWithObserve(
		db,
		llmClient,
		ragRuntime,
		memoryCfg,
		memorymodule.ObserveDeps{
			Observer: memoryObserver,
			Metrics:  memoryMetrics,
		},
	)

	outboxRepo := outboxinfra.NewRepository(db)
	svc, err := memorysv.NewService(memorysv.Options{
		Module:      module,
		OutboxRepo:  outboxRepo,
		KafkaConfig: kafkabus.LoadConfig(),
	})
	if err != nil {
		log.Fatalf("failed to initialize memory service: %v", err)
	}
	defer svc.Close()

	svc.StartWorkers(ctx)

	server, listenOn, err := memoryrpc.NewServer(memoryrpc.ServerOptions{
		ListenOn: viper.GetString("memory.rpc.listenOn"),
		Timeout:  viper.GetDuration("memory.rpc.timeout"),
		Service:  svc,
	})
	if err != nil {
		log.Fatalf("failed to build memory zrpc server: %v", err)
	}
	defer server.Stop()

	go func() {
		log.Printf("memory zrpc service starting on %s", listenOn)
		server.Start()
	}()

	<-ctx.Done()
	log.Println("memory service stopping")
}

// buildMemoryLLMClient initializes the model client used by the memory extraction pipeline.
//
// Notes:
// 1. CP1 first reused the existing llm-service canonical entry point rather than rebuilding a model-call wrapper inside the memory service;
// 2. Calls now go through the standalone llm zrpc client, so the memory process no longer initializes AIHub locally;
// 3. ProClient is returned because the existing memory.Module only needs an llmservice.Client, not the full Service.
func buildMemoryLLMClient() (*llmservice.Client, error) {
	remoteService, err := llmclient.NewService(llmclient.ServiceConfig{
		ClientConfig: llmclient.ClientConfig{
			Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
			Target:    viper.GetString("llm.rpc.target"),
			Timeout:   viper.GetDuration("llm.rpc.timeout"),
		},
		CourseVisionModel: viper.GetString("courseImport.visionModel"),
	})
	if err != nil {
		return nil, err
	}
	return remoteService.ProClient(), nil
}

// buildMemoryRAGRuntime initializes the RAG runtime used for memory retrieval and vector sync.
//
// Why this is not pulled into a shared layer yet:
// 1. This round migrates only the memory capability domain, to avoid also touching the existing assembly path in cmd/start.go;
// 2. The canonical RAG entry point already lives in services/rag; this function only does startup-level config reading and log wrapping;
// 3. Once the agent service is migrated out as well, a shared bootstrap for llm/rag startup assembly will be evaluated in one pass.
func buildMemoryRAGRuntime(ctx context.Context) (ragservice.Runtime, error) {
	ragCfg := ragconfig.LoadFromViper()
	if !ragCfg.Enabled {
		log.Println("RAG service is disabled for memory")
		return nil, nil
	}

	ragLogger := log.Default()
	ragService, err := ragservice.NewFromConfig(ctx, ragCfg, ragservice.FactoryDeps{
		Logger:   ragLogger,
		Observer: ragservice.NewLoggerObserver(ragLogger),
	})
	if err != nil {
		return nil, fmt.Errorf("build memory RAG service failed: %w", err)
	}
	log.Printf("Memory RAG runtime initialized: store=%s embed=%s reranker=%s", ragCfg.Store, ragCfg.EmbedProvider, ragCfg.RerankerProvider)
	return ragService.Runtime(), nil
}