feat: 🗄️ 新增自动建表功能 * 新增项目启动时自动建表能力,减少手动初始化数据库步骤 * 解决 `agent_chat` 与 `chat_history` 结构体互相持有对方结构体用于 `preload` 导致的循环依赖问题 * 修复因结构体互相依赖引发的建表失败问题,保证数据库初始化流程稳定 feat: 🐳 Docker Compose 引入 Kafka 分区自动初始化 * 更新 `docker-compose` 配置,引入 Kafka partition 自动初始化脚本 * 保证服务启动后 Topic 即具备可用 partition,实现开箱即用 * 修复转移环境后 MySQL 等容器数据无法持久化的问题,统一改为使用命名卷进行数据持久化 docs: 📚 补充 Outbox + Kafka 持久化链路注释 * 为 Outbox + Kafka 消息持久化链路补充详细代码注释 * 提升异步消息链路的可读性与维护性 * 当前代码 Review 进度约 50% undo: ⚠️ Kafka 初始化阶段出现消息短暂堆积 * 初次初始化项目时观察到消息在 Kafka 中短暂堆积现象 * 后续被消费者一次性消费且未再次复现 * 已在生产者启动、消费者启动以及消息消费流程中增加控制台日志输出,降低系统黑箱程度 * 后续若条件允许将进一步排查该现象的触发原因
200 lines · 6.3 KiB · Go
package service
import (
	"context"
	"log"
	"strings"

	"github.com/LoveLosita/smartflow/backend/agent"
	"github.com/LoveLosita/smartflow/backend/conv"
	"github.com/LoveLosita/smartflow/backend/dao"
	"github.com/LoveLosita/smartflow/backend/inits"
	"github.com/LoveLosita/smartflow/backend/model"
	"github.com/LoveLosita/smartflow/backend/pkg"
	"github.com/cloudwego/eino-ext/components/model/ark"
	"github.com/cloudwego/eino/schema"
	"github.com/google/uuid"
)
// AgentService orchestrates agent conversations: it resolves the chat model,
// manages conversation state and history through the cache and DAO layers,
// and hands chat-history persistence to an optional async pipeline.
type AgentService struct {
	AIHub *inits.AIHub // available chat models (exposes Strategist and Worker; see pickChatModel)

	repo *dao.AgentDAO // database access for conversations and chat history

	agentCache *dao.AgentCache // cache for conversation status and message history (Redis-backed, per log messages)

	asyncPipeline *AgentAsyncPipeline // reliable persistence path; nil => synchronous DB writes (see saveChatHistoryReliable)
}
func NewAgentService(aiHub *inits.AIHub, repo *dao.AgentDAO, agentRedis *dao.AgentCache, asyncPipeline *AgentAsyncPipeline) *AgentService {
|
||
return &AgentService{
|
||
AIHub: aiHub,
|
||
repo: repo,
|
||
agentCache: agentRedis,
|
||
asyncPipeline: asyncPipeline,
|
||
}
|
||
}
|
||
|
||
func normalizeConversationID(chatID string) string {
|
||
trimmed := strings.TrimSpace(chatID)
|
||
if trimmed == "" {
|
||
return uuid.NewString()
|
||
}
|
||
return trimmed
|
||
}
|
||
|
||
func (s *AgentService) pickChatModel(requestModel string) (*ark.ChatModel, string) {
|
||
modelName := strings.TrimSpace(requestModel)
|
||
if strings.EqualFold(modelName, "strategist") {
|
||
return s.AIHub.Strategist, "strategist"
|
||
}
|
||
return s.AIHub.Worker, "worker"
|
||
}
|
||
|
||
// saveChatHistoryReliable 是聊天记录持久化的统一入口:
|
||
// 1) 启用 outbox + Kafka 时,走异步可靠链路;
|
||
// 2) 未启用时,退化为同步写数据库。
|
||
func (s *AgentService) saveChatHistoryReliable(ctx context.Context, payload model.ChatHistoryPersistPayload) error {
|
||
if s.asyncPipeline == nil {
|
||
return s.repo.SaveChatHistory(ctx, payload.UserID, payload.ConversationID, payload.Role, payload.Message)
|
||
}
|
||
return s.asyncPipeline.EnqueueChatHistoryPersist(ctx, payload)
|
||
}
|
||
|
||
func pushErrNonBlocking(errChan chan error, err error) {
|
||
select {
|
||
case errChan <- err:
|
||
default:
|
||
log.Printf("error channel is full, drop error: %v", err)
|
||
}
|
||
}
|
||
|
||
func (s *AgentService) AgentChat(ctx context.Context, userMessage string, ifThinking bool, modelName string, userID int, chatID string) (<-chan string, <-chan error) {
|
||
// 1) 准备输出通道。
|
||
outChan := make(chan string, 5)
|
||
errChan := make(chan error, 1)
|
||
|
||
// 2) 规范会话 ID 并选择模型。
|
||
chatID = normalizeConversationID(chatID)
|
||
selectedModel, resolvedModelName := s.pickChatModel(modelName)
|
||
|
||
// 3) 确保会话存在:先查缓存,再回源数据库,必要时创建新会话。
|
||
result, err := s.agentCache.GetConversationStatus(ctx, chatID)
|
||
if err != nil {
|
||
errChan <- err
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
if !result {
|
||
innerResult, ifErr := s.repo.IfChatExists(ctx, userID, chatID)
|
||
if ifErr != nil {
|
||
errChan <- ifErr
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
if !innerResult {
|
||
if _, err = s.repo.CreateNewChat(userID, chatID); err != nil {
|
||
errChan <- err
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
}
|
||
if err = s.agentCache.SetConversationStatus(ctx, chatID); err != nil {
|
||
log.Printf("failed to set conversation status cache for %s: %v", chatID, err)
|
||
}
|
||
}
|
||
|
||
// 4) 组装历史上下文:先读缓存,缓存未命中再读数据库。
|
||
chatHistory, err := s.agentCache.GetHistory(ctx, chatID)
|
||
if err != nil {
|
||
errChan <- err
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
|
||
cacheMiss := false
|
||
if chatHistory == nil {
|
||
cacheMiss = true
|
||
histories, hisErr := s.repo.GetUserChatHistories(ctx, userID, pkg.HistoryFetchLimitByModel(resolvedModelName), chatID)
|
||
if hisErr != nil {
|
||
errChan <- hisErr
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
chatHistory = conv.ToEinoMessages(histories)
|
||
}
|
||
|
||
// 5) 基于 token 预算裁剪历史,避免请求超长。
|
||
historyBudget := pkg.HistoryTokenBudgetByModel(resolvedModelName, agent.SystemPrompt, userMessage)
|
||
trimmedHistory, totalHistoryTokens, keptHistoryTokens, droppedCount := pkg.TrimHistoryByTokenBudget(chatHistory, historyBudget)
|
||
chatHistory = trimmedHistory
|
||
|
||
// 6) 根据裁剪结果调整 Redis 会话窗口,控制缓存体积。
|
||
targetWindow := pkg.CalcSessionWindowSize(len(chatHistory))
|
||
if err = s.agentCache.SetSessionWindowSize(ctx, chatID, targetWindow); err != nil {
|
||
log.Printf("failed to set history window for %s: %v", chatID, err)
|
||
}
|
||
if err = s.agentCache.EnforceHistoryWindow(ctx, chatID); err != nil {
|
||
log.Printf("failed to enforce history window for %s: %v", chatID, err)
|
||
}
|
||
|
||
if droppedCount > 0 {
|
||
log.Printf("agent history trimmed: chat=%s total_tokens=%d kept_tokens=%d dropped=%d budget=%d target_window=%d",
|
||
chatID, totalHistoryTokens, keptHistoryTokens, droppedCount, historyBudget, targetWindow)
|
||
}
|
||
|
||
// 缓存未命中时,把“裁剪后的历史”回填 Redis。
|
||
if cacheMiss {
|
||
if err = s.agentCache.BackfillHistory(ctx, chatID, chatHistory); err != nil {
|
||
errChan <- err
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
}
|
||
|
||
// 7) 先同步写 Redis,再把数据库持久化交给 outbox 可靠链路。
|
||
if err = s.agentCache.PushMessage(ctx, chatID, &schema.Message{Role: schema.User, Content: userMessage}); err != nil {
|
||
log.Printf("failed to push user message into redis history: %v", err)
|
||
}
|
||
if err = s.saveChatHistoryReliable(ctx, model.ChatHistoryPersistPayload{
|
||
UserID: userID,
|
||
ConversationID: chatID,
|
||
Role: "user",
|
||
Message: userMessage,
|
||
}); err != nil {
|
||
errChan <- err
|
||
close(outChan)
|
||
close(errChan)
|
||
return outChan, errChan
|
||
}
|
||
|
||
// 8) 启动流式对话。
|
||
go func() {
|
||
defer close(outChan)
|
||
|
||
fullText, streamErr := agent.StreamChat(ctx, selectedModel, resolvedModelName, userMessage, ifThinking, chatHistory, outChan)
|
||
if streamErr != nil {
|
||
pushErrNonBlocking(errChan, streamErr)
|
||
return
|
||
}
|
||
|
||
// 9) 助手回答完成后,重复同样流程:先写 Redis,再异步持久化。
|
||
if cacheErr := s.agentCache.PushMessage(context.Background(), chatID, &schema.Message{Role: schema.Assistant, Content: fullText}); cacheErr != nil {
|
||
log.Printf("failed to push assistant message into redis history: %v", cacheErr)
|
||
}
|
||
if saveErr := s.saveChatHistoryReliable(context.Background(), model.ChatHistoryPersistPayload{
|
||
UserID: userID,
|
||
ConversationID: chatID,
|
||
Role: "assistant",
|
||
Message: fullText,
|
||
}); saveErr != nil {
|
||
pushErrNonBlocking(errChan, saveErr)
|
||
}
|
||
}()
|
||
|
||
return outChan, errChan
|
||
}
|