Files
smartmate/backend/services/memory/internal/vectorsync/syncer.go
Losita 2a96f4c6f9 Version: 0.9.76.dev.260505
后端:
1.阶段 6 agent / memory 服务化收口
- 新增 cmd/agent 独立进程入口,承载 agent zrpc server、agent outbox relay / consumer 和运行时依赖初始化
- 补齐 services/agent/rpc 的 Chat stream 与 conversation meta/list/timeline、schedule-preview、context-stats、schedule-state unary RPC
- 新增 gateway/client/agent 与 shared/contracts/agent,将 /api/v1/agent chat 和非 chat 门面切到 agent zrpc
- 收缩 gateway 本地 AgentService 装配,双 RPC 开关开启时不再初始化本地 agent 编排、LLM、RAG 和 memory reader fallback
- 将 backend/memory 物理迁入 services/memory,私有实现收入 internal,保留 module/model/observe 作为 memory 服务门面
- 调整 memory outbox、memory reader 和 agent 记忆渲染链路的 import 与服务边界,cmd/memory 独占 memory worker / consumer
- 关闭 gateway 侧 agent outbox worker 所有权,agent relay / consumer 由 cmd/agent 独占,gateway 仅保留 HTTP/SSE 门面与迁移期开关回退
- 更新阶段 6 文档,记录 agent / memory 当前切流点、smoke 结果,以及 backend/client 与 gateway/shared 的目录收口口径
2026-05-05 19:31:39 +08:00

214 lines
5.7 KiB
Go

package vectorsync
import (
	"context"
	"fmt"
	"log"
	"strconv"
	"strings"

	"github.com/LoveLosita/smartflow/backend/model"
	memoryrepo "github.com/LoveLosita/smartflow/backend/services/memory/internal/repo"
	memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
	ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
)
// Syncer is the minimal bridge between memory_items rows and the vector store.
//
// Responsibility boundaries:
//  1. It only syncs already-persisted memories to RAG / deletes them from RAG;
//  2. It does not decide which memories should be written, deleted, or
//     restored — those decisions stay with the upstream service/worker/cleanup
//     code;
//  3. On sync failure it only writes back vector_status and emits
//     observability events; it never rolls back the business transaction, so
//     the online path does not take a hard dependency on the vector store.
type Syncer struct {
	ragRuntime ragservice.Runtime            // RAG runtime used to ingest/delete memory vectors
	itemRepo   *memoryrepo.ItemRepo          // writes vector_status / vector ID back onto memory items
	observer   memoryobserve.Observer        // structured event sink; defaulted to a no-op in NewSyncer
	metrics    memoryobserve.MetricsRecorder // metrics sink; defaulted to a no-op in NewSyncer
	logger     *log.Logger                   // set in NewSyncer; not used by the methods visible here
}
// NewSyncer builds a Syncer over the given RAG runtime and item repository.
// Nil observer/metrics are replaced with no-op implementations so callers may
// omit observability wiring.
func NewSyncer(
	ragRuntime ragservice.Runtime,
	itemRepo *memoryrepo.ItemRepo,
	observer memoryobserve.Observer,
	metrics memoryobserve.MetricsRecorder,
) *Syncer {
	syncer := &Syncer{
		ragRuntime: ragRuntime,
		itemRepo:   itemRepo,
		observer:   observer,
		metrics:    metrics,
		logger:     log.Default(),
	}
	if syncer.observer == nil {
		syncer.observer = memoryobserve.NewNopObserver()
	}
	if syncer.metrics == nil {
		syncer.metrics = memoryobserve.NewNopMetrics()
	}
	return syncer
}
// Upsert pushes newly created/updated/restored memories into the vector store
// and writes the resulting vector state back onto each memory item. Failures
// mark the items as "failed" and are reported via the observer; the caller's
// transaction is never rolled back from here.
func (s *Syncer) Upsert(ctx context.Context, traceID string, items []model.MemoryItem) {
	if s == nil || s.ragRuntime == nil || s.itemRepo == nil || len(items) == 0 {
		return
	}

	// Translate each memory item into the ingest payload RAG expects.
	payload := make([]ragservice.MemoryIngestItem, len(items))
	for i := range items {
		src := &items[i]
		payload[i] = ragservice.MemoryIngestItem{
			MemoryID:         src.ID,
			UserID:           src.UserID,
			ConversationID:   strValue(src.ConversationID),
			AssistantID:      strValue(src.AssistantID),
			RunID:            strValue(src.RunID),
			MemoryType:       src.MemoryType,
			Title:            src.Title,
			Content:          src.Content,
			Confidence:       src.Confidence,
			Importance:       src.Importance,
			SensitivityLevel: src.SensitivityLevel,
			IsExplicit:       src.IsExplicit,
			Status:           src.Status,
			TTLAt:            src.TTLAt,
			CreatedAt:        src.CreatedAt,
		}
	}

	ingestCtx := memoryobserve.WithFields(ctx, map[string]any{"trace_id": traceID})
	result, err := s.ragRuntime.IngestMemory(ingestCtx, ragservice.MemoryIngestRequest{
		TraceID: traceID,
		Action:  "add",
		Items:   payload,
	})
	if err != nil {
		// Sync failed: flag every item so a later retry can pick it up.
		s.observer.Observe(ctx, memoryobserve.Event{
			Level:     memoryobserve.LevelWarn,
			Component: memoryobserve.ComponentWrite,
			Operation: "vector_upsert",
			Fields: map[string]any{
				"trace_id":   traceID,
				"item_count": len(items),
				"success":    false,
				"error":      err,
				"error_code": memoryobserve.ClassifyError(err),
			},
		})
		for i := range items {
			_ = s.itemRepo.UpdateVectorStateByID(ctx, items[i].ID, "failed", nil)
		}
		return
	}

	// Map memory ID -> document ID so each item can record its vector handle.
	docByMemoryID := make(map[int64]string, len(result.DocumentIDs))
	for _, docID := range result.DocumentIDs {
		if memoryID := parseMemoryID(docID); memoryID > 0 {
			docByMemoryID[memoryID] = docID
		}
	}
	for i := range items {
		_ = s.itemRepo.UpdateVectorStateByID(ctx, items[i].ID, "synced", strPtrOrNil(docByMemoryID[items[i].ID]))
	}

	s.observer.Observe(ctx, memoryobserve.Event{
		Level:     memoryobserve.LevelInfo,
		Component: memoryobserve.ComponentWrite,
		Operation: "vector_upsert",
		Fields: map[string]any{
			"trace_id":       traceID,
			"item_count":     len(items),
			"document_count": len(result.DocumentIDs),
			"success":        true,
		},
	})
}
// Delete removes the vectors backing a batch of memories from the vector
// store and records the outcome ("deleted" or "failed") on each item's vector
// state. Failures are observed but never propagated back to the caller.
func (s *Syncer) Delete(ctx context.Context, traceID string, memoryIDs []int64) {
	if s == nil || len(memoryIDs) == 0 || s.ragRuntime == nil || s.itemRepo == nil {
		return
	}

	// Vector-store document IDs follow the "memory:<id>" convention.
	docIDs := make([]string, len(memoryIDs))
	for i, memoryID := range memoryIDs {
		docIDs[i] = fmt.Sprintf("memory:%d", memoryID)
	}

	deleteCtx := memoryobserve.WithFields(ctx, map[string]any{"trace_id": traceID})
	if err := s.ragRuntime.DeleteMemory(deleteCtx, docIDs); err != nil {
		s.observer.Observe(ctx, memoryobserve.Event{
			Level:     memoryobserve.LevelWarn,
			Component: memoryobserve.ComponentWrite,
			Operation: "vector_delete",
			Fields: map[string]any{
				"trace_id":   traceID,
				"item_count": len(memoryIDs),
				"success":    false,
				"error":      err,
				"error_code": memoryobserve.ClassifyError(err),
			},
		})
		for _, memoryID := range memoryIDs {
			_ = s.itemRepo.UpdateVectorStateByID(ctx, memoryID, "failed", nil)
		}
		return
	}

	for _, memoryID := range memoryIDs {
		_ = s.itemRepo.UpdateVectorStateByID(ctx, memoryID, "deleted", nil)
	}
	s.observer.Observe(ctx, memoryobserve.Event{
		Level:     memoryobserve.LevelInfo,
		Component: memoryobserve.ComponentWrite,
		Operation: "vector_delete",
		Fields: map[string]any{
			"trace_id":   traceID,
			"item_count": len(memoryIDs),
			"success":    true,
		},
	})
}
// parseMemoryID extracts the memory item ID from a vector-store document ID of
// the form "memory:<id>". It returns 0 for anything it cannot map back to a
// memory item: a different prefix, the "memory:uid:..." namespace, an empty or
// non-numeric suffix, or a value that does not fit in int64.
func parseMemoryID(documentID string) int64 {
	documentID = strings.TrimSpace(documentID)
	if !strings.HasPrefix(documentID, "memory:") {
		return 0
	}
	raw := strings.TrimPrefix(documentID, "memory:")
	if strings.HasPrefix(raw, "uid:") {
		return 0
	}
	// Accept plain decimal digits only, so signed forms like "+5"/"-5" keep
	// returning 0 as before.
	for _, ch := range raw {
		if ch < '0' || ch > '9' {
			return 0
		}
	}
	// strconv.ParseInt detects overflow; the previous hand-rolled
	// accumulation silently wrapped oversized values into a wrong
	// (possibly positive) ID, which would then be mapped to the
	// wrong memory item.
	value, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return 0
	}
	return value
}
// strPtrOrNil trims v and returns a pointer to the trimmed value, or nil when
// nothing remains after trimming.
func strPtrOrNil(v string) *string {
	trimmed := strings.TrimSpace(v)
	if trimmed == "" {
		return nil
	}
	return &trimmed
}
// strValue dereferences an optional string, returning the trimmed value, or
// "" when the pointer is nil.
func strValue(v *string) string {
	if v != nil {
		return strings.TrimSpace(*v)
	}
	return ""
}