后端:
1. LLM 客户端从 newAgent/llm 提升为 infra/llm 基础设施层
- 删除 backend/newAgent/llm/(ark.go / ark_adapter.go / client.go / json.go)
- 等价迁移至 backend/infra/llm/,所有 newAgent node 与 service 统一改为引用 infrallm
- 消除 newAgent 对模型客户端的私有依赖,为 memory / websearch 等多模块复用铺路
2. RAG 基础设施完成可运行态接入(factory / runtime / observer / service 四层成型)
- 新建 backend/infra/rag/factory.go / runtime.go / observe.go / observer.go /
service.go:工厂创建、运行时生命周期、轻量观测接口、检索服务门面
- 更新 infra/rag/config/config.go:补齐 Milvus / Embed / Reranker 全部配置项与默认值
- 更新 infra/rag/embed/eino_embedder.go:增强 Eino embedding 适配,支持 BaseURL / APIKey 环境变量 / 超时 /
维度等参数
- 更新 infra/rag/store/milvus_store.go:完整实现 Milvus 向量存储(建集合 / 建 Index / Upsert / Search /
Delete),支持 COSINE / L2 / IP 度量
- 更新 infra/rag/core/pipeline.go:适配 Runtime 接口,Pipeline 由 factory 注入而非手动拼装
- 更新 infra/rag/corpus/memory_corpus.go / vector_store.go:对接 Memory 模块数据源与 Store 接口扩展
3. Memory 模块从 Day1 骨架升级为 Day2 完整可运行态
- 新建 memory/module.go:统一门面 Module,对外封装 EnqueueExtract / ReadService / ManageService / WithTx /
StartWorker,启动层只依赖这一个入口
- 新建 memory/orchestrator/llm_write_orchestrator.go:LLM 驱动的记忆抽取编排器,替代原 mock 抽取
- 新建 memory/service/read_service.go:按用户开关过滤 + 轻量重排 + 访问时间刷新的读取链路
- 新建 memory/service/manage_service.go:记忆管理面能力(列出 / 软删除 / 开关读写),删除同步写审计日志
- 新建 memory/service/common.go:服务层公共工具
- 新建 memory/worker/loop.go:后台轮询循环 RunPollingLoop,定时抢占 pending 任务并推进
- 新建 memory/utils/audit.go / settings.go:审计日志构造、用户设置过滤等纯函数
- 更新 memory/model/item.go / job.go / settings.go / config.go / status.go:补齐 DTO 字段与状态常量
- 更新 memory/repo/item_repo.go / job_repo.go / audit_repo.go / settings_repo.go:补齐 CRUD 与查询能力
- 更新 memory/worker/runner.go:Runner 对接 Module 与 LLM 抽取器,任务状态机完整化
- 更新 memory/README.md:同步模块现状说明
4. newAgent 接入 Memory 读取注入与工具注册依赖预埋
- 新建 service/agentsvc/agent_memory.go:定义 MemoryReader 接口 + injectMemoryContext,在 graph
执行前统一补充记忆上下文
- 更新 service/agentsvc/agent.go:新增 memoryReader 字段与 SetMemoryReader 方法
- 更新 service/agentsvc/agent_newagent.go:调用 injectMemoryContext 注入 pinned block,检索失败仅降级不阻断主链路
- 更新 newAgent/tools/registry.go:新增 DefaultRegistryDeps(含 RAGRuntime),工具注册表支持依赖注入
5. 启动流程与事件处理器接线更新
- 更新 cmd/start.go:初始化 RAG Runtime → Memory Module → 注册事件处理器 → 启动 Worker 后台轮询
- 更新 service/events/memory_extract_requested.go:改用 memory.Module.WithTx(tx) 统一门面,事件处理器不再直接依赖
repo/service 内部包
6. 缓存插件与配置同步
- 更新 middleware/cache_deleter.go:静默忽略 MemoryJob / MemoryItem / MemoryAuditLog / MemoryUserSetting
等新模型,避免日志刷屏;清理冗余注释
- 更新 config.example.yaml:补齐 rag / memory / websearch 配置段及默认值
- 更新 go.mod / go.sum:新增 eino-ext/openai / json-patch / go-openai 依赖
前端:无 仓库:无
348 lines
8.6 KiB
Go
348 lines
8.6 KiB
Go
package service
|
||
|
||
import (
|
||
"context"
|
||
"fmt"
|
||
"sort"
|
||
"strconv"
|
||
"strings"
|
||
"time"
|
||
|
||
infrarag "github.com/LoveLosita/smartflow/backend/infra/rag"
|
||
memorymodel "github.com/LoveLosita/smartflow/backend/memory/model"
|
||
memoryrepo "github.com/LoveLosita/smartflow/backend/memory/repo"
|
||
memoryutils "github.com/LoveLosita/smartflow/backend/memory/utils"
|
||
"github.com/LoveLosita/smartflow/backend/model"
|
||
)
|
||
|
||
const (
	// defaultRetrieveLimit is the number of memory items returned when the
	// caller does not specify a positive limit.
	defaultRetrieveLimit = 5
	// maxRetrieveLimit caps how many items a single Retrieve call may return.
	maxRetrieveLimit = 20
)
// ReadService handles reading, gating, and lightweight re-ranking inside
// the memory module.
//
// Responsibility boundaries:
//  1. Load memory_items and filter them by the user's settings.
//  2. Provide minimal, stable ordering and truncation so later prompt
//     injection has a predictable entry point.
//  3. Never depend on newAgent directly, and never splice memories into
//     the prompt itself.
type ReadService struct {
	itemRepo     *memoryrepo.ItemRepo     // persistence for memory items
	settingsRepo *memoryrepo.SettingsRepo // per-user memory switches
	ragRuntime   infrarag.Runtime         // optional vector-retrieval backend; may be nil
	cfg          memorymodel.Config       // module configuration (RAG toggle, threshold, ...)
}
func NewReadService(
|
||
itemRepo *memoryrepo.ItemRepo,
|
||
settingsRepo *memoryrepo.SettingsRepo,
|
||
ragRuntime infrarag.Runtime,
|
||
cfg memorymodel.Config,
|
||
) *ReadService {
|
||
return &ReadService{
|
||
itemRepo: itemRepo,
|
||
settingsRepo: settingsRepo,
|
||
ragRuntime: ragRuntime,
|
||
cfg: cfg,
|
||
}
|
||
}
|
||
|
||
// Retrieve 读取可供后续注入使用的候选记忆。
|
||
func (s *ReadService) Retrieve(ctx context.Context, req memorymodel.RetrieveRequest) ([]memorymodel.ItemDTO, error) {
|
||
if s == nil || s.itemRepo == nil || s.settingsRepo == nil {
|
||
return nil, nil
|
||
}
|
||
if req.UserID <= 0 {
|
||
return nil, nil
|
||
}
|
||
|
||
now := req.Now
|
||
if now.IsZero() {
|
||
now = time.Now()
|
||
}
|
||
|
||
setting, err := s.settingsRepo.GetByUserID(ctx, req.UserID)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
effectiveSetting := memoryutils.EffectiveUserSetting(setting, req.UserID)
|
||
if !effectiveSetting.MemoryEnabled {
|
||
return nil, nil
|
||
}
|
||
|
||
limit := normalizeLimit(req.Limit, defaultRetrieveLimit, maxRetrieveLimit)
|
||
if s.cfg.RAGEnabled && s.ragRuntime != nil && strings.TrimSpace(req.Query) != "" {
|
||
items, ragErr := s.retrieveByRAG(ctx, req, effectiveSetting, limit, now)
|
||
if ragErr == nil && len(items) > 0 {
|
||
return items, nil
|
||
}
|
||
}
|
||
|
||
return s.retrieveByLegacy(ctx, req, limit, now, effectiveSetting)
|
||
}
|
||
|
||
func (s *ReadService) retrieveByLegacy(
|
||
ctx context.Context,
|
||
req memorymodel.RetrieveRequest,
|
||
limit int,
|
||
now time.Time,
|
||
effectiveSetting model.MemoryUserSetting,
|
||
) ([]memorymodel.ItemDTO, error) {
|
||
if !effectiveSetting.MemoryEnabled {
|
||
return nil, nil
|
||
}
|
||
query := memorymodel.ItemQuery{
|
||
UserID: req.UserID,
|
||
ConversationID: req.ConversationID,
|
||
AssistantID: req.AssistantID,
|
||
RunID: req.RunID,
|
||
Statuses: []string{model.MemoryItemStatusActive},
|
||
MemoryTypes: normalizeRetrieveMemoryTypes(req.MemoryTypes),
|
||
IncludeGlobal: true,
|
||
OnlyUnexpired: true,
|
||
Limit: normalizeLimit(limit*3, limit*3, maxRetrieveLimit*3),
|
||
Now: now,
|
||
}
|
||
|
||
items, err := s.itemRepo.FindByQuery(ctx, query)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
items = memoryutils.FilterItemsBySetting(items, effectiveSetting)
|
||
if len(items) == 0 {
|
||
return nil, nil
|
||
}
|
||
|
||
sort.SliceStable(items, func(i, j int) bool {
|
||
left := scoreRetrievedItem(items[i], now, req.ConversationID)
|
||
right := scoreRetrievedItem(items[j], now, req.ConversationID)
|
||
if left == right {
|
||
return items[i].ID > items[j].ID
|
||
}
|
||
return left > right
|
||
})
|
||
|
||
if len(items) > limit {
|
||
items = items[:limit]
|
||
}
|
||
_ = s.itemRepo.TouchLastAccessAt(ctx, collectMemoryIDs(items), now)
|
||
return toItemDTOs(items), nil
|
||
}
|
||
|
||
func (s *ReadService) retrieveByRAG(
|
||
ctx context.Context,
|
||
req memorymodel.RetrieveRequest,
|
||
effectiveSetting model.MemoryUserSetting,
|
||
limit int,
|
||
now time.Time,
|
||
) ([]memorymodel.ItemDTO, error) {
|
||
if !effectiveSetting.MemoryEnabled {
|
||
return nil, nil
|
||
}
|
||
|
||
result, err := s.ragRuntime.RetrieveMemory(ctx, infrarag.MemoryRetrieveRequest{
|
||
Query: req.Query,
|
||
TopK: limit,
|
||
Threshold: s.cfg.Threshold,
|
||
Action: "search",
|
||
UserID: req.UserID,
|
||
ConversationID: req.ConversationID,
|
||
AssistantID: req.AssistantID,
|
||
RunID: req.RunID,
|
||
MemoryTypes: normalizeRetrieveMemoryTypes(req.MemoryTypes),
|
||
})
|
||
if err != nil || result == nil || len(result.Items) == 0 {
|
||
return nil, err
|
||
}
|
||
|
||
items := make([]memorymodel.ItemDTO, 0, len(result.Items))
|
||
ids := make([]int64, 0, len(result.Items))
|
||
for _, hit := range result.Items {
|
||
dto, memoryID := buildMemoryDTOFromRetrieveHit(hit)
|
||
if !effectiveSetting.ImplicitMemoryEnabled && !dto.IsExplicit {
|
||
continue
|
||
}
|
||
if !effectiveSetting.SensitiveMemoryEnabled && dto.SensitivityLevel > 0 {
|
||
continue
|
||
}
|
||
if dto.ID <= 0 && memoryID > 0 {
|
||
dto.ID = memoryID
|
||
}
|
||
items = append(items, dto)
|
||
if dto.ID > 0 {
|
||
ids = append(ids, dto.ID)
|
||
}
|
||
}
|
||
if len(items) > limit {
|
||
items = items[:limit]
|
||
}
|
||
_ = s.itemRepo.TouchLastAccessAt(ctx, ids, now)
|
||
return items, nil
|
||
}
|
||
|
||
func normalizeRetrieveMemoryTypes(raw []string) []string {
|
||
normalized := normalizeMemoryTypes(raw)
|
||
if len(normalized) > 0 {
|
||
return normalized
|
||
}
|
||
return []string{
|
||
memorymodel.MemoryTypeConstraint,
|
||
memorymodel.MemoryTypePreference,
|
||
memorymodel.MemoryTypeTodoHint,
|
||
memorymodel.MemoryTypeFact,
|
||
}
|
||
}
|
||
|
||
func scoreRetrievedItem(item model.MemoryItem, now time.Time, conversationID string) float64 {
|
||
score := 0.35*clamp01(item.Importance) + 0.3*clamp01(item.Confidence) + 0.2*recencyScore(item, now)
|
||
if item.IsExplicit {
|
||
score += 0.1
|
||
}
|
||
if strValue(item.ConversationID) != "" && strValue(item.ConversationID) == conversationID {
|
||
score += 0.08
|
||
}
|
||
switch item.MemoryType {
|
||
case memorymodel.MemoryTypeConstraint:
|
||
score += 0.12
|
||
case memorymodel.MemoryTypePreference:
|
||
score += 0.08
|
||
case memorymodel.MemoryTypeTodoHint:
|
||
score += 0.05
|
||
}
|
||
return score
|
||
}
|
||
|
||
func recencyScore(item model.MemoryItem, now time.Time) float64 {
|
||
base := item.UpdatedAt
|
||
if base == nil {
|
||
base = item.CreatedAt
|
||
}
|
||
if base == nil || now.Before(*base) {
|
||
return 0.5
|
||
}
|
||
age := now.Sub(*base)
|
||
switch {
|
||
case age <= 24*time.Hour:
|
||
return 1
|
||
case age <= 7*24*time.Hour:
|
||
return 0.85
|
||
case age <= 30*24*time.Hour:
|
||
return 0.65
|
||
case age <= 90*24*time.Hour:
|
||
return 0.45
|
||
default:
|
||
return 0.25
|
||
}
|
||
}
|
||
|
||
// clamp01 restricts v to the closed interval [0, 1].
func clamp01(v float64) float64 {
	switch {
	case v < 0:
		return 0
	case v > 1:
		return 1
	default:
		return v
	}
}
func collectMemoryIDs(items []model.MemoryItem) []int64 {
|
||
if len(items) == 0 {
|
||
return nil
|
||
}
|
||
ids := make([]int64, 0, len(items))
|
||
for _, item := range items {
|
||
if item.ID <= 0 {
|
||
continue
|
||
}
|
||
ids = append(ids, item.ID)
|
||
}
|
||
return ids
|
||
}
|
||
|
||
// buildMemoryDTOFromRetrieveHit converts a RAG retrieval hit back into a
// memory ItemDTO by decoding its metadata map, and returns the memory ID
// parsed from the hit's document ID alongside the DTO.
//
// Missing or malformed metadata values degrade to zero values via the
// read* helpers rather than erroring. NOTE(review): field meanings assume
// the metadata was written by the memory indexing path using these exact
// keys — confirm against the corpus writer.
func buildMemoryDTOFromRetrieveHit(hit infrarag.RetrieveHit) (memorymodel.ItemDTO, int64) {
	// 0 when the document ID is not of the "memory:<int>" form.
	memoryID := parseMemoryIDFromDocumentID(hit.DocumentID)
	metadata := hit.Metadata
	dto := memorymodel.ItemDTO{
		ID:               memoryID,
		UserID:           int(readFloatLike(metadata["user_id"])), // numeric metadata arrives float-like from the store
		ConversationID:   readString(metadata["conversation_id"]),
		AssistantID:      readString(metadata["assistant_id"]),
		RunID:            readString(metadata["run_id"]),
		MemoryType:       readString(metadata["memory_type"]),
		Title:            readString(metadata["title"]),
		Content:          strings.TrimSpace(hit.Text), // body comes from the hit text, not metadata
		Confidence:       readFloatLike(metadata["confidence"]),
		Importance:       readFloatLike(metadata["importance"]),
		SensitivityLevel: int(readFloatLike(metadata["sensitivity_level"])),
		IsExplicit:       readBoolLike(metadata["is_explicit"]),
		Status:           readString(metadata["status"]),
		TTLAt:            readTimeLike(metadata["ttl_at"]), // nil unless RFC3339-parseable
	}
	return dto, memoryID
}
// parseMemoryIDFromDocumentID extracts the numeric memory ID from a
// document ID of the form "memory:<id>". Synthetic IDs ("memory:uid:...")
// and anything unparseable yield 0.
func parseMemoryIDFromDocumentID(documentID string) int64 {
	const prefix = "memory:"
	trimmed := strings.TrimSpace(documentID)
	if !strings.HasPrefix(trimmed, prefix) {
		return 0
	}
	rest := trimmed[len(prefix):]
	if strings.HasPrefix(rest, "uid:") {
		return 0
	}
	id, err := strconv.ParseInt(rest, 10, 64)
	if err != nil {
		return 0
	}
	return id
}
func readString(v any) string {
|
||
if v == nil {
|
||
return ""
|
||
}
|
||
return strings.TrimSpace(fmt.Sprintf("%v", v))
|
||
}
|
||
|
||
// readFloatLike coerces common metadata value types to float64.
// Unsupported types and unparseable strings yield 0.
func readFloatLike(v any) float64 {
	switch n := v.(type) {
	case float64:
		return n
	case float32:
		return float64(n)
	case int:
		return float64(n)
	case int64:
		return float64(n)
	case string:
		if f, err := strconv.ParseFloat(strings.TrimSpace(n), 64); err == nil {
			return f
		}
	}
	return 0
}
// readBoolLike coerces a metadata value to bool: native bools pass
// through, and only the (case-insensitive, trimmed) string "true" counts
// as true. Everything else is false.
func readBoolLike(v any) bool {
	if b, ok := v.(bool); ok {
		return b
	}
	if s, ok := v.(string); ok {
		return strings.EqualFold(strings.TrimSpace(s), "true")
	}
	return false
}
func readTimeLike(v any) *time.Time {
|
||
text := readString(v)
|
||
if text == "" {
|
||
return nil
|
||
}
|
||
parsed, err := time.Parse(time.RFC3339, text)
|
||
if err != nil {
|
||
return nil
|
||
}
|
||
return &parsed
|
||
}
|