Version: 0.9.65.dev.260503
Backend: 1. Phases 1.5/1.6: finalize llm-service / rag-service, unifying the model egress and the retrieval-infrastructure entry point, and retire the legacy implementations under backend/infra/llm and backend/infra/rag; 2. Update the affected call chains and the microservice migration plan docs accordingly
17
backend/services/rag/core/errors.go
Normal file
@@ -0,0 +1,17 @@
package core

import "errors"

var (
	// ErrInvalidQuery indicates the retrieve request lacks a valid query.
	ErrInvalidQuery = errors.New("invalid query")
	// ErrInvalidTopK indicates an invalid topK value.
	ErrInvalidTopK = errors.New("invalid top_k")
	// ErrNilDependency indicates a required pipeline dependency was not injected.
	ErrNilDependency = errors.New("nil dependency")
)

const (
	// FallbackReasonRerankFailed marks degradation after a rerank failure.
	FallbackReasonRerankFailed = "RERANK_FAILED"
)
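
Aside: a caller-side sketch (illustrative, not part of this commit). The sentinels above are meant to be matched with errors.Is; the module path, package name, and the request/wiring split below are assumptions about intended use.

package ragsketch

import (
	"errors"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func classifyRetrieveErr(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, core.ErrInvalidQuery), errors.Is(err, core.ErrInvalidTopK):
		return "bad_request" // the request itself is malformed
	case errors.Is(err, core.ErrNilDependency):
		return "wiring_bug" // a pipeline dependency was never injected
	default:
		return "internal"
	}
}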
38
backend/services/rag/core/interfaces.go
Normal file
@@ -0,0 +1,38 @@
package core

import "context"

// Chunker splits text into chunks.
type Chunker interface {
	Chunk(ctx context.Context, doc SourceDocument, opt ChunkOption) ([]Chunk, error)
}

// Embedder turns texts into vectors.
type Embedder interface {
	Embed(ctx context.Context, texts []string, action string) ([][]float32, error)
}

// Retriever recalls candidate chunks.
type Retriever interface {
	Retrieve(ctx context.Context, req RetrieveRequest) ([]ScoredChunk, error)
}

// Reranker reorders candidate chunks.
type Reranker interface {
	Rerank(ctx context.Context, query string, candidates []ScoredChunk, topK int) ([]ScoredChunk, error)
}

// VectorStore reads and writes the vector database.
type VectorStore interface {
	Upsert(ctx context.Context, rows []VectorRow) error
	Search(ctx context.Context, req VectorSearchRequest) ([]ScoredVectorRow, error)
	Delete(ctx context.Context, ids []string) error
	Get(ctx context.Context, ids []string) ([]VectorRow, error)
}

// CorpusAdapter maps business corpora onto unified documents/filter conditions.
type CorpusAdapter interface {
	Name() string
	BuildIngestDocuments(ctx context.Context, input any) ([]SourceDocument, error)
	BuildRetrieveFilter(ctx context.Context, req any) (map[string]any, error)
}
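
Aside: a minimal CorpusAdapter sketch (illustrative only; the FAQ corpus, its input type, and the module path are assumptions, not part of this commit):

package ragsketch

import (
	"context"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

type faqEntry struct {
	ID       string
	Question string
	Answer   string
}

// faqCorpus maps FAQ entries onto the unified document model.
type faqCorpus struct{}

func (faqCorpus) Name() string { return "faq" }

func (faqCorpus) BuildIngestDocuments(_ context.Context, input any) ([]core.SourceDocument, error) {
	entries, ok := input.([]faqEntry)
	if !ok {
		return nil, fmt.Errorf("faq corpus: unexpected input type %T", input)
	}
	docs := make([]core.SourceDocument, 0, len(entries))
	for _, e := range entries {
		docs = append(docs, core.SourceDocument{
			ID:    e.ID,
			Title: e.Question,
			Text:  e.Question + "\n" + e.Answer,
		})
	}
	return docs, nil
}

func (faqCorpus) BuildRetrieveFilter(context.Context, any) (map[string]any, error) {
	// No extra constraints here; Pipeline.Retrieve still pins
	// filter["corpus"] = "faq" on top of whatever is returned.
	return map[string]any{}, nil
}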
190
backend/services/rag/core/observer.go
Normal file
@@ -0,0 +1,190 @@
package core

import (
	"context"
	"errors"
	"fmt"
	"log"
	"sort"
	"strings"
)

// ObserveLevel is the severity of an observation event.
type ObserveLevel string

const (
	ObserveLevelInfo  ObserveLevel = "info"
	ObserveLevelWarn  ObserveLevel = "warn"
	ObserveLevelError ObserveLevel = "error"
)

// ObserveEvent describes one unified observation event.
//
// Responsibility boundaries:
//  1. Carries only structured runtime information of the RAG service;
//  2. Is not bound to any concrete logging, metrics, or tracing implementation;
//  3. Field contents should stay as stable as possible, to ease later
//     integration with a global observability platform.
type ObserveEvent struct {
	Level     ObserveLevel
	Component string
	Operation string
	Fields    map[string]any
}

// Observer is the minimal observation interface of the RAG service.
//
// Responsibility boundaries:
//  1. Consumes structured events;
//  2. Does not decide whether business logic keeps executing;
//  3. No implementation may destabilize the main request path.
type Observer interface {
	Observe(ctx context.Context, event ObserveEvent)
}

// ObserverFunc adapts a plain function into an Observer.
type ObserverFunc func(ctx context.Context, event ObserveEvent)

func (f ObserverFunc) Observe(ctx context.Context, event ObserveEvent) {
	if f == nil {
		return
	}
	f(ctx, event)
}

// NewNopObserver returns a no-op implementation, a safe fallback before a
// unified observability platform is wired in.
func NewNopObserver() Observer {
	return ObserverFunc(func(context.Context, ObserveEvent) {})
}
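
Aside: a small sketch of the function adapter (illustrative; SetObserver is defined in pipeline.go below). Tests can hook events without defining a type; the sketch is not goroutine-safe.

package ragsketch

import (
	"context"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

// countWarnings wires an ObserverFunc that tallies warn-level events,
// e.g. rerank fallbacks. Illustrative only.
func countWarnings(p *core.Pipeline) *int {
	count := new(int)
	p.SetObserver(core.ObserverFunc(func(_ context.Context, e core.ObserveEvent) {
		if e.Level == core.ObserveLevelWarn {
			*count++
		}
	}))
	return count
}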

// NewLoggerObserver returns a standard-library log adapter.
//
// Notes:
//  1. While the project has no unified logging platform, this prints the
//     structured fields in a stable form;
//  2. Once a unified logger/metrics/tracing stack lands, only the injected
//     Observer implementation needs to be swapped;
//  3. The adapter keeps single-line output, staying close to the existing
//     log style.
func NewLoggerObserver(logger *log.Logger) Observer {
	if logger == nil {
		logger = log.Default()
	}
	return &loggerObserver{logger: logger}
}

type loggerObserver struct {
	logger *log.Logger
}

func (o *loggerObserver) Observe(ctx context.Context, event ObserveEvent) {
	if o == nil || o.logger == nil {
		return
	}

	level := strings.TrimSpace(string(event.Level))
	if level == "" {
		level = string(ObserveLevelInfo)
	}
	component := strings.TrimSpace(event.Component)
	if component == "" {
		component = "unknown"
	}
	operation := strings.TrimSpace(event.Operation)
	if operation == "" {
		operation = "unknown"
	}

	fields := ObserveFieldsFromContext(ctx)
	for key, value := range event.Fields {
		key = strings.TrimSpace(key)
		if key == "" || !shouldKeepObserveField(value) {
			continue
		}
		fields[key] = value
	}

	parts := []string{
		"rag",
		fmt.Sprintf("level=%s", level),
		fmt.Sprintf("component=%s", component),
		fmt.Sprintf("operation=%s", operation),
	}

	keys := make([]string, 0, len(fields))
	for key := range fields {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		parts = append(parts, fmt.Sprintf("%s=%v", key, fields[key]))
	}

	o.logger.Print(strings.Join(parts, " "))
}
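
Aside: a sketch of the resulting line format (illustrative; uses a flag-less logger so the output is exactly the joined parts):

package ragsketch

import (
	"context"
	"log"
	"os"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func printOneEvent() {
	obs := core.NewLoggerObserver(log.New(os.Stdout, "", 0))
	obs.Observe(context.Background(), core.ObserveEvent{
		Level:     core.ObserveLevelWarn,
		Component: "pipeline",
		Operation: "rerank_fallback",
		Fields:    map[string]any{"top_k": 8, "note": "  "}, // blank string is dropped
	})
	// Prints: rag level=warn component=pipeline operation=rerank_fallback top_k=8
}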

type observeFieldsContextKey struct{}

// WithObserveFields attaches shared observation fields to the context so
// downstream components can reuse them.
//
// Step by step:
//  1. Read the fields already on the context first, so Runtime / Pipeline /
//     Store can enrich them layer by layer;
//  2. Later writes overwrite same-named values, so downstream always sees the
//     latest semantics;
//  3. Only "meaningful" fields are kept, so logs do not accumulate piles of
//     empty values over time.
func WithObserveFields(ctx context.Context, fields map[string]any) context.Context {
	if len(fields) == 0 {
		return ctx
	}
	if ctx == nil {
		ctx = context.Background()
	}

	merged := ObserveFieldsFromContext(ctx)
	for key, value := range fields {
		key = strings.TrimSpace(key)
		if key == "" || !shouldKeepObserveField(value) {
			continue
		}
		merged[key] = value
	}
	if len(merged) == 0 {
		return ctx
	}
	return context.WithValue(ctx, observeFieldsContextKey{}, merged)
}

// ObserveFieldsFromContext extracts the observation fields accumulated on the context.
func ObserveFieldsFromContext(ctx context.Context) map[string]any {
	if ctx == nil {
		return map[string]any{}
	}
	raw, ok := ctx.Value(observeFieldsContextKey{}).(map[string]any)
	if !ok || len(raw) == 0 {
		return map[string]any{}
	}
	result := make(map[string]any, len(raw))
	for key, value := range raw {
		result[key] = value
	}
	return result
}
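
Aside: a sketch of the layering behavior (illustrative). Fields accumulate across calls and later writes win, which is what lets Runtime, Pipeline, and Store each add their own keys.

package ragsketch

import (
	"context"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func layering() {
	ctx := core.WithObserveFields(context.Background(), map[string]any{"request_id": "r-1"})
	ctx = core.WithObserveFields(ctx, map[string]any{"corpus": "faq", "request_id": "r-2"})
	fmt.Println(core.ObserveFieldsFromContext(ctx))
	// map[corpus:faq request_id:r-2] — r-2 overwrote r-1; fmt sorts map keys.
}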

// ClassifyErrorCode compresses common errors into stable error codes, easing
// later integration with a global observability platform.
func ClassifyErrorCode(err error) string {
	switch {
	case err == nil:
		return ""
	case errors.Is(err, context.DeadlineExceeded):
		return "DEADLINE_EXCEEDED"
	case errors.Is(err, context.Canceled):
		return "CANCELED"
	default:
		return "RAG_ERROR"
	}
}

func shouldKeepObserveField(value any) bool {
	if value == nil {
		return false
	}
	if text, ok := value.(string); ok {
		return strings.TrimSpace(text) != ""
	}
	return true
}
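
Aside: because the classification uses errors.Is, wrapped timeouts and cancellations still map to stable codes (illustrative sketch):

package ragsketch

import (
	"context"
	"errors"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func classifyExamples() {
	wrapped := fmt.Errorf("vector search: %w", context.DeadlineExceeded)
	fmt.Println(core.ClassifyErrorCode(wrapped))            // DEADLINE_EXCEEDED
	fmt.Println(core.ClassifyErrorCode(context.Canceled))   // CANCELED
	fmt.Println(core.ClassifyErrorCode(errors.New("boom"))) // RAG_ERROR
	fmt.Println(core.ClassifyErrorCode(nil) == "")          // true
}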
366
backend/services/rag/core/pipeline.go
Normal file
@@ -0,0 +1,366 @@
package core

import (
	"context"
	"errors"
	"fmt"
	"log"
	"runtime/debug"
	"strings"
	"time"
)

const (
	defaultTopK       = 8
	defaultThreshold  = 0
	defaultChunkSize  = 400
	defaultChunkOvLap = 80
)

// Pipeline is the RAG core orchestrator.
//
// Responsibility boundaries:
//  1. Unifies the chunk/embed/retrieve/rerank flow;
//  2. Owns the failure-degradation semantics;
//  3. Carries no business-specific semantics (those live in CorpusAdapter).
type Pipeline struct {
	chunker  Chunker
	embedder Embedder
	store    VectorStore
	reranker Reranker
	logger   *log.Logger
	observer Observer
}

func NewPipeline(chunker Chunker, embedder Embedder, store VectorStore, reranker Reranker) *Pipeline {
	return &Pipeline{
		chunker:  chunker,
		embedder: embedder,
		store:    store,
		reranker: reranker,
		logger:   log.Default(),
		observer: NewNopObserver(),
	}
}

// SetLogger sets the logger used by the Pipeline.
func (p *Pipeline) SetLogger(logger *log.Logger) {
	if p == nil || logger == nil {
		return
	}
	p.logger = logger
}

// SetObserver sets the unified observer used by the Pipeline.
func (p *Pipeline) SetObserver(observer Observer) {
	if p == nil || observer == nil {
		return
	}
	p.observer = observer
}
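
Aside: a wiring sketch (illustrative; the dependencies are passed in rather than invented, since their constructors live elsewhere):

package ragsketch

import (
	"log"
	"os"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func buildPipeline(chunker core.Chunker, embedder core.Embedder, store core.VectorStore, reranker core.Reranker) *core.Pipeline {
	p := core.NewPipeline(chunker, embedder, store, reranker)
	// Optional: the defaults are log.Default() plus a nop observer.
	p.SetLogger(log.New(os.Stderr, "rag ", log.LstdFlags))
	p.SetObserver(core.NewLoggerObserver(nil))
	return p
}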

// Ingest runs the unified ingestion flow.
//
// Step by step:
//  1. CorpusAdapter first builds unified documents, so every corpus enters
//     through the same door;
//  2. Chunking and embedding are then done centrally, so business code never
//     reimplements them;
//  3. Finally a single Upsert; on failure return directly and let the caller
//     decide whether to retry.
func (p *Pipeline) Ingest(
	ctx context.Context,
	corpus CorpusAdapter,
	input any,
	opt IngestOption,
) (result *IngestResult, err error) {
	defer p.recoverExecutionPanic(ctx, "ingest", &err)

	if p == nil || p.chunker == nil || p.embedder == nil || p.store == nil {
		return nil, ErrNilDependency
	}
	if corpus == nil {
		return nil, errors.New("nil corpus adapter")
	}

	docs, err := corpus.BuildIngestDocuments(ctx, input)
	if err != nil {
		return nil, err
	}
	return p.IngestDocuments(ctx, corpus.Name(), docs, opt)
}
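
Aside: an end-to-end ingestion sketch (illustrative; reuses the hypothetical faqCorpus from the interfaces.go aside above):

package ragsketch

import (
	"context"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func ingestFAQ(ctx context.Context, p *core.Pipeline) error {
	res, err := p.Ingest(ctx, faqCorpus{}, []faqEntry{
		{ID: "faq-1", Question: "How do I reset my password?", Answer: "Use the account settings page."},
	}, core.IngestOption{}) // Action defaults to "add"; ChunkSize defaults to 400
	if err != nil {
		return err
	}
	fmt.Printf("ingested docs=%d chunks=%d\n", res.DocumentCount, res.ChunkCount)
	return nil
}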

// IngestDocuments runs the unified ingestion flow for already-normalized documents.
//
// Responsibility boundaries:
//  1. Handles documents that have already gone through CorpusAdapter mapping;
//  2. Owns the chunking, embedding, and Upsert;
//  3. Does no business input parsing, so the Runtime never has to re-build
//     documents just to obtain a document_id.
func (p *Pipeline) IngestDocuments(
	ctx context.Context,
	corpusName string,
	docs []SourceDocument,
	opt IngestOption,
) (result *IngestResult, err error) {
	defer p.recoverExecutionPanic(ctx, "ingest_documents", &err)

	if p == nil || p.chunker == nil || p.embedder == nil || p.store == nil {
		return nil, ErrNilDependency
	}
	if len(docs) == 0 {
		return &IngestResult{DocumentCount: 0, ChunkCount: 0}, nil
	}

	chunkOpt := normalizeChunkOption(opt.Chunk)
	chunks := make([]Chunk, 0, len(docs)*2)
	for _, doc := range docs {
		// 1. Chunk each document independently; abort on failure to avoid
		// writing half-finished data.
		docChunks, chunkErr := p.chunker.Chunk(ctx, doc, chunkOpt)
		if chunkErr != nil {
			return nil, chunkErr
		}
		chunks = append(chunks, docChunks...)
	}
	if len(chunks) == 0 {
		return &IngestResult{DocumentCount: len(docs), ChunkCount: 0}, nil
	}

	texts := make([]string, 0, len(chunks))
	for _, chunk := range chunks {
		texts = append(texts, chunk.Text)
	}

	action := strings.TrimSpace(opt.Action)
	if action == "" {
		action = "add"
	}
	vectors, err := p.embedder.Embed(ctx, texts, action)
	if err != nil {
		return nil, err
	}
	if len(vectors) != len(chunks) {
		return nil, fmt.Errorf("embedding result length mismatch: chunks=%d vectors=%d", len(chunks), len(vectors))
	}

	rows := make([]VectorRow, 0, len(chunks))
	now := time.Now()
	for i, chunk := range chunks {
		metadata := cloneMap(chunk.Metadata)
		metadata["corpus"] = corpusName
		metadata["document_id"] = chunk.DocumentID
		metadata["chunk_order"] = chunk.Order
		rows = append(rows, VectorRow{
			ID:        chunk.ID,
			Vector:    vectors[i],
			Text:      chunk.Text,
			Metadata:  metadata,
			CreatedAt: now,
			UpdatedAt: now,
		})
	}

	if err = p.store.Upsert(ctx, rows); err != nil {
		return nil, err
	}
	return &IngestResult{
		DocumentCount: len(docs),
		ChunkCount:    len(chunks),
	}, nil
}
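
Aside: a toy Chunker sketch (illustrative only; real implementations would respect sentence/token boundaries), showing how DocumentID and Order flow into the metadata written above:

package ragsketch

import (
	"context"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

// windowChunker slides a fixed rune window over the text.
type windowChunker struct{}

func (windowChunker) Chunk(_ context.Context, doc core.SourceDocument, opt core.ChunkOption) ([]core.Chunk, error) {
	if opt.ChunkSize <= 0 {
		return nil, fmt.Errorf("window chunker: non-positive chunk size %d", opt.ChunkSize)
	}
	runes := []rune(doc.Text)
	step := opt.ChunkSize - opt.ChunkOverlap // positive after normalizeChunkOption
	if step <= 0 {
		step = opt.ChunkSize // defensive: tolerate un-normalized options
	}
	var out []core.Chunk
	for start, order := 0, 0; start < len(runes); start, order = start+step, order+1 {
		end := start + opt.ChunkSize
		if end > len(runes) {
			end = len(runes)
		}
		out = append(out, core.Chunk{
			ID:         fmt.Sprintf("%s#%d", doc.ID, order),
			DocumentID: doc.ID,
			Order:      order,
			Text:       string(runes[start:end]),
			Metadata:   doc.Metadata, // copied by IngestDocuments via cloneMap
		})
	}
	return out, nil
}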

// Retrieve runs the unified retrieval flow.
//
// Step by step:
//  1. Embed the query and run the vector search;
//  2. Apply the score threshold to drop low-quality candidates;
//  3. Optionally rerank; on rerank failure, degrade to the original order
//     and log the fallback.
func (p *Pipeline) Retrieve(
	ctx context.Context,
	corpus CorpusAdapter,
	req RetrieveRequest,
) (result *RetrieveResult, err error) {
	defer p.recoverExecutionPanic(ctx, "retrieve", &err)

	if p == nil || p.embedder == nil || p.store == nil {
		return nil, ErrNilDependency
	}
	query := strings.TrimSpace(req.Query)
	if query == "" {
		return nil, ErrInvalidQuery
	}

	topK := req.TopK
	if topK <= 0 {
		topK = defaultTopK
	}
	threshold := req.Threshold
	if threshold < 0 {
		threshold = defaultThreshold
	}

	filter := cloneMap(req.Filter)
	if corpus != nil {
		// 1. Merge the corpus filter first, so recall never crosses corpora.
		corpusFilter, err := corpus.BuildRetrieveFilter(ctx, req.CorpusInput)
		if err != nil {
			return nil, err
		}
		filter = mergeMap(filter, corpusFilter)
		filter["corpus"] = corpus.Name()
	}

	action := strings.TrimSpace(req.Action)
	if action == "" {
		action = "search"
	}

	vectors, err := p.embedder.Embed(ctx, []string{query}, action)
	if err != nil {
		return nil, err
	}
	if len(vectors) != 1 {
		return nil, fmt.Errorf("embedding query length mismatch: %d", len(vectors))
	}

	scoredRows, err := p.store.Search(ctx, VectorSearchRequest{
		QueryVector: vectors[0],
		TopK:        topK,
		Filter:      filter,
	})
	if err != nil {
		return nil, err
	}

	rawCount := len(scoredRows)
	candidates := make([]ScoredChunk, 0, len(scoredRows))
	for _, row := range scoredRows {
		if row.Score < threshold {
			continue
		}
		candidates = append(candidates, ScoredChunk{
			ChunkID:    row.Row.ID,
			DocumentID: asString(row.Row.Metadata["document_id"]),
			Text:       row.Row.Text,
			Score:      row.Score,
			Metadata:   cloneMap(row.Row.Metadata),
		})
	}

	result = &RetrieveResult{
		Items:        candidates,
		RawCount:     rawCount,
		FallbackUsed: false,
	}
	if len(candidates) == 0 || p.reranker == nil {
		return result, nil
	}

	reranked, rerankErr := p.reranker.Rerank(ctx, query, candidates, topK)
	if rerankErr != nil {
		// 2. A rerank failure does not abort the flow; degrade uniformly to
		// the original ordering.
		result.FallbackUsed = true
		result.FallbackReason = FallbackReasonRerankFailed
		if p.observer != nil {
			p.observer.Observe(ctx, ObserveEvent{
				Level:     ObserveLevelWarn,
				Component: "pipeline",
				Operation: "rerank_fallback",
				Fields: map[string]any{
					"status":          "fallback",
					"fallback_reason": FallbackReasonRerankFailed,
					"candidate_count": len(candidates),
					"top_k":           topK,
					"error":           rerankErr,
					"error_code":      ClassifyErrorCode(rerankErr),
				},
			})
		} else if p.logger != nil {
			p.logger.Printf("rag rerank fallback: reason=%s err=%v", FallbackReasonRerankFailed, rerankErr)
		}
		return result, nil
	}
	result.Items = reranked
	return result, nil
}
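
Aside: a retrieval sketch (illustrative; reuses the hypothetical faqCorpus). Degraded results are still served, with FallbackUsed/FallbackReason exposing what happened.

package ragsketch

import (
	"context"
	"fmt"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

func searchFAQ(ctx context.Context, p *core.Pipeline) error {
	res, err := p.Retrieve(ctx, faqCorpus{}, core.RetrieveRequest{
		Query:     "reset password",
		TopK:      5,    // <= 0 would fall back to defaultTopK (8)
		Threshold: 0.35, // rows scoring below this are dropped before rerank
	})
	if err != nil {
		return err
	}
	if res.FallbackUsed {
		// Rerank failed; Items keep the vector-score order.
		fmt.Println("degraded:", res.FallbackReason) // RERANK_FAILED
	}
	for _, item := range res.Items {
		fmt.Printf("%.3f %s %s\n", item.Score, item.DocumentID, item.ChunkID)
	}
	return nil
}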

// Delete removes the vectors with the given IDs.
func (p *Pipeline) Delete(ctx context.Context, ids []string) error {
	if p == nil || p.store == nil {
		return nil
	}
	return p.store.Delete(ctx, ids)
}

func (p *Pipeline) recoverExecutionPanic(ctx context.Context, operation string, errPtr *error) {
	recovered := recover()
	if recovered == nil || errPtr == nil {
		return
	}

	panicErr := fmt.Errorf("rag pipeline panic recovered: operation=%s panic=%v", operation, recovered)
	*errPtr = panicErr

	// 1. The Pipeline is the orchestration boundary around chunk/embed/store/rerank;
	//    a misbehaving third-party dependency must not kill the caller's request.
	// 2. Recover here and continue with error semantics, letting runtime/service
	//    decide whether to degrade, fall back, or just log.
	// 3. The stack goes only to the observation layer, never into the return
	//    value, so huge stack traces never leak into user-facing error text.
	if p != nil && p.observer != nil {
		p.observer.Observe(ctx, ObserveEvent{
			Level:     ObserveLevelError,
			Component: "pipeline",
			Operation: operation + "_panic_recovered",
			Fields: map[string]any{
				"status":     "failed",
				"panic":      fmt.Sprintf("%v", recovered),
				"panic_type": fmt.Sprintf("%T", recovered),
				"error":      panicErr,
				"error_code": ClassifyErrorCode(panicErr),
				"stack":      string(debug.Stack()),
			},
		})
		return
	}
	if p != nil && p.logger != nil {
		p.logger.Printf("rag pipeline panic recovered: operation=%s panic=%v stack=%s", operation, recovered, string(debug.Stack()))
	}
}
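
Aside: a sketch of the recover semantics (illustrative; panicEmbedder is hypothetical, and windowChunker comes from the earlier aside). A panicking dependency surfaces as an ordinary error rather than crashing the caller.

package ragsketch

import (
	"context"

	core "example.com/backend/services/rag/core" // hypothetical module path
)

type panicEmbedder struct{}

func (panicEmbedder) Embed(context.Context, []string, string) ([][]float32, error) {
	panic("embedder exploded")
}

func demoRecover(store core.VectorStore) error {
	p := core.NewPipeline(windowChunker{}, panicEmbedder{}, store, nil)
	_, err := p.Retrieve(context.Background(), nil, core.RetrieveRequest{Query: "q"})
	// err is non-nil:
	// "rag pipeline panic recovered: operation=retrieve panic=embedder exploded"
	return err
}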

func normalizeChunkOption(opt ChunkOption) ChunkOption {
	if opt.ChunkSize <= 0 {
		opt.ChunkSize = defaultChunkSize
	}
	if opt.ChunkOverlap < 0 {
		opt.ChunkOverlap = 0
	}
	if opt.ChunkOverlap >= opt.ChunkSize {
		opt.ChunkOverlap = defaultChunkOvLap
		if opt.ChunkOverlap >= opt.ChunkSize {
			opt.ChunkOverlap = opt.ChunkSize / 5
		}
	}
	return opt
}
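
Aside: worked examples of this normalization (pairs are {ChunkSize, ChunkOverlap}, in -> out):

// {0, -1}    -> {400, 0}   size defaulted, negative overlap clamped to 0
// {400, 500} -> {400, 80}  overlap >= size, so reset to defaultChunkOvLap
// {50, 60}   -> {50, 10}   the default 80 is still >= 50, so fall back to size/5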

func cloneMap(src map[string]any) map[string]any {
	if len(src) == 0 {
		return map[string]any{}
	}
	dst := make(map[string]any, len(src))
	for key, value := range src {
		dst[key] = value
	}
	return dst
}

func mergeMap(base map[string]any, ext map[string]any) map[string]any {
	if base == nil {
		base = map[string]any{}
	}
	for key, value := range ext {
		base[key] = value
	}
	return base
}

func asString(v any) string {
	if v == nil {
		return ""
	}
	return fmt.Sprintf("%v", v)
}
94
backend/services/rag/core/types.go
Normal file
@@ -0,0 +1,94 @@
package core

import "time"

// SourceDocument is the unified corpus document model.
//
// Responsibility boundaries:
//  1. Describes only a raw document that can be chunked and indexed;
//  2. Carries no business workflow state.
type SourceDocument struct {
	ID        string
	Text      string
	Title     string
	Metadata  map[string]any
	CreatedAt time.Time
}

// Chunk is the standard chunking result.
type Chunk struct {
	ID         string
	DocumentID string
	Text       string
	Order      int
	Metadata   map[string]any
}

// ChunkOption controls chunking parameters.
type ChunkOption struct {
	ChunkSize    int
	ChunkOverlap int
}

// IngestOption controls ingestion parameters.
type IngestOption struct {
	Chunk ChunkOption
	// Action selects the embedding mode (add/update/search).
	Action string
}

// IngestResult summarizes one ingestion run.
type IngestResult struct {
	DocumentCount int
	ChunkCount    int
}

// RetrieveRequest is the unified retrieval request.
type RetrieveRequest struct {
	Query       string
	TopK        int
	Threshold   float64
	Action      string
	Filter      map[string]any
	CorpusInput any
}

// ScoredChunk is the unified recall result.
type ScoredChunk struct {
	ChunkID    string
	DocumentID string
	Text       string
	Score      float64
	Metadata   map[string]any
}

// RetrieveResult summarizes one retrieval run.
type RetrieveResult struct {
	Items          []ScoredChunk
	RawCount       int
	FallbackUsed   bool
	FallbackReason string
}

// VectorRow is the standard vector-store row.
type VectorRow struct {
	ID        string
	Vector    []float32
	Text      string
	Metadata  map[string]any
	CreatedAt time.Time
	UpdatedAt time.Time
}

// VectorSearchRequest is the vector search request.
type VectorSearchRequest struct {
	QueryVector []float32
	TopK        int
	Filter      map[string]any
}

// ScoredVectorRow is one vector search hit.
type ScoredVectorRow struct {
	Row   VectorRow
	Score float64
}