Version: 0.9.14.dev.260410

Backend:
  1. LLM client promoted from newAgent/llm to the infra/llm infrastructure layer
     - Deleted backend/newAgent/llm/ (ark.go / ark_adapter.go / client.go / json.go)
     - Migrated one-to-one to backend/infra/llm/; every newAgent node and service now imports infrallm instead
     - Removes newAgent's private hold on the model client, paving the way for reuse by memory / websearch and other modules (a call-site sketch follows this item)
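Since call sites only swap the import alias, a post-migration caller looks like the sketch below; askYesNo and its yesNo payload are hypothetical, while GenerateJSON, GenerateOptions, ThinkingModeDisabled and Client are the real identifiers visible in the diffs further down.

package example

import (
	"context"

	"github.com/cloudwego/eino/schema"

	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
)

// yesNo is a hypothetical structured-output payload for this sketch.
type yesNo struct {
	Answer bool `json:"answer"`
}

// askYesNo shows the relocated package in use: same generic JSON helper,
// same options struct, only the import path changed.
func askYesNo(ctx context.Context, client *infrallm.Client, question string) (bool, error) {
	messages := []*schema.Message{schema.UserMessage(question)}
	decision, _, err := infrallm.GenerateJSON[yesNo](ctx, client, messages, infrallm.GenerateOptions{
		Temperature: 0.1,
		MaxTokens:   100,
		Thinking:    infrallm.ThinkingModeDisabled,
	})
	if err != nil {
		return false, err
	}
	return decision.Answer, nil
}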
  2. RAG infrastructure reaches a runnable state (factory / runtime / observer / service layers in place)
     - New backend/infra/rag/factory.go / runtime.go / observe.go / observer.go / service.go: factory construction, runtime lifecycle, lightweight observation hooks, and a retrieval-service facade (a hedged interface sketch follows this item)
     - Updated infra/rag/config/config.go: full set of Milvus / Embed / Reranker options with defaults
     - Updated infra/rag/embed/eino_embedder.go: stronger Eino embedding adapter with BaseURL / APIKey env vars / timeout / dimension parameters
     - Updated infra/rag/store/milvus_store.go: complete Milvus vector store (create collection / create index / Upsert / Search / Delete), supporting COSINE / L2 / IP metrics
     - Updated infra/rag/core/pipeline.go: adapted to the Runtime interface; the Pipeline is now injected by the factory rather than assembled by hand
     - Updated infra/rag/corpus/memory_corpus.go / vector_store.go: wired to the Memory module data source and the extended Store interface
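The split of responsibilities across the four new files can be pictured as below; this is a hedged sketch inferred from the file names and bullet descriptions, not the actual definitions in backend/infra/rag/.

package rag

import "context"

// Document is an illustrative retrieval unit for this sketch.
type Document struct {
	ID      string
	Content string
	Score   float32
}

// Config stands in for infra/rag/config (Milvus / Embed / Reranker sections).
type Config struct{ /* ... */ }

// Observer is a guess at the lightweight observation hook (observe.go / observer.go).
type Observer interface {
	OnRetrieve(ctx context.Context, query string, hits int, err error)
}

// Runtime is a guess at the lifecycle owner defined in runtime.go.
type Runtime interface {
	Retrieve(ctx context.Context, query string, topK int) ([]Document, error)
	Close() error
}

// NewRuntime sketches the factory entry point (factory.go): it assembles the
// embedder, the Milvus store and the pipeline from config, so callers never
// wire the pipeline by hand.
func NewRuntime(ctx context.Context, cfg Config, obs Observer) (Runtime, error) {
	panic("sketch only: build embedder, store, pipeline; return the facade")
}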
  3. Memory module upgraded from the Day1 skeleton to a fully runnable Day2 state
     - New memory/module.go: unified Module facade wrapping EnqueueExtract / ReadService / ManageService / WithTx / StartWorker; the startup layer depends on this single entry point (a facade sketch follows this item)
     - New memory/orchestrator/llm_write_orchestrator.go: LLM-driven memory-extraction orchestrator, replacing the earlier mock extraction
     - New memory/service/read_service.go: read path with per-user toggle filtering + lightweight reranking + access-time refresh
     - New memory/service/manage_service.go: management-plane capabilities (list / soft delete / read-write toggles); deletions write the audit log synchronously
     - New memory/service/common.go: shared service-layer helpers
     - New memory/worker/loop.go: background polling loop RunPollingLoop that periodically claims pending jobs and advances them (a loop sketch follows this item)
     - New memory/utils/audit.go / settings.go: pure functions for audit-log construction and user-settings filtering
     - Updated memory/model/item.go / job.go / settings.go / config.go / status.go: filled out DTO fields and status constants
     - Updated memory/repo/item_repo.go / job_repo.go / audit_repo.go / settings_repo.go: filled out CRUD and query capabilities
     - Updated memory/worker/runner.go: Runner wired to the Module and the LLM extractor; the job state machine is now complete
     - Updated memory/README.md: module documentation brought in line with the current state
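A hedged sketch of the Module facade in memory/module.go: the five exposed names come straight from the bullet above, but every signature, including the use of *gorm.DB for WithTx, is an assumption.

package memory

import (
	"context"

	"gorm.io/gorm" // assumption: the tx handle is a GORM transaction
)

// ReadService and ManageService stand in for the real types in memory/service/.
type ReadService struct{ /* toggle filter + rerank + access-time refresh */ }
type ManageService struct{ /* list / soft delete / read-write toggles */ }

// Module is the single entry point the startup layer depends on.
type Module struct {
	db     *gorm.DB
	read   *ReadService
	manage *ManageService
}

// EnqueueExtract queues a memory-extraction job (signature assumed).
func (m *Module) EnqueueExtract(ctx context.Context, userID, conversationID string) error {
	panic("sketch only")
}

// WithTx binds the facade to an outer transaction so event handlers can join
// it without importing repo/service internals.
func (m *Module) WithTx(tx *gorm.DB) *Module {
	return &Module{db: tx, read: m.read, manage: m.manage}
}

func (m *Module) ReadService() *ReadService     { return m.read }
func (m *Module) ManageService() *ManageService { return m.manage }

// StartWorker launches the polling loop from memory/worker/loop.go.
func (m *Module) StartWorker(ctx context.Context) { panic("sketch only") }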
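RunPollingLoop itself is not shown on this commit page, so the loop below is a generic sketch of the "claim pending jobs on a timer and advance them" pattern; the interval, batch size and callback shapes are all invented.

package worker

import (
	"context"
	"log"
	"time"
)

// Job is a placeholder for the real job model in memory/model.
type Job struct{ ID string }

// RunPollingLoop claims up to a batch of pending jobs on each tick and
// advances them one by one, exiting when the context is cancelled.
func RunPollingLoop(
	ctx context.Context,
	claim func(ctx context.Context, limit int) ([]Job, error),
	advance func(ctx context.Context, job Job) error,
) {
	ticker := time.NewTicker(5 * time.Second) // invented interval
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			jobs, err := claim(ctx, 10) // invented batch size
			if err != nil {
				log.Printf("memory worker: claim failed: %v", err)
				continue
			}
			for _, job := range jobs {
				if err := advance(ctx, job); err != nil {
					log.Printf("memory worker: job %s advance failed: %v", job.ID, err)
				}
			}
		}
	}
}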
  4. newAgent wired for Memory read injection, with tool-registry dependencies pre-seeded
     - New service/agentsvc/agent_memory.go: defines the MemoryReader interface + injectMemoryContext, which adds memory context uniformly before graph execution (a sketch follows this item)
     - Updated service/agentsvc/agent.go: added the memoryReader field and the SetMemoryReader method
     - Updated service/agentsvc/agent_newagent.go: calls injectMemoryContext to inject the pinned block; retrieval failures only degrade and never block the main path
     - Updated newAgent/tools/registry.go: added DefaultRegistryDeps (including RAGRuntime); the tool registry now supports dependency injection
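The names MemoryReader, injectMemoryContext and SetMemoryReader are real; the signatures and the prompt-prepending behavior below are assumptions consistent with the "pinned block, degrade on failure" description.

package agentsvc

import (
	"context"
	"log"
)

// MemoryReader is the narrow interface the agent service declares, so the
// Memory module can satisfy it without agentsvc importing memory internals.
type MemoryReader interface {
	ReadForPrompt(ctx context.Context, userID, query string) (string, error)
}

// injectMemoryContext prepends retrieved memories as a pinned block before
// graph execution; a retrieval failure only degrades (the input is returned
// unchanged) and never blocks the main path.
func injectMemoryContext(ctx context.Context, reader MemoryReader, userID, query, prompt string) string {
	if reader == nil {
		return prompt
	}
	block, err := reader.ReadForPrompt(ctx, userID, query)
	if err != nil {
		log.Printf("memory read degraded: %v", err)
		return prompt
	}
	if block == "" {
		return prompt
	}
	return block + "\n\n" + prompt
}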
  5. Startup flow and event-handler wiring updated
     - Updated cmd/start.go: initialize the RAG Runtime → Memory Module → register event handlers → start the Worker background polling
     - Updated service/events/memory_extract_requested.go: switched to the unified memory.Module.WithTx(tx) facade; the event handler no longer depends directly on the repo/service internal packages (a handler sketch follows this item)
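With the facade in place, the handler collapses to a single call through the module; the handler signature and payload fields below are assumptions, and the import path is inferred from the module layout rather than shown in the diff.

package events

import (
	"context"

	"gorm.io/gorm"

	"github.com/LoveLosita/smartflow/backend/memory" // path inferred, not shown in this commit
)

// HandleMemoryExtractRequested joins the caller's transaction through the
// Module facade instead of reaching into repo/service packages.
func HandleMemoryExtractRequested(ctx context.Context, tx *gorm.DB, mod *memory.Module, userID, conversationID string) error {
	return mod.WithTx(tx).EnqueueExtract(ctx, userID, conversationID)
}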
  6. Cache plugin and config sync
     - Updated middleware/cache_deleter.go: silently ignore the new MemoryJob / MemoryItem / MemoryAuditLog / MemoryUserSetting models to stop the log spam; removed stale comments (a skip-list sketch follows this item)
     - Updated config.example.yaml: added the rag / memory / websearch sections with defaults
     - Updated go.mod / go.sum: added the eino-ext/openai / json-patch / go-openai dependencies
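The cache_deleter change amounts to a skip set over model names. A sketch of that pattern follows; the surrounding plugin hook is not shown in this commit, so shouldLogCacheMiss is illustrative.

package middleware

// silentModels lists the new Memory tables whose cache-delete misses should
// not be logged; the model names match the changelog entry above.
var silentModels = map[string]struct{}{
	"MemoryJob":         {},
	"MemoryItem":        {},
	"MemoryAuditLog":    {},
	"MemoryUserSetting": {},
}

// shouldLogCacheMiss reports whether a miss for the given model deserves a
// log line; Memory models are skipped silently to avoid log spam.
func shouldLogCacheMiss(model string) bool {
	_, silent := silentModels[model]
	return !silent
}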
Frontend: none. Repository: none.
Author: Losita
Date:   2026-04-10 23:17:38 +08:00
parent fae162162a
commit bf1f1defa5
53 changed files with 5875 additions and 231 deletions

View File

@@ -9,7 +9,7 @@ import (
 	"github.com/cloudwego/eino/schema"
-	newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
+	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
@@ -46,7 +46,7 @@ type ChatNodeInput struct {
 	ConversationContext *newagentmodel.ConversationContext
 	UserInput           string
 	ConfirmAction       string
-	Client              *newagentllm.Client
+	Client              *infrallm.Client
 	ChunkEmitter        *newagentstream.ChunkEmitter
 }
@@ -91,14 +91,14 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
 	}
 	messages := newagentprompt.BuildChatRoutingMessages(conversationContext, input.UserInput, flowState)
-	decision, rawResult, err := newagentllm.GenerateJSON[newagentmodel.ChatRoutingDecision](
+	decision, rawResult, err := infrallm.GenerateJSON[newagentmodel.ChatRoutingDecision](
 		ctx,
 		input.Client,
 		messages,
-		newagentllm.GenerateOptions{
+		infrallm.GenerateOptions{
 			Temperature: 0.1,
 			MaxTokens:   500,
-			Thinking:    newagentllm.ThinkingModeDisabled,
+			Thinking:    infrallm.ThinkingModeDisabled,
 			Metadata: map[string]any{
 				"stage": chatStageName,
 				"phase": "routing",
@@ -412,10 +412,10 @@ func handleDeepAnswer(
 	// 2. Second LLM call: thinking enabled for the deep answer.
 	deepMessages := newagentprompt.BuildDeepAnswerMessages(conversationContext, input.UserInput)
-	deepResult, err := input.Client.GenerateText(ctx, deepMessages, newagentllm.GenerateOptions{
+	deepResult, err := input.Client.GenerateText(ctx, deepMessages, infrallm.GenerateOptions{
 		Temperature: 0.5,
 		MaxTokens:   2000,
-		Thinking:    newagentllm.ThinkingModeEnabled,
+		Thinking:    infrallm.ThinkingModeEnabled,
 		Metadata: map[string]any{
 			"stage": chatStageName,
 			"phase": "deep_answer",

View File

@@ -8,7 +8,7 @@ import (
 	"github.com/cloudwego/eino/schema"
-	newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
+	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
@@ -30,7 +30,7 @@ const (
 type DeliverNodeInput struct {
 	RuntimeState        *newagentmodel.AgentRuntimeState
 	ConversationContext *newagentmodel.ConversationContext
-	Client              *newagentllm.Client
+	Client              *infrallm.Client
 	ChunkEmitter        *newagentstream.ChunkEmitter
 }
@@ -95,7 +95,7 @@ func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
 // generateDeliverSummary tries an LLM call to produce the delivery summary, degrading to mechanical formatting on failure.
 func generateDeliverSummary(
 	ctx context.Context,
-	client *newagentllm.Client,
+	client *infrallm.Client,
 	flowState *newagentmodel.CommonState,
 	conversationContext *newagentmodel.ConversationContext,
 ) string {
@@ -116,10 +116,10 @@ func generateDeliverSummary(
 	result, err := client.GenerateText(
 		ctx,
 		messages,
-		newagentllm.GenerateOptions{
+		infrallm.GenerateOptions{
 			Temperature: 0.5,
 			MaxTokens:   800,
-			Thinking:    newagentllm.ThinkingModeDisabled,
+			Thinking:    infrallm.ThinkingModeDisabled,
 			Metadata: map[string]any{
 				"stage": deliverStageName,
 			},

View File

@@ -10,7 +10,7 @@ import (
 	"strings"
 	"time"
-	newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
+	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
@@ -48,7 +48,7 @@ type ExecuteNodeInput struct {
 	RuntimeState        *newagentmodel.AgentRuntimeState
 	ConversationContext *newagentmodel.ConversationContext
 	UserInput           string
-	Client              *newagentllm.Client
+	Client              *infrallm.Client
 	ChunkEmitter        *newagentstream.ChunkEmitter
 	ResumeNode          string
 	ToolRegistry        *newagenttools.ToolRegistry
@@ -188,14 +188,14 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
 		flowState.ConversationID,
 		flowState.RoundUsed,
 	)
-	decision, rawResult, err := newagentllm.GenerateJSON[newagentmodel.ExecuteDecision](
+	decision, rawResult, err := infrallm.GenerateJSON[newagentmodel.ExecuteDecision](
 		ctx,
 		input.Client,
 		messages,
-		newagentllm.GenerateOptions{
+		infrallm.GenerateOptions{
 			Temperature: 1.0,   // thinking mode requires temperature=1
 			MaxTokens:   16000, // must leave enough budget for the thinking chain
-			Thinking:    newagentllm.ThinkingModeEnabled,
+			Thinking:    infrallm.ThinkingModeEnabled,
 			Metadata: map[string]any{
 				"stage":      executeStageName,
 				"step_index": flowState.CurrentStep,

View File

@@ -8,7 +8,7 @@ import (
 	"github.com/google/uuid"
-	newagentllm "github.com/LoveLosita/smartflow/backend/newAgent/llm"
+	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
@@ -31,7 +31,7 @@ type PlanNodeInput struct {
 	RuntimeState        *newagentmodel.AgentRuntimeState
 	ConversationContext *newagentmodel.ConversationContext
 	UserInput           string
-	Client              *newagentllm.Client
+	Client              *infrallm.Client
 	ChunkEmitter        *newagentstream.ChunkEmitter
 	ResumeNode          string
 	AlwaysExecute       bool // when true, the plan is auto-confirmed after generation instead of entering the confirm node
@@ -70,14 +70,14 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
 	messages := newagentprompt.BuildPlanMessages(flowState, conversationContext, input.UserInput)
 	// 3. Phase 1: quick assessment with thinking enabled, so the LLM produces the complexity assessment and the plan in one pass.
-	decision, rawResult, err := newagentllm.GenerateJSON[newagentmodel.PlanDecision](
+	decision, rawResult, err := infrallm.GenerateJSON[newagentmodel.PlanDecision](
 		ctx,
 		input.Client,
 		messages,
-		newagentllm.GenerateOptions{
+		infrallm.GenerateOptions{
 			Temperature: 0.2,
 			MaxTokens:   1600,
-			Thinking:    newagentllm.ThinkingModeEnabled,
+			Thinking:    infrallm.ThinkingModeEnabled,
 			Metadata: map[string]any{
 				"stage": planStageName,
 				"phase": "assessment",
@@ -108,14 +108,14 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
 		return fmt.Errorf("深度规划状态推送失败: %w", err)
 	}
-	deepDecision, _, deepErr := newagentllm.GenerateJSON[newagentmodel.PlanDecision](
+	deepDecision, _, deepErr := infrallm.GenerateJSON[newagentmodel.PlanDecision](
 		ctx,
 		input.Client,
 		messages,
-		newagentllm.GenerateOptions{
+		infrallm.GenerateOptions{
 			Temperature: 0.2,
 			MaxTokens:   3200,
-			Thinking:    newagentllm.ThinkingModeEnabled,
+			Thinking:    infrallm.ThinkingModeEnabled,
 			Metadata: map[string]any{
 				"stage": planStageName,
 				"phase": "deep_planning",