Version: 0.9.65.dev.260503

Backend:
1. Phases 1.5/1.6: close out llm-service / rag-service, unifying the model egress and the retrieval-infrastructure entry point, and retire the legacy backend/infra/llm and backend/infra/rag implementations;
2. Update the affected call chains and the microservice migration plan docs in step.
Losita
2026-05-03 23:21:03 +08:00
parent a6c1e5d077
commit 9902ca3563
65 changed files with 550 additions and 376 deletions
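
The hunks below swap every import of backend/infra/llm for backend/services/llm and leave call sites otherwise unchanged. A minimal sketch of the exported surface those hunks imply; only the names Client, Stream, GenerateOptions, and the fields in the options literal come from the diff, while the signatures and field types are assumptions:

// Hypothetical reconstruction of backend/services/llm as implied by this
// commit's hunks; not the actual implementation.
package llm

import (
	"context"

	"github.com/cloudwego/eino/schema"
)

// GenerateOptions mirrors the struct literal passed to Stream in the hunks
// below; the Thinking field's type is assumed.
type GenerateOptions struct {
	Temperature float64
	MaxTokens   int
	Thinking    string
}

// Client is the unified model egress point this commit converges on.
type Client struct {
	// provider handles and configuration elided
}

// Stream sends the conversation to the model and returns a streaming
// reader; the return type is inferred from the eino schema import visible
// in the diff.
func (c *Client) Stream(ctx context.Context, messages []*schema.Message, opts GenerateOptions) (*schema.StreamReader[*schema.Message], error) {
	return nil, nil // implementation elided
}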


@@ -8,11 +8,11 @@ import (
 	"log"
 	"strings"
-	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentrouter "github.com/LoveLosita/smartflow/backend/newAgent/router"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
 	newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
+	llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
 	"github.com/cloudwego/eino/schema"
 	"github.com/google/uuid"
 )
@@ -38,7 +38,7 @@ func collectExecuteDecisionFromLLM(
 	reader, err := input.Client.Stream(
 		ctx,
 		messages,
-		infrallm.GenerateOptions{
+		llmservice.GenerateOptions{
 			Temperature: 1.0,
 			MaxTokens:   131072,
 			Thinking:    newagentshared.ResolveThinkingMode(input.ThinkingEnabled),
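
The hunk above only swaps the options type; the reader that Stream returns is then drained chunk by chunk. A self-contained sketch of that consumption pattern, assuming eino's schema stream API (Pipe, Recv, Close are real eino calls, terminated by io.EOF); the drain helper itself is illustrative, not from the repo:

package main

import (
	"fmt"
	"io"

	"github.com/cloudwego/eino/schema"
)

// drain concatenates every chunk from an eino stream reader; Recv returns
// io.EOF once the writer side has closed the stream.
func drain(reader *schema.StreamReader[*schema.Message]) (string, error) {
	defer reader.Close()
	var out string
	for {
		msg, err := reader.Recv()
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return "", err
		}
		out += msg.Content
	}
}

func main() {
	// Build a small in-memory stream for demonstration; schema.Pipe pairs
	// a reader with a writer.
	sr, sw := schema.Pipe[*schema.Message](2)
	sw.Send(&schema.Message{Content: "hello "}, nil)
	sw.Send(&schema.Message{Content: "world"}, nil)
	sw.Close()

	text, err := drain(sr)
	if err != nil {
		fmt.Println("stream error:", err)
		return
	}
	fmt.Println(text) // hello world
}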
@@ -123,7 +123,7 @@ func collectExecuteDecisionFromLLM(
 		return nil, nil
 	}
-	decision, parseErr := infrallm.ParseJSONObject[newagentmodel.ExecuteDecision](result.DecisionJSON)
+	decision, parseErr := llmservice.ParseJSONObject[newagentmodel.ExecuteDecision](result.DecisionJSON)
 	if parseErr != nil {
 		log.Printf(
 			"[DEBUG] execute LLM JSON 解析失败 chat=%s round=%d json=%s raw=%s",


@@ -5,12 +5,12 @@ import (
 	"fmt"
 	newagentshared "github.com/LoveLosita/smartflow/backend/newAgent/shared"
-	infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
 	newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
 	newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
 	newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
 	newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
 	"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
+	llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
 )
 
 const (
@@ -29,7 +29,7 @@ type ExecuteNodeInput struct {
 	RuntimeState        *newagentmodel.AgentRuntimeState
 	ConversationContext *newagentmodel.ConversationContext
 	UserInput           string
-	Client              *infrallm.Client
+	Client              *llmservice.Client
 	ChunkEmitter        *newagentstream.ChunkEmitter
 	ResumeNode          string
 	ToolRegistry        *newagenttools.ToolRegistry
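
Callers that assemble ExecuteNodeInput now hand over the services client. A wiring sketch built only from the field names and types in the hunk above; the constructor is hypothetical and compiles only inside the repo:

// Hypothetical constructor; field names come from the hunk above, the
// function itself is illustrative.
func buildExecuteNodeInput(
	state *newagentmodel.AgentRuntimeState,
	conv *newagentmodel.ConversationContext,
	client *llmservice.Client, // was *infrallm.Client before this commit
	emitter *newagentstream.ChunkEmitter,
	registry *newagenttools.ToolRegistry,
	userInput string,
) ExecuteNodeInput {
	return ExecuteNodeInput{
		RuntimeState:        state,
		ConversationContext: conv,
		UserInput:           userInput,
		Client:              client,
		ChunkEmitter:        emitter,
		ResumeNode:          "", // empty unless resuming a prior node
		ToolRegistry:        registry,
	}
}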