🐛 fix(agent/schedulerefine): fix the composite fine-tuning branch pipeline and refactor MinContextSwitch into fixed-slot reordering semantics

- 🔧 Fix three issues in the `schedulerefine` composite routing: incomplete parameter pass-through, erroneous fallback when the deterministic objective is missing, and conflation of "composite tool executed successfully" with "final review passed"
- ✅ Ensure the new standalone composite branch executes and exits correctly, with the final verdict delegated uniformly to `hard_check`
- 🔍 During the investigation, found that the `context_tag` upstream of `MinContextSwitch` can degrade wholesale to `General`, which breaks MinContextSwitch's grouping
- 🛡️ Add a fallback for `MinContextSwitch`: when the tags degrade wholesale, infer the subject grouping from task-name keywords so the grouping capability never fails outright (see the first sketch below)
- ♻️ Change `MinContextSwitch` from "re-searching new slots across the whole week" to "slots stay fixed, only the task order changes"
- 🎯 Switch the execution strategy from sequential `BatchMove` to an atomic rewrite over fixed slots, eliminating long-distance slot jumps, cross-day mis-migrations, abnormal insertion into class slots, and cyclic-swap conflicts (see the second sketch below)
- 🧹 Fix `hard_check` still running `origin_rank` order restoration after a successful `MinContextSwitch`, which caused reversed-order false negatives in the final review
- 🚦 Skip order restoration and sequential hard validation once this branch is hit, so `summary` / `hard_check` no longer misjudge a valid reordering as a failure

📈 All features involved in continuous fine-tuning planning now run stably; next we will keep extending the capability boundary and further optimize the `schedule_plan` flow

♻️ refactor: restructure the agent2 architecture and migrate the new quicknote/chat pipelines; 3 modules remain to be migrated, after which the old agent will be deleted and this directory renamed to agent

- 🏗️ Settle on a "unified layered directory + layered files + dependency injection" scheme for `agent2`, dropping the old deeply nested per-module directory structure
- 🧩 Flesh out the `agent2` base skeleton, consolidating the responsibilities of the `entrance` / `router` / `llm` / `stream` / `shared` / `model` / `prompt` / `node` / `graph` layers
- 🚚 Migrate the shared routing capability into `agent2/router`, distilling unified `Action` and `RoutingDecision` types, control-code parsing, and `Dispatcher` / `Resolver` abstractions (illustrative shapes in the third sketch below)
- 💬 Migrate the plain chat pipeline into `agent2/chat`, reusing `stream`'s OpenAI-compatible output protocol and LLM usage aggregation
- 📝 Migrate the `quicknote` pipeline to the new `agent2` structure, split into `model` / `prompt` / `llm` / `node` / `graph` layers, replacing direct dependencies on the old `agent/quicknote`
- 🔌 Repoint `agentsvc` at `agent2`: plain chat, general routing, and `quicknote` all switch to the new pipelines
- ✂️ Remove the internal `runner` adapter layer in graph; the node layer now holds request-scoped dependencies directly and exposes node methods to the graph
- 🧹 Merge `graph/quicknote` and `graph/quicknote_run`, delete redundant skeleton files, and converge on a single `quicknote graph` file
- 📚 Add the `agent2` "Shared Capability Onboarding Guide", defining shared-capability boundaries, onboarding steps, and graph/node collaboration conventions
- 📝 Update `AGENTS.md` to require that the onboarding guide be kept in sync whenever `agent2` shared capabilities are extended

♻️ refactor: remove two redundant prompts from the Chat module in the current agent directory
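A minimal sketch of what the task-name keyword fallback could look like. The function name `inferSubjectGroup`, the keyword table, and the group names are all illustrative placeholders, not the actual `MinContextSwitch` implementation:

```go
package schedulerefine

import "strings"

// subjectKeywords maps task-name fragments to subject groups; both the
// fragments and the group names here are hypothetical examples.
var subjectKeywords = map[string]string{
	"math":    "STEM",
	"physics": "STEM",
	"essay":   "Humanities",
	"history": "Humanities",
}

// inferSubjectGroup is a hypothetical fallback: when every context_tag has
// degraded to "General", derive the grouping from the task name instead, so
// MinContextSwitch can still cluster related tasks.
func inferSubjectGroup(taskName string) string {
	lower := strings.ToLower(taskName)
	for keyword, group := range subjectKeywords {
		if strings.Contains(lower, keyword) {
			return group
		}
	}
	return "General" // no keyword hit: keep the degraded tag
}
```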
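The fixed-slot reordering idea, sketched under assumed types (`Slot`, `Task`, and `applyFixedSlotReorder` are hypothetical): the set of slots never changes, only the assignment of tasks to slots is permuted, and the whole permutation is written back in one pass rather than as a sequence of `BatchMove` steps, so no intermediate state can jump slots, cross days, or deadlock in a swap cycle:

```go
package schedulerefine

import "fmt"

// Task and Slot are placeholder types standing in for the real schedule model.
type Task struct {
	Name string
}

type Slot struct {
	Day    int // weekday index: a slot's position never changes
	Period int // period index within the day
	Task   Task
}

// applyFixedSlotReorder writes a permuted task order back into an unchanged
// set of slots in a single pass: no intermediate moves exist to conflict.
func applyFixedSlotReorder(slots []Slot, reordered []Task) ([]Slot, error) {
	if len(slots) != len(reordered) {
		return nil, fmt.Errorf("slot/task count mismatch: %d vs %d", len(slots), len(reordered))
	}
	out := make([]Slot, len(slots))
	for i, s := range slots {
		s.Task = reordered[i] // only the occupant changes; (Day, Period) stays fixed
		out[i] = s
	}
	return out, nil
}
```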
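Illustrative shapes for the router abstractions named above, inferred only from the names in this commit; the actual `agent2/router` definitions may differ:

```go
package router

import "context"

// Action identifies a downstream capability.
type Action string

// RoutingDecision is what the router hands to execution.
type RoutingDecision struct {
	Action Action // which graph should handle the request
	Raw    string // the raw control code emitted by the routing model
}

// Resolver parses model output (a control code) into a RoutingDecision.
type Resolver interface {
	Resolve(ctx context.Context, modelOutput string) (RoutingDecision, error)
}

// Dispatcher routes a decision to the graph that executes it.
type Dispatcher interface {
	Dispatch(ctx context.Context, decision RoutingDecision) error
}
```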
package agentchat

import (
	"context"
	"io"
	"strings"
	"time"

	agentllm "github.com/LoveLosita/smartflow/backend/agent2/llm"
	agentstream "github.com/LoveLosita/smartflow/backend/agent2/stream"
	"github.com/cloudwego/eino-ext/components/model/ark"
	"github.com/cloudwego/eino/schema"
	"github.com/google/uuid"
	arkModel "github.com/volcengine/volcengine-go-sdk/service/arkruntime/model"
)

// StreamChat drives the model's streaming output and reserves timing
// checkpoints at key points (the instrumentation is currently commented out):
//  1. stream connection established (llm.Stream returns)
//  2. first chunk arrival (time to first token)
//  3. end of streaming output
func StreamChat(
	ctx context.Context,
	llm *ark.ChatModel,
	modelName string,
	userInput string,
	ifThinking bool,
	chatHistory []*schema.Message,
	outChan chan<- string,
	traceID string,
	chatID string,
	requestStart time.Time,
) (string, *schema.TokenUsage, error) {
	/*callStart := time.Now()*/

	messages := make([]*schema.Message, 0)
	messages = append(messages, schema.SystemMessage(SystemPrompt))
	if len(chatHistory) > 0 {
		messages = append(messages, chatHistory...)
	}
	messages = append(messages, schema.UserMessage(userInput))

	var thinking *arkModel.Thinking
	if ifThinking {
		thinking = &arkModel.Thinking{Type: arkModel.ThinkingTypeEnabled}
	} else {
		thinking = &arkModel.Thinking{Type: arkModel.ThinkingTypeDisabled}
	}

	/*connectStart := time.Now()*/
	reader, err := llm.Stream(ctx, messages, ark.WithThinking(thinking))
	if err != nil {
		return "", nil, err
	}
	defer reader.Close()

	if strings.TrimSpace(modelName) == "" {
		modelName = "smartflow-worker"
	}
	requestID := "chatcmpl-" + uuid.NewString()
	created := time.Now().Unix()
	firstChunk := true
	chunkCount := 0
	var tokenUsage *schema.TokenUsage
	/*streamRecvStart := time.Now()

	log.Printf("checkpoint|stream connected|trace_id=%s|chat_id=%s|request_id=%s|step_elapsed_ms=%d|request_total_ms=%d|history_len=%d",
		traceID,
		chatID,
		requestID,
		time.Since(connectStart).Milliseconds(),
		time.Since(requestStart).Milliseconds(),
		len(chatHistory),
	)*/

	var fullText strings.Builder
	for {
		chunk, err := reader.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", nil, err
		}
		if chunk == nil {
			continue // defensive: skip empty frames
		}

		// Prefer the model's real usage numbers (usually returned in the
		// trailing chunk, though some models also report them mid-stream).
		if chunk.ResponseMeta != nil && chunk.ResponseMeta.Usage != nil {
			tokenUsage = agentllm.MergeUsage(tokenUsage, chunk.ResponseMeta.Usage)
		}

		fullText.WriteString(chunk.Content)

		payload, err := agentstream.ToOpenAIStream(chunk, requestID, modelName, created, firstChunk)
		if err != nil {
			return "", nil, err
		}
		if payload != "" {
			outChan <- payload
			chunkCount++
			firstChunk = false
			/*if firstChunk {
				log.Printf("checkpoint|first chunk arrived|trace_id=%s|chat_id=%s|request_id=%s|step_elapsed_ms=%d|request_total_ms=%d",
					traceID,
					chatID,
					requestID,
					time.Since(streamRecvStart).Milliseconds(),
					time.Since(requestStart).Milliseconds(),
				)
				firstChunk = false
			}*/
		}
	}

	finishChunk, err := agentstream.ToOpenAIFinishStream(requestID, modelName, created)
	if err != nil {
		return "", nil, err
	}
	outChan <- finishChunk
	outChan <- "[DONE]"

	/*log.Printf("checkpoint|stream finished|trace_id=%s|chat_id=%s|request_id=%s|chunks=%d|reply_chars=%d|step_elapsed_ms=%d|request_total_ms=%d",
		traceID,
		chatID,
		requestID,
		chunkCount,
		len(fullText.String()),
		time.Since(callStart).Milliseconds(),
		time.Since(requestStart).Milliseconds(),
	)*/

	return fullText.String(), tokenUsage, nil
}
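A hedged usage sketch for `StreamChat`, assuming an already-constructed `*ark.ChatModel`; `runChat` and its argument values are illustrative, not actual smartflow code. The caller owns `outChan` and drains it from a separate goroutine, as an HTTP handler would when forwarding each payload as a server-sent event, closing the channel only after `StreamChat` returns:

```go
package agentchat

import (
	"context"
	"fmt"
	"time"

	"github.com/cloudwego/eino-ext/components/model/ark"
)

// runChat is an illustrative caller: it drains outChan concurrently while
// StreamChat produces OpenAI-compatible chunks, the finish chunk, and "[DONE]".
func runChat(ctx context.Context, llm *ark.ChatModel) error {
	outChan := make(chan string, 16)
	done := make(chan struct{})

	go func() {
		defer close(done)
		for payload := range outChan {
			fmt.Println(payload) // in the real service: write as an SSE event
		}
	}()

	reply, usage, err := StreamChat(
		ctx, llm,
		"smartflow-worker",               // modelName (matches the in-function default)
		"Summarize my plan for tomorrow", // userInput
		false,                            // ifThinking: disable thinking mode
		nil,                              // chatHistory: fresh conversation
		outChan,
		"trace-demo", "chat-demo",
		time.Now(), // requestStart, used by the commented-out checkpoints
	)
	close(outChan) // StreamChat has returned; no more sends will occur
	<-done
	if err != nil {
		return err
	}
	fmt.Printf("reply_chars=%d usage=%+v\n", len(reply), usage)
	return nil
}
```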