Version: 0.9.26.dev.260417

后端:
1. Prompt 层从 execute 专属骨架重构为全节点统一四段式 buildUnifiedStageMessages
  - 新增 unified_context.go:定义 StageMessagesConfig + buildUnifiedStageMessages 统一骨架,所有节点(Chat/Plan/Execute/Deliver/DeepAnswer)共用同一套 msg0~msg3 拼装逻辑
  - 新增 conversation_view.go:通用对话历史渲染 buildConversationHistoryMessage,各节点复用,不再各自维护提取逻辑
  - 新增 chat_context.go / plan_context.go / deliver_context.go:各节点自行渲染 msg1(对话视图)和 msg2(工作区),统一层只负责"怎么拼",不再替节点决定"放什么"
  - Chat/Plan/Deliver/Execute 的 BuildXXXMessages 全部从 buildStageMessages 切到 buildUnifiedStageMessages,移除旧路径
  - 删除 execute_pinned.go:execute 记忆渲染合并到统一层 renderUnifiedMemoryContext
  - Plan prompt 不再在 user prompt 中拼装任务类 ID 列表和 renderStateSummary,改为依赖 msg2 规划工作区;Chat 粗排判断从"上下文有任务类 ID"改为"批量调度需求"
  - Deliver prompt 新增 IsAborted/IsExhaustedTerminal 区分,支持粗排收口和主动终止场景
2. Execute ReAct 上下文简化——移除归档搬运、窗口裁剪和重复工具压缩
  - 移除 splitExecuteLoopRecordsByBoundary、findLatestExecuteBoundaryMarker、tailExecuteLoops、compressExecuteLoopObservationsByTool、buildEarlyExecuteReactSummary、trimExecuteMessage1ByBudget 等六个函数
  - 移除 executeLoopWindowLimit / executeConversationTurnLimit / executeMessage1MaxRunes 等预算常量
  - msg1 不再从历史中归档上一轮 ReAct 结果,只保留真实对话流(user + assistant speak),全量注入
  - msg2 不再按 loop_closed / step_advanced 边界切分"归档/活跃",直接全量注入全部 ReAct Loop 记录
  - token 预算由统一压缩层兜底,prompt 层不再做提前裁剪
3. 压缩层从 Execute 专属提升为全节点通用 UnifiedCompact
  - 删除 execute_compact.go(Execute 专属压缩文件)
  - 新增 unified_compact.go:UnifiedCompactInput 参数化,各节点(Plan/Chat/Deliver/Execute)构造时从自己的 NodeInput 提取公共字段,消除对 Execute 的直接依赖
  - CompactionStore 接口扩展 LoadStageCompaction / SaveStageCompaction,各节点按 stageKey 独立维护压缩状态互不覆盖
  - 非 4 段式消息时退化成按角色汇总统计,确保 context_token_stats 仍然刷新
4. Retry 重试机制全面下线
  - dao/agent.go:saveChatHistoryCore / SaveChatHistory / SaveChatHistoryInTx 移除 retry_group_id / retry_index /
  retry_from_user_message_id / retry_from_assistant_message_id 四个参数,修复乱码注释
  - dao/agent-cache.go:移除 ApplyRetrySeed 和 extractMessageHistoryID 两个方法
  - conv/agent.go:ToEinoMessages 不再回灌 retry_* 字段到运行期上下文
  - service/agentsvc/agent.go:移除 chatRetryMeta 及 resolveRetryGroupID / buildRetrySeed 等全部重试逻辑
  - service/agentsvc/agent_quick_note.go:整个文件删除(retry 快速补写路径已无用)
  - service/events/chat_history_persist.go:移除 retry 参数传递
5. 节点层瘦身 + 可见消息逐条持久化
  - agent_nodes.go 大幅简化:Chat/Plan/Execute/Deliver 节点方法移除 ToolSchema 注入、状态摘要渲染等逻辑,只做参数转发和状态落盘
  - 新增 visible_message.go:persistVisibleAssistantMessage 统一处理可见 assistant speak 的实时持久化,失败仅记日志不中断主流程
  - 新增 llm_debug.go:logNodeLLMContext 统一打印 LLM 上下文调试日志
  - graph_run_state.go 新增 PersistVisibleMessageFunc 类型 + AgentGraphDeps.PersistVisibleMessage 字段
  - service/agentsvc/agent_newagent.go 精简主循环,注入 PersistVisibleMessage 回调;agent_history.go 精简历史构建
  - token_budget.go 移除 Execute 专属预算检查,统一到通用预算

前端:
1. 移除 retry 相关 UI 和类型
  - agent.ts 移除 retry_group_id / retry_index / retry_total 字段及 normalize 逻辑
  - AssistantPanel.vue 移除 retry 相关 UI 和交互代码(约 700 行精简)
  - dashboard.ts 移除 retry 相关类型定义
  - AssistantView.vue 微调
2. ContextWindowMeter 压缩次数展示和数值格式优化
  - 新增 formatCompactCount 工具函数,千位以上用 k 单位压缩(如 80k)
  - 新增压缩次数显示
3. 修复了新对话发消息时,user和assistant消息被自动调换的bug

仓库:无
This commit is contained in:
Losita
2026-04-17 22:19:38 +08:00
parent d47a8bcabd
commit d8280cc647
39 changed files with 2095 additions and 2386 deletions

View File

@@ -23,23 +23,12 @@ func ToEinoMessages(dbMsgs []model.ChatHistory) []*schema.Message {
Content: safeChatHistoryText(m.MessageContent),
ReasoningContent: safeChatHistoryText(m.ReasoningContent),
}
// retry 机制已整体下线:历史数据里的 retry_* 列不再回灌到运行期上下文。
extra := make(map[string]any)
extra["history_id"] = m.ID
if m.ReasoningDurationSeconds > 0 {
extra["reasoning_duration_seconds"] = m.ReasoningDurationSeconds
}
if m.RetryGroupID != nil && *m.RetryGroupID != "" {
extra["retry_group_id"] = *m.RetryGroupID
}
if m.RetryIndex != nil && *m.RetryIndex > 0 {
extra["retry_index"] = *m.RetryIndex
}
if m.RetryFromUserMessageID != nil && *m.RetryFromUserMessageID > 0 {
extra["retry_from_user_message_id"] = *m.RetryFromUserMessageID
}
if m.RetryFromAssistantMessageID != nil && *m.RetryFromAssistantMessageID > 0 {
extra["retry_from_assistant_message_id"] = *m.RetryFromAssistantMessageID
}
if len(extra) > 0 {
msg.Extra = extra
}

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/cloudwego/eino/schema"
@@ -164,79 +163,6 @@ func (m *AgentCache) BackfillHistory(ctx context.Context, sessionID string, mess
return err
}
// ApplyRetrySeed back-fills retry metadata (retry_group_id / retry_index=1)
// onto the cached copies of a retry group's two source messages, identified
// by the history_id stored in each message's Extra map, and then rewrites the
// whole Redis history list in place.
//
// The rewrite is a DEL + RPUSH + LTRIM + EXPIRE pipeline; the order of these
// commands matters, so do not reorder them.
// NOTE(review): LRange and the pipeline are not atomic together — a concurrent
// writer between the read and the rewrite could be lost; confirm callers
// serialize per session.
func (m *AgentCache) ApplyRetrySeed(ctx context.Context, sessionID, retryGroupID string, sourceUserMessageID, sourceAssistantMessageID int) error {
// Degrade to a no-op when the cache client is not wired up.
if m == nil || m.client == nil {
return nil
}
groupID := strings.TrimSpace(retryGroupID)
if groupID == "" {
return nil
}
// Load the full cached history; nothing to seed when it is empty.
vals, err := m.client.LRange(ctx, m.historyKey(sessionID), 0, -1).Result()
if err != nil {
return err
}
if len(vals) == 0 {
return nil
}
changed := false
// Collect the positive source ids; with neither present there is no work.
targets := map[int]struct{}{}
if sourceUserMessageID > 0 {
targets[sourceUserMessageID] = struct{}{}
}
if sourceAssistantMessageID > 0 {
targets[sourceAssistantMessageID] = struct{}{}
}
if len(targets) == 0 {
return nil
}
// Seed rows always start the group at index 1.
indexOne := 1
for idx, raw := range vals {
var msg schema.Message
if err := json.Unmarshal([]byte(raw), &msg); err != nil {
return err
}
historyID := extractMessageHistoryID(&msg)
if historyID <= 0 {
continue
}
if _, ok := targets[historyID]; !ok {
continue
}
if msg.Extra == nil {
msg.Extra = make(map[string]any)
}
msg.Extra["retry_group_id"] = groupID
msg.Extra["retry_index"] = indexOne
updated, err := json.Marshal(&msg)
if err != nil {
return err
}
vals[idx] = string(updated)
changed = true
}
// Skip the rewrite entirely when no cached message matched.
if !changed {
return nil
}
// Rebuild the list: delete, re-push every element, trim to the exact
// length, and refresh the TTL — all in one pipeline round-trip.
pipe := m.client.Pipeline()
key := m.historyKey(sessionID)
pipe.Del(ctx, key)
values := make([]interface{}, 0, len(vals))
for _, item := range vals {
values = append(values, item)
}
pipe.RPush(ctx, key, values...)
pipe.LTrim(ctx, key, 0, int64(len(vals)-1))
pipe.Expire(ctx, key, m.expiration)
_, err = pipe.Exec(ctx)
return err
}
func (m *AgentCache) ClearHistory(ctx context.Context, sessionID string) error {
historyKey := m.historyKey(sessionID)
windowKey := m.historyWindowKey(sessionID)
@@ -263,49 +189,6 @@ func (m *AgentCache) DeleteConversationStatus(ctx context.Context, sessionID str
return m.client.Del(ctx, key).Err()
}
// extractMessageHistoryID pulls the numeric history_id out of a cached
// message's Extra map.
//
// history_id normally arrives from the DB backfill as a number, but Redis
// round-trips, data repairs during rollout, or manual writes may turn it into
// a JSON number or a string of digits. The lenient decoding below keeps retry
// seeding from missing its source messages over a mere type difference;
// anything absent or unparsable yields 0.
func extractMessageHistoryID(msg *schema.Message) int {
	if msg == nil || msg.Extra == nil {
		return 0
	}
	value, present := msg.Extra["history_id"]
	if !present {
		return 0
	}
	switch id := value.(type) {
	case int:
		return id
	case int32:
		return int(id)
	case int64:
		return int(id)
	case float64:
		return int(id)
	case json.Number:
		if n, err := id.Int64(); err == nil {
			return int(n)
		}
		if f, err := id.Float64(); err == nil {
			return int(f)
		}
	case string:
		if s := strings.TrimSpace(id); s != "" {
			if n, err := strconv.Atoi(s); err == nil {
				return n
			}
		}
	}
	return 0
}
// ---- Compaction 缓存 ----
func (m *AgentCache) compactionKey(chatID string) string {

View File

@@ -2,6 +2,7 @@ package dao
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
@@ -23,18 +24,23 @@ func (r *AgentDAO) WithTx(tx *gorm.DB) *AgentDAO {
return &AgentDAO{db: tx}
}
// saveChatHistoryCore 鏄€滆亰澶╂秷鎭惤搴?+ 浼氳瘽缁熻鏇存柊鈥濈殑鏍稿績瀹炵幇銆?
// saveChatHistoryCore 是"聊天消息落库 + 会话统计更新"的核心实现。
//
// 鑱岃矗杈圭晫锛?
// 1. 鍙墽琛屽綋鍓?DAO 鍙ユ焺涓婄殑鏁版嵁搴撳啓鍏ュ姩浣滐紱
// 2. 涓嶄富鍔ㄥ紑鍚簨鍔★紙浜嬪姟鐢辫皟鐢ㄦ柟鍐冲畾锛夛紱
// 3. 淇濊瘉 chat_histories 涓?agent_chats.message_count 鐨勪竴鑷存€у彛寰勩€?
// 职责边界:
// 1. 只执行当前 DAO 句柄上的数据库写入动作;
// 2. 不主动开启事务(事务由调用方决定);
// 3. 保证 chat_histories agent_chats.message_count 的一致性口径。
//
// 澶辫触澶勭悊锛?
// 1. 浠讳竴姝ラ澶辫触閮借繑鍥?error锛?
// 2. 鑻ヨ皟鐢ㄦ柟澶勪簬浜嬪姟涓紝杩斿洖 error 浼氳Е鍙戜簨鍔″洖婊氥€?
func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, retryGroupID *string, retryIndex *int, retryFromUserMessageID *int, retryFromAssistantMessageID *int, tokensConsumed int) error {
// 0. token 鍏ュ簱鍓嶅厹搴曪細璐熸暟缁熶竴褰掗浂锛岄伩鍏嶅紓甯稿€兼薄鏌撶疮璁$粺璁°€?
// 失败处理:
// 1. 任一步骤失败都返回 error
// 2. 若调用方处于事务中,返回 error 会触发事务回滚。
//
// 关于 retry 字段:
// 1. retry 机制已整体下线,本函数不再写入 retry_group_id / retry_index / retry_from_* 四列;
// 2. 这些列在 GORM ChatHistory 模型上暂时保留,列本身可空,历史数据不受影响;
// 3. Step B 会做 DROP COLUMN 的 migration。
func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int) error {
// 0. token 入库前兜底:负数统一归零,避免异常值污染累计统计。
if tokensConsumed < 0 {
tokensConsumed = 0
}
@@ -43,7 +49,7 @@ func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversa
reasoningDurationSeconds = 0
}
// 1. 鍏堝啓 chat_histories 鍘熷娑堟伅銆?
// 1. 先写 chat_histories 原始消息。
var reasoningContentPtr *string
if reasoningContent != "" {
reasoningContentPtr = &reasoningContent
@@ -53,10 +59,6 @@ func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversa
MessageContent: &message,
ReasoningContent: reasoningContentPtr,
ReasoningDurationSeconds: reasoningDurationSeconds,
RetryGroupID: retryGroupID,
RetryIndex: retryIndex,
RetryFromUserMessageID: retryFromUserMessageID,
RetryFromAssistantMessageID: retryFromAssistantMessageID,
Role: &role,
ChatID: conversationID,
TokensConsumed: tokensConsumed,
@@ -65,10 +67,10 @@ func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversa
return err
}
// 2. 鍐嶆洿鏂颁細璇濈粺璁★細
// 2.1 message_count +1锛屼繚鎸佸拰 chat_histories 琛屾暟鍙e緞涓€鑷达紱
// 2.2 tokens_total 绱姞鏈潯娑堟伅 token锛?
// 2.3 last_message_at 鍒锋柊涓哄綋鍓嶆椂闂达紝渚涗細璇濇帓搴忎娇鐢ㄣ€?
// 2. 再更新会话统计:
// 2.1 message_count +1,保持和 chat_histories 行数口径一致;
// 2.2 tokens_total 累加本条消息 token
// 2.3 last_message_at 刷新为当前时间,供会话排序使用。
now := time.Now()
updates := map[string]interface{}{
"message_count": gorm.Expr("message_count + ?", 1),
@@ -82,14 +84,14 @@ func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversa
return result.Error
}
if result.RowsAffected == 0 {
// 浼氳瘽涓嶅瓨鍦ㄦ椂鐩存帴澶辫触锛岄伩鍏嶅嚭鐜扳€滃鍎垮巻鍙叉秷鎭€濄€?
// 会话不存在时直接失败,避免出现"孤儿历史消息"。
return fmt.Errorf("conversation not found when updating stats: user_id=%d chat_id=%s", userID, conversationID)
}
// 3. 鏈€鍚庢洿鏂?users.token_usage锛堝悓涓€浜嬪姟鍐咃級锛?
// 3.1 鍙湪 tokensConsumed>0 鏃舵墽琛岋紝閬垮厤鏃犳剰涔夊啓鍏ワ紱
// 3.2 鍜?chat_histories/agent_chats 鏀惧湪鍚屼竴浜嬪姟閲岋紝淇濊瘉缁熻鍙e緞鍘熷瓙涓€鑷达紱
// 3.3 鑻ョ敤鎴疯涓嶅瓨鍦ㄥ垯杩斿洖閿欒锛岃Е鍙戜簨鍔″洖婊氾紝闃叉鍑虹幇鈥滀細璇濈粺璁℃垚鍔熶絾鐢ㄦ埛缁熻涓㈠け鈥濄€?
// 3. 最后更新 users.token_usage(同一事务内):
// 3.1 只在 tokensConsumed>0 时执行,避免无意义写入;
// 3.2 chat_histories/agent_chats 放在同一事务里,保证统计口径原子一致;
// 3.3 若用户行不存在则返回错误,触发事务回滚,防止出现"会话统计成功但用户统计丢失"。
if tokensConsumed > 0 {
userUpdate := a.db.WithContext(ctx).
Model(&model.User{}).
@@ -105,38 +107,38 @@ func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversa
return nil
}
// SaveChatHistoryInTx 鍦ㄨ皟鐢ㄦ柟鈥滃凡寮€鍚簨鍔♀€濈殑鍦烘櫙涓嬪啓鍏ヨ亰澶╁巻鍙层€?
// SaveChatHistoryInTx 在调用方"已开启事务"的场景下写入聊天历史。
//
// 璁捐鐩殑锛?
// 1. 缁欐湇鍔″眰缁勫悎澶氫釜 DAO 鎿嶄綔鏃跺鐢紝閬垮厤宓屽浜嬪姟锛?
// 2. 璁?outbox 娑堣垂澶勭悊鍣ㄥ彲浠ュ拰涓氬姟鍐欏叆鍏变韩鍚屼竴涓?tx銆?
func (a *AgentDAO) SaveChatHistoryInTx(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, retryGroupID *string, retryIndex *int, retryFromUserMessageID *int, retryFromAssistantMessageID *int, tokensConsumed int) error {
return a.saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, retryGroupID, retryIndex, retryFromUserMessageID, retryFromAssistantMessageID, tokensConsumed)
// 设计目的:
// 1. 给服务层组合多个 DAO 操作时复用,避免嵌套事务;
// 2. outbox 消费处理器可以和业务写入共享同一个 tx。
func (a *AgentDAO) SaveChatHistoryInTx(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int) error {
return a.saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, tokensConsumed)
}
// SaveChatHistory 鍦ㄥ悓姝ョ洿鍐欒矾寰勪笅鍐欏叆鑱婂ぉ鍘嗗彶銆?
// SaveChatHistory 在同步直写路径下写入聊天历史。
//
// 璇存槑锛?
// 1. 璇ユ柟娉曚細鑷寮€鍚簨鍔★紱
// 2. 鍐呴儴澶嶇敤 saveChatHistoryCore锛岀‘淇濆拰 SaveChatHistoryInTx 鐨勪笟鍔″彛寰勫畬鍏ㄤ竴鑷淬€?
func (a *AgentDAO) SaveChatHistory(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, retryGroupID *string, retryIndex *int, retryFromUserMessageID *int, retryFromAssistantMessageID *int, tokensConsumed int) error {
// 说明:
// 1. 该方法会自行开启事务;
// 2. 内部复用 saveChatHistoryCore,确保和 SaveChatHistoryInTx 的业务口径完全一致。
func (a *AgentDAO) SaveChatHistory(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int) error {
return a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
return a.WithTx(tx).saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, retryGroupID, retryIndex, retryFromUserMessageID, retryFromAssistantMessageID, tokensConsumed)
return a.WithTx(tx).saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, tokensConsumed)
})
}
// adjustTokenUsageCore 鍦ㄥ悓涓€浜嬪姟璇箟涓嬪仛鈥滀細璇?鐢ㄦ埛鈥漷oken 璐︽湰澧為噺璋冩暣銆?
// adjustTokenUsageCore 在同一事务语义下做"会话/用户"token 账本增量调整。
//
// 鑱岃矗杈圭晫锛?
// 1. 鍙洿鏂?agent_chats.tokens_total 涓?users.token_usage锛?
// 2. 涓嶅啓 chat_histories锛堟秷鎭惤搴撶敱 SaveChatHistory* 璺緞璐熻矗锛夛紱
// 3. deltaTokens<=0 鏃惰涓烘棤鎿嶄綔锛岀洿鎺ヨ繑鍥炪€?
// 职责边界:
// 1. 只更新 agent_chats.tokens_total users.token_usage
// 2. 不写 chat_histories(消息落库由 SaveChatHistory* 路径负责);
// 3. deltaTokens<=0 时视为无操作,直接返回。
func (a *AgentDAO) adjustTokenUsageCore(ctx context.Context, userID int, conversationID string, deltaTokens int) error {
if deltaTokens <= 0 {
return nil
}
// 1. 鍏堟洿鏂颁細璇濈疮璁?token銆?
// 1. 先更新会话累计 token
chatUpdate := a.db.WithContext(ctx).
Model(&model.AgentChat{}).
Where("user_id = ? AND chat_id = ?", userID, conversationID).
@@ -148,7 +150,7 @@ func (a *AgentDAO) adjustTokenUsageCore(ctx context.Context, userID int, convers
return fmt.Errorf("conversation not found when adjusting tokens: user_id=%d chat_id=%s", userID, conversationID)
}
// 2. 鍐嶆洿鏂扮敤鎴风疮璁?token銆?
// 2. 再更新用户累计 token
userUpdate := a.db.WithContext(ctx).
Model(&model.User{}).
Where("id = ?", userID).
@@ -162,12 +164,12 @@ func (a *AgentDAO) adjustTokenUsageCore(ctx context.Context, userID int, convers
return nil
}
// AdjustTokenUsageInTx applies a token-ledger delta while the caller already
// holds an open transaction. It delegates straight to adjustTokenUsageCore on
// the current DAO handle, which the caller is expected to have bound to the
// transaction via WithTx.
func (a *AgentDAO) AdjustTokenUsageInTx(ctx context.Context, userID int, conversationID string, deltaTokens int) error {
return a.adjustTokenUsageCore(ctx, userID, conversationID, deltaTokens)
}
// AdjustTokenUsage 鍦ㄥ悓姝ヨ矾寰勪笅鎵ц token 璐︽湰澧為噺璋冩暣锛堝唴閮ㄨ嚜甯︿簨鍔★級銆?
// AdjustTokenUsage 在同步路径下执行 token 账本增量调整(内部自带事务)。
func (a *AgentDAO) AdjustTokenUsage(ctx context.Context, userID int, conversationID string, deltaTokens int) error {
return a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
return a.WithTx(tx).adjustTokenUsageCore(ctx, userID, conversationID, deltaTokens)
@@ -197,110 +199,13 @@ func (a *AgentDAO) GetUserChatHistories(ctx context.Context, userID, limit int,
if err != nil {
return nil, err
}
// 淇濈暀鈥滄渶杩?N 鏉♀€濆悗锛屽弽杞垚鏃堕棿姝e簭锛屾柟渚挎ā鍨嬫秷璐广€?
// 保留"最近 N 条"后,反转成时间正序,方便模型消费。
for i, j := 0, len(histories)-1; i < j; i, j = i+1, j-1 {
histories[i], histories[j] = histories[j], histories[i]
}
return histories, nil
}
// EnsureRetryGroupSeed marks the two parent messages of a retry group with
// retry_group_id and retry_index=1, but only for rows whose retry_group_id is
// still NULL or empty, so an already-seeded group is never overwritten.
// A blank group id or non-positive message ids make the call a no-op.
func (a *AgentDAO) EnsureRetryGroupSeed(ctx context.Context, userID int, chatID, retryGroupID string, sourceUserMessageID, sourceAssistantMessageID int) error {
	groupID := strings.TrimSpace(retryGroupID)
	if groupID == "" {
		return nil
	}
	// Keep only the ids that actually reference a message.
	seedIDs := make([]int, 0, 2)
	for _, id := range []int{sourceUserMessageID, sourceAssistantMessageID} {
		if id > 0 {
			seedIDs = append(seedIDs, id)
		}
	}
	if len(seedIDs) == 0 {
		return nil
	}
	// Seed rows always start the group at index 1.
	seedIndex := 1
	query := a.db.WithContext(ctx).
		Model(&model.ChatHistory{UserID: userID, ChatID: chatID}).
		Where("user_id = ? AND chat_id = ? AND id IN ?", userID, chatID, seedIDs).
		Where("(retry_group_id IS NULL OR retry_group_id = '')")
	return query.Updates(map[string]any{
		"retry_group_id": groupID,
		"retry_index":    seedIndex,
	}).Error
}
// ValidateRetrySourceMessages verifies that a retry request's parent messages
// really exist in the given user's conversation and carry the expected roles.
//
// Scope:
//  1. both parent ids must reference rows owned by (userID, chatID);
//  2. the user-side parent must have role "user" and the assistant-side parent
//     role "assistant", rejecting placeholder, cross-chat, or swapped ids;
//  3. seeding retry_group_id is out of scope — EnsureRetryGroupSeed owns that.
func (a *AgentDAO) ValidateRetrySourceMessages(ctx context.Context, userID int, chatID string, sourceUserMessageID, sourceAssistantMessageID int) error {
	// A retry regenerates an existing question/answer pair, so both parent ids
	// must be valid at the same time; bail out early to avoid writing dirty
	// index=1 retry rows.
	if sourceUserMessageID <= 0 || sourceAssistantMessageID <= 0 {
		return errors.New("retry source message ids are invalid")
	}
	type sourceRow struct {
		ID   int
		Role *string
	}
	wantIDs := []int{sourceUserMessageID, sourceAssistantMessageID}
	found := make([]sourceRow, 0, len(wantIDs))
	if err := a.db.WithContext(ctx).
		Model(&model.ChatHistory{}).
		Select("id", "role").
		Where("user_id = ? AND chat_id = ? AND id IN ?", userID, chatID, wantIDs).
		Find(&found).Error; err != nil {
		return err
	}
	if len(found) != len(wantIDs) {
		return errors.New("retry source messages not found in current conversation")
	}
	// Normalize roles for a case/whitespace-insensitive comparison.
	roles := make(map[int]string, len(found))
	for _, row := range found {
		role := ""
		if row.Role != nil {
			role = strings.ToLower(strings.TrimSpace(*row.Role))
		}
		roles[row.ID] = role
	}
	if roles[sourceUserMessageID] != "user" {
		return errors.New("retry source user message is invalid")
	}
	if roles[sourceAssistantMessageID] != "assistant" {
		return errors.New("retry source assistant message is invalid")
	}
	return nil
}
// GetRetryGroupNextIndex returns the next free retry_index inside a retry
// group: MAX(retry_index)+1 over all rows of that group in the conversation,
// which is 1 when the group has no rows yet. A blank group id is an error.
func (a *AgentDAO) GetRetryGroupNextIndex(ctx context.Context, userID int, chatID, retryGroupID string) (int, error) {
	groupID := strings.TrimSpace(retryGroupID)
	if groupID == "" {
		return 0, errors.New("retry_group_id is empty")
	}
	// COALESCE keeps the scan returning 0 instead of NULL for empty groups.
	var currentMax int
	err := a.db.WithContext(ctx).
		Model(&model.ChatHistory{}).
		Where("user_id = ? AND chat_id = ? AND retry_group_id = ?", userID, chatID, groupID).
		Select("COALESCE(MAX(retry_index), 0)").
		Scan(&currentMax).Error
	if err != nil {
		return 0, err
	}
	return currentMax + 1, nil
}
func (a *AgentDAO) IfChatExists(ctx context.Context, userID int, chatID string) (bool, error) {
var chat model.AgentChat
err := a.db.WithContext(ctx).Where("user_id = ? AND chat_id = ?", userID, chatID).First(&chat).Error
@@ -313,7 +218,7 @@ func (a *AgentDAO) IfChatExists(ctx context.Context, userID int, chatID string)
return true, nil
}
// GetConversationMeta 鏌ヨ鍗曚釜浼氳瘽鍏冧俊鎭€?
// GetConversationMeta 查询单个会话元信息。
func (a *AgentDAO) GetConversationMeta(ctx context.Context, userID int, chatID string) (*model.AgentChat, error) {
var chat model.AgentChat
err := a.db.WithContext(ctx).
@@ -326,7 +231,7 @@ func (a *AgentDAO) GetConversationMeta(ctx context.Context, userID int, chatID s
return &chat, nil
}
// GetConversationTitle 璇诲彇褰撳墠浼氳瘽鏍囬銆?
// GetConversationTitle 读取当前会话标题。
func (a *AgentDAO) GetConversationTitle(ctx context.Context, userID int, chatID string) (title string, exists bool, err error) {
var chat model.AgentChat
queryErr := a.db.WithContext(ctx).
@@ -345,7 +250,7 @@ func (a *AgentDAO) GetConversationTitle(ctx context.Context, userID int, chatID
return strings.TrimSpace(*chat.Title), true, nil
}
// UpdateConversationTitleIfEmpty 浠呭湪鏍囬涓虹┖鏃舵洿鏂颁細璇濇爣棰樸€?
// UpdateConversationTitleIfEmpty 仅在标题为空时更新会话标题。
func (a *AgentDAO) UpdateConversationTitleIfEmpty(ctx context.Context, userID int, chatID, title string) error {
normalized := strings.TrimSpace(title)
if normalized == "" {
@@ -357,20 +262,20 @@ func (a *AgentDAO) UpdateConversationTitleIfEmpty(ctx context.Context, userID in
Update("title", normalized).Error
}
// GetConversationList 鎸夊垎椤垫煡璇㈡寚瀹氱敤鎴风殑浼氳瘽鍒楄〃銆?
// GetConversationList 按分页查询指定用户的会话列表。
//
// 鑱岃矗杈圭晫锛?
// 1. 鍙礋璐h搴擄紝涓嶈礋璐g紦瀛橈紱
// 2. 鍙礋璐?user_id 鏁版嵁闅旂锛屼笉璐熻矗鍙傛暟鍚堟硶鎬у厹搴曪紙鐢?service 璐熻矗锛夛紱
// 3. 杩斿洖鎬绘暟 total 渚涗笂灞傝绠?has_more銆?
// 职责边界:
// 1. 只负责读库,不负责缓存;
// 2. 只负责 user_id 数据隔离,不负责参数合法性兜底(由 service 负责);
// 3. 返回总数 total 供上层计算 has_more
func (a *AgentDAO) GetConversationList(ctx context.Context, userID, page, pageSize int, status string) ([]model.AgentChat, int64, error) {
// 1. 鍏堟瀯閫犵粺涓€杩囨护鏉′欢锛屼繚璇?total 涓?list 鐨勭粺璁″彛寰勪竴鑷淬€?
// 1. 先构造统一过滤条件,保证 total list 的统计口径一致。
baseQuery := a.db.WithContext(ctx).Model(&model.AgentChat{}).Where("user_id = ?", userID)
if strings.TrimSpace(status) != "" {
baseQuery = baseQuery.Where("status = ?", status)
}
// 2. 鍏堟煡鎬绘潯鏁帮紝缁欏墠绔垎椤靛櫒鎻愪緵瀹屾暣鍏冧俊鎭€?
// 2. 先查总条数,给前端分页器提供完整元信息。
var total int64
if err := baseQuery.Count(&total).Error; err != nil {
return nil, 0, err
@@ -379,9 +284,9 @@ func (a *AgentDAO) GetConversationList(ctx context.Context, userID, page, pageSi
return make([]model.AgentChat, 0), 0, nil
}
// 3. 鍐嶆煡褰撳墠椤垫暟鎹細
// 3.1 鎸夋渶杩戞秷鎭椂闂村€掑簭锛屼繚璇佲€滄渶杩戞椿璺冣€濅紭鍏堝睍绀猴紱
// 3.2 鍚屾椂闂存埑涓嬫寜 id 鍊掑簭锛岄伩鍏嶇炕椤垫椂椤哄簭鎶栧姩銆?
// 3. 再查当前页数据:
// 3.1 按最近消息时间倒序,保证"最近活跃"优先展示;
// 3.2 同时间戳下按 id 倒序,避免翻页时顺序抖动。
offset := (page - 1) * pageSize
var chats []model.AgentChat
query := a.db.WithContext(ctx).
@@ -402,34 +307,17 @@ func (a *AgentDAO) GetConversationList(ctx context.Context, userID, page, pageSi
return chats, total, nil
}
// ---- Compaction 相关 ----
// SaveCompaction 保存压缩摘要和水位线
// ---- 压缩摘要持久化 ----
//
// 1. 旧接口 SaveCompaction / LoadCompaction 继续保留,默认只读写 execute 阶段
// 2. 新接口按 stageKey 分桶读写,数据仍然落在 agent_chats.compaction_summary。
// 3. 为兼容历史数据,若 compaction_summary 仍是旧字符串格式,则自动回退读取。
func (a *AgentDAO) SaveCompaction(ctx context.Context, userID int, chatID string, summary string, watermark int) error {
return a.db.WithContext(ctx).
Model(&model.AgentChat{}).
Where("user_id = ? AND chat_id = ?", userID, chatID).
Updates(map[string]any{
"compaction_summary": summary,
"compaction_watermark": watermark,
}).Error
return a.SaveStageCompaction(ctx, userID, chatID, "execute", summary, watermark)
}
// LoadCompaction 读取压缩摘要和水位线。
func (a *AgentDAO) LoadCompaction(ctx context.Context, userID int, chatID string) (summary string, watermark int, err error) {
var chat model.AgentChat
err = a.db.WithContext(ctx).
Select("compaction_summary", "compaction_watermark").
Where("user_id = ? AND chat_id = ?", userID, chatID).
First(&chat).Error
if err != nil {
return "", 0, err
}
if chat.CompactionSummary != nil {
summary = *chat.CompactionSummary
}
watermark = chat.CompactionWatermark
return
return a.LoadStageCompaction(ctx, userID, chatID, "execute")
}
// SaveContextTokenStats 保存上下文窗口 token 分布统计。
@@ -455,3 +343,132 @@ func (a *AgentDAO) LoadContextTokenStats(ctx context.Context, userID int, chatID
}
return "", nil
}
// stageCompactionRecord is one stage's compaction snapshot: the rolled-up
// summary text plus the watermark recording how far into the history the
// summary already covers.
type stageCompactionRecord struct {
Summary string `json:"summary"`
Watermark int `json:"watermark"`
}
// stageCompactionEnvelope is the JSON payload stored in
// agent_chats.compaction_summary once stage-bucketed compaction is in use:
// a version tag plus a stageKey -> record map.
type stageCompactionEnvelope struct {
Version int `json:"version"`
Stages map[string]stageCompactionRecord `json:"stages"`
}
// normalizeCompactionStageKey canonicalizes a stage key so lookups are case-
// and whitespace-insensitive ("Execute" and " execute " land in the same
// bucket). An empty key falls back to "execute", the legacy default stage.
func normalizeCompactionStageKey(stageKey string) string {
	normalized := strings.ToLower(strings.TrimSpace(stageKey))
	if normalized != "" {
		return normalized
	}
	return "execute"
}
// loadStageCompactionStages decodes whatever is stored in
// agent_chats.compaction_summary into a stage -> record map.
//
// Decoding order:
//  1. nil or blank input yields an empty map;
//  2. a JSON envelope with at least one stage is read per stage, with keys
//     normalized and summaries trimmed;
//  3. anything else is legacy data: the raw string becomes the "execute"
//     stage's summary, paired with the legacy watermark column value.
func loadStageCompactionStages(summary *string, watermark int) map[string]stageCompactionRecord {
	out := make(map[string]stageCompactionRecord)
	if summary == nil {
		return out
	}
	trimmed := strings.TrimSpace(*summary)
	if trimmed == "" {
		return out
	}
	var envelope stageCompactionEnvelope
	decodeErr := json.Unmarshal([]byte(trimmed), &envelope)
	if decodeErr != nil || len(envelope.Stages) == 0 {
		// Legacy plain-string format: attribute it to the execute stage.
		out["execute"] = stageCompactionRecord{
			Summary:   trimmed,
			Watermark: watermark,
		}
		return out
	}
	for stageKey, record := range envelope.Stages {
		out[normalizeCompactionStageKey(stageKey)] = stageCompactionRecord{
			Summary:   strings.TrimSpace(record.Summary),
			Watermark: record.Watermark,
		}
	}
	return out
}
// marshalStageCompactionStages encodes the per-stage map back into the
// versioned JSON envelope persisted in agent_chats.compaction_summary.
func marshalStageCompactionStages(stages map[string]stageCompactionRecord) (string, error) {
	payload, err := json.Marshal(stageCompactionEnvelope{
		Version: 1,
		Stages:  stages,
	})
	if err != nil {
		return "", err
	}
	return string(payload), nil
}
// LoadStageCompaction reads the compaction summary and watermark stored for
// one stage bucket. A missing chat row surfaces the DB error; a chat that has
// no data for the requested stage yields ("", 0, nil).
func (a *AgentDAO) LoadStageCompaction(ctx context.Context, userID int, chatID string, stageKey string) (summary string, watermark int, err error) {
	key := normalizeCompactionStageKey(stageKey)
	var chat model.AgentChat
	if err = a.db.WithContext(ctx).
		Select("compaction_summary", "compaction_watermark").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(&chat).Error; err != nil {
		return "", 0, err
	}
	stages := loadStageCompactionStages(chat.CompactionSummary, chat.CompactionWatermark)
	record, ok := stages[key]
	if !ok {
		return "", 0, nil
	}
	return record.Summary, record.Watermark, nil
}
// SaveStageCompaction stores the compaction summary and watermark for one
// stage bucket without clobbering data the other stages have written:
//
//  1. read the chat's current summary first, so existing stage buckets
//     survive the update;
//  2. replace only the bucket for the normalized stageKey;
//  3. write the whole JSON envelope back, mirroring the execute stage's
//     watermark into the legacy compaction_watermark column for backward
//     compatibility.
//
// NOTE(review): this is a read-modify-write without a transaction; two
// concurrent stage saves on the same chat could lose one bucket — confirm
// callers serialize per chat.
func (a *AgentDAO) SaveStageCompaction(ctx context.Context, userID int, chatID string, stageKey string, summary string, watermark int) error {
stageKey = normalizeCompactionStageKey(stageKey)
// Load the existing envelope (or legacy string) before mutating anything.
var chat model.AgentChat
err := a.db.WithContext(ctx).
Select("compaction_summary", "compaction_watermark").
Where("user_id = ? AND chat_id = ?", userID, chatID).
First(&chat).Error
if err != nil {
return err
}
stages := loadStageCompactionStages(chat.CompactionSummary, chat.CompactionWatermark)
stages[stageKey] = stageCompactionRecord{
Summary: strings.TrimSpace(summary),
Watermark: watermark,
}
payload, err := marshalStageCompactionStages(stages)
if err != nil {
return err
}
// Keep the legacy watermark column tracking the execute stage when present;
// otherwise fall back to the watermark being written now.
legacyWatermark := watermark
if executeRecord, ok := stages["execute"]; ok {
legacyWatermark = executeRecord.Watermark
}
return a.db.WithContext(ctx).
Model(&model.AgentChat{}).
Where("user_id = ? AND chat_id = ?", userID, chatID).
Updates(map[string]any{
"compaction_summary": payload,
"compaction_watermark": legacyWatermark,
}).Error
}

View File

@@ -181,10 +181,6 @@ type ChatHistoryPersistPayload struct {
Message string `json:"message"`
ReasoningContent string `json:"reasoning_content,omitempty"`
ReasoningDurationSeconds int `json:"reasoning_duration_seconds,omitempty"`
RetryGroupID *string `json:"retry_group_id,omitempty"`
RetryIndex *int `json:"retry_index,omitempty"`
RetryFromUserMessageID *int `json:"retry_from_user_message_id,omitempty"`
RetryFromAssistantMessageID *int `json:"retry_from_assistant_message_id,omitempty"`
TokensConsumed int `json:"tokens_consumed"`
}
@@ -231,9 +227,6 @@ type GetConversationHistoryItem struct {
CreatedAt *time.Time `json:"created_at,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
ReasoningDurationSeconds int `json:"reasoning_duration_seconds,omitempty"`
RetryGroupID *string `json:"retry_group_id"`
RetryIndex *int `json:"retry_index"`
RetryTotal *int `json:"retry_total"`
}
type SchedulePlanPreviewCache struct {

View File

@@ -479,24 +479,17 @@ LLM 的一次性文本输出通过 `SplitPseudoStreamText` 拆分成多个 chunk
### 9.1 prompt 构造模式
所有阶段共享 `buildStageMessages()` 函数:
所有阶段现在统一共享 `buildUnifiedStageMessages()` 函数:
```
System Prompt(节点专属)
v
Pinned Blocks置顶上下文块作为独立 system 消息注入)
v
Tool Schemas工具 schema作为独立 system 消息注入)
v
History对话历史Tool 消息降级为 User 消息以兼容 API
v
User Prompt节点专属用户提示
msg0(system) = 全局 system prompt + 阶段 system prompt + 工具简表
msg1(assistant) = 对话历史 + 归档摘要
msg2(assistant) = 阶段工作区
msg3(system) = 阶段状态 + 记忆 + 本轮指令
```
统一构造由 `StageMessagesConfig` 驱动,具体阶段只负责填充各自的 `Msg2Content``Msg3StageState``UserInstruction`
### 9.2 各阶段 prompt 要点
| 阶段 | 核心指令 | 关键约束 |

View File

@@ -93,7 +93,6 @@ type CommonState struct {
// TaskClasses 本次排课涉及的任务类约束元数据(含日期、策略、时段预算等),
// 在 Service 层从 DB 加载并注入,供 Plan prompt 直接消费,避免 LLM 因信息不足而追问用户。
TaskClasses []schedule.TaskClassMeta `json:"task_classes,omitempty"`
// NeedsRoughBuild 由 Plan 节点在 plan_done 时写入,标记 Confirm 后是否需要走粗排节点。
// 粗排节点执行完毕后会将此字段重置为 false。
NeedsRoughBuild bool `json:"needs_rough_build,omitempty"`

View File

@@ -9,6 +9,7 @@ import (
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
schedule "github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
"github.com/cloudwego/eino/schema"
)
// AgentGraphRequest 描述一次 agent graph 运行的请求级输入。
@@ -52,6 +53,14 @@ type RoughBuildFunc func(ctx context.Context, userID int, taskClassIDs []int) ([
// 2. deliver 结束时再做最终覆盖写,保障收口状态一致。
type WriteSchedulePreviewFunc func(ctx context.Context, state *schedule.ScheduleState, userID int, conversationID string, taskClassIDs []int) error
// PersistVisibleMessageFunc 是 newAgent 主循环逐条持久化可见消息的回调签名。
//
// 职责边界:
// 1. 只处理真正对用户可见的 assistant speak不处理工具结果或内部纠错提示
// 2. 由节点在 AppendHistory 之后主动调用,让上层同步把这条消息写入 Redis + MySQL
// 3. 执行方可以做无损降级(例如 Redis 写失败只记日志),但应返回 error 便于上层记录。
type PersistVisibleMessageFunc func(ctx context.Context, state *CommonState, msg *schema.Message) error
// AgentGraphDeps 描述 graph/node 层运行时真正依赖的可插拔能力。
//
// 设计目的:
@@ -81,6 +90,10 @@ type AgentGraphDeps struct {
// channel 携带已渲染的文本内容(非原始 ItemDTO节点直接写入 pinned block。
MemoryFuture chan string // buffered(1),携带 renderMemoryPinnedContentByMode 的输出
MemoryConsumed bool // 保证 channel 只读一次,后续 Execute ReAct 循环跳过等待
// PersistVisibleMessage 按 Service 注入newAgent 每个节点产出的可见 speak
// 都会在 AppendHistory 之后立刻调用这个回调,把消息同步落到 Redis + MySQL。
PersistVisibleMessage PersistVisibleMessageFunc
}
// --- 记忆 pinned block 常量(供 agentsvc 和 node 层共享) ---

View File

@@ -81,9 +81,16 @@ type SchedulePersistor interface {
}
// CompactionStore 定义上下文压缩的持久化接口。
// 由 Service 层实现(组合 DAO + Redis Cache注入到 ExecuteNodeInput。
// 由 Service 层实现(组合 DAO + Redis Cache注入到各阶段 NodeInput。
type CompactionStore interface {
LoadCompaction(ctx context.Context, userID int, chatID string) (summary string, watermark int, err error)
SaveCompaction(ctx context.Context, userID int, chatID string, summary string, watermark int) error
SaveContextTokenStats(ctx context.Context, userID int, chatID string, statsJSON string) error
// LoadStageCompaction 按 stageKey 加载压缩摘要和水位线。
// stageKey 区分不同节点(如 "execute"/"plan"/"chat"/"deliver"
// 使各节点可以独立维护各自的压缩状态,互不覆盖。
LoadStageCompaction(ctx context.Context, userID int, chatID string, stageKey string) (summary string, watermark int, err error)
// SaveStageCompaction 按 stageKey 保存压缩摘要和水位线。
SaveStageCompaction(ctx context.Context, userID int, chatID string, stageKey string, summary string, watermark int) error
}

View File

@@ -12,12 +12,12 @@ import (
"github.com/LoveLosita/smartflow/backend/newAgent/tools/schedule"
)
// AgentNodes 是 newAgent 通用图的节点容器
// AgentNodes 负责把 graph 层的节点调用统一转成 node 层真正的执行入口
//
// 职责边界:
// 1. 负责把 node 层真正实现的方法统一暴露给 graph 注册;
// 2. 负责收口"graph 只编排、node 真执行"的结构约束;
// 3. 负责在每个节点执行成功后统一做状态持久化Save/Delete
// 1. 这里只做参数转发、依赖注入和状态落盘,不承载业务决策。
// 2. 各节点真正的执行逻辑仍在对应的 RunXXXNode 内。
// 3. 节点成功后统一保存快照,方便断线恢复
type AgentNodes struct{}
// NewAgentNodes 创建通用节点容器。
@@ -25,94 +25,60 @@ func NewAgentNodes() *AgentNodes {
return &AgentNodes{}
}
// Chat 是聊天入口的正式节点方法
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的入口逻辑仍由 RunChatNode 负责;
// 3. Chat 的 Save 交给 Service 层处理,这里不做持久化。
// Chat 负责把 graph 的 chat 节点请求转给 RunChatNode
func (n *AgentNodes) Chat(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("chat node: state is nil")
}
// 注入工具 schema 到 ConversationContext让路由决策更智能
if st.Deps.ToolRegistry != nil {
schemas := st.Deps.ToolRegistry.Schemas()
toolSchemas := make([]newagentmodel.ToolSchemaContext, len(schemas))
for i, s := range schemas {
toolSchemas[i] = newagentmodel.ToolSchemaContext{
Name: s.Name,
Desc: s.Desc,
SchemaText: s.SchemaText,
}
}
st.EnsureConversationContext().SetToolSchemas(toolSchemas)
}
// 1. Chat 阶段只负责路由与纯对话,不需要看到工具目录,避免能力细节干扰判断
st.EnsureConversationContext().SetToolSchemas(nil)
if err := RunChatNode(
ctx,
ChatNodeInput{
if err := RunChatNode(ctx, ChatNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
UserInput: st.Request.UserInput,
ConfirmAction: st.Request.ConfirmAction,
Client: st.Deps.ResolveChatClient(),
ChunkEmitter: st.EnsureChunkEmitter(),
},
); err != nil {
CompactionStore: st.Deps.CompactionStore,
PersistVisibleMessage: st.Deps.PersistVisibleMessage,
}); err != nil {
return nil, err
}
return st, nil
}
// Confirm 是确认阶段的正式节点方法。
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的确认逻辑仍由 RunConfirmNode 负责;
// 3. 不需要 LLM Client — 确认内容由已有状态机械格式化。
// 4. Confirm 执行成功后保存状态,因为它创建了 PendingInteraction。
func (n *AgentNodes) Confirm(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("confirm node: state is nil")
}
if err := RunConfirmNode(
ctx,
ConfirmNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
ChunkEmitter: st.EnsureChunkEmitter(),
},
); err != nil {
return nil, err
} else if st.Deps.WriteSchedulePreview != nil && st.ScheduleState == nil {
flowState := st.EnsureFlowState()
log.Printf("[WARN] deliver: schedule state is nil, skip preview write chat=%s", flowState.ConversationID)
}
saveAgentState(ctx, st)
return st, nil
}
// Plan 是规划阶段的正式节点方法
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的单轮规划逻辑仍由 RunPlanNode 负责;
// 3. Plan 执行成功后保存状态,支持意外断线恢复。
// Confirm forwards the graph's confirm-node request to RunConfirmNode.
//
// 1. This layer only adapts graph state into ConfirmNodeInput; the real
//    confirmation logic lives in RunConfirmNode.
// 2. On success the runtime snapshot is saved, because (per the original
//    design notes) Confirm creates a PendingInteraction that must survive
//    an unexpected disconnect.
func (n *AgentNodes) Confirm(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
// Guard: the graph must hand over a non-nil state container.
if st == nil {
return nil, errors.New("confirm node: state is nil")
}
if err := RunConfirmNode(ctx, ConfirmNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
ChunkEmitter: st.EnsureChunkEmitter(),
}); err != nil {
return nil, err
}
// Persist the run snapshot only after the confirm step succeeded.
saveAgentState(ctx, st)
return st, nil
}
// Plan 负责把 graph 的 plan 节点请求转给 RunPlanNode。
func (n *AgentNodes) Plan(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("plan node: state is nil")
}
// 等待后记忆检索完成,注入最新记忆后再启动 Plan
// 等待后记忆检索完成,再把最新结果注入上下文
ensureFreshMemory(st)
if err := RunPlanNode(
ctx,
PlanNodeInput{
if err := RunPlanNode(ctx, PlanNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
UserInput: st.Request.UserInput,
@@ -121,8 +87,9 @@ func (n *AgentNodes) Plan(ctx context.Context, st *newagentmodel.AgentGraphState
ResumeNode: "plan",
AlwaysExecute: st.Request.AlwaysExecute,
ThinkingEnabled: st.Deps.ThinkingPlan,
},
); err != nil {
CompactionStore: st.Deps.CompactionStore,
PersistVisibleMessage: st.Deps.PersistVisibleMessage,
}); err != nil {
return nil, err
}
@@ -130,12 +97,7 @@ func (n *AgentNodes) Plan(ctx context.Context, st *newagentmodel.AgentGraphState
return st, nil
}
// RoughBuild 是粗排阶段的正式节点方法
//
// 职责边界:
// 1. 调用注入的 RoughBuildFunc 执行粗排算法;
// 2. 把粗排结果写入 ScheduleState
// 3. 完成后保存状态,支持意外断线恢复。
// RoughBuild 负责把 graph 的 rough_build 节点请求转给 RunRoughBuildNode
func (n *AgentNodes) RoughBuild(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("rough_build node: state is nil")
@@ -149,48 +111,31 @@ func (n *AgentNodes) RoughBuild(ctx context.Context, st *newagentmodel.AgentGrap
return st, nil
}
// Interrupt 是中断阶段的正式节点方法
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的中断逻辑仍由 RunInterruptNode 负责;
// 3. 不需要 LLM Client — 所有文本已在 PendingInteraction 里。
// 4. 不需要 Save — 上游节点Plan/Execute/Confirm已经存过了。
// Interrupt 负责把 graph 的 interrupt 节点请求转给 RunInterruptNode
func (n *AgentNodes) Interrupt(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("interrupt node: state is nil")
}
if err := RunInterruptNode(
ctx,
InterruptNodeInput{
if err := RunInterruptNode(ctx, InterruptNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
ChunkEmitter: st.EnsureChunkEmitter(),
},
); err != nil {
PersistVisibleMessage: st.Deps.PersistVisibleMessage,
}); err != nil {
return nil, err
}
return st, nil
}
// Execute 是执行阶段的正式节点方法
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的单轮执行逻辑仍由 RunExecuteNode 负责。
//
// 设计原则:
// 1. LLM 主导LLM 自己判断 done_when 是否满足,自己决定何时推进/完成;
// 2. 后端兜底:只做资源控制、安全兜底、证据记录;
// 3. 不做硬校验:后端不质疑 LLM 的 advance/complete 决策。
// 4. Execute 每轮执行成功后保存状态,支持意外断线恢复。
// Execute 负责把 graph 的 execute 节点请求转给 RunExecuteNode
func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("execute node: state is nil")
}
// 按需加载 ScheduleState首次执行时从 DB 加载,后续复用内存中的 state
// 1. 首次进入时按需加载日程状态,后续轮次复用内存状态
var scheduleState *schedule.ScheduleState
if ss, loadErr := st.EnsureScheduleState(ctx); loadErr != nil {
return nil, fmt.Errorf("execute node: 加载日程状态失败: %w", loadErr)
@@ -198,7 +143,7 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
scheduleState = ss
}
// 注入工具 schema 到 ConversationContext LLM 看到可用工具列表
// 2. 把工具 schema 注入上下文,供 LLM 看到真实工具边界
if st.Deps.ToolRegistry != nil {
schemas := st.Deps.ToolRegistry.Schemas()
toolSchemas := make([]newagentmodel.ToolSchemaContext, len(schemas))
@@ -212,12 +157,10 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
st.EnsureConversationContext().SetToolSchemas(toolSchemas)
}
// 等待后记忆检索完成,注入最新记忆后再启动 Execute
// 3. 等待后记忆检索结果,再把最新结果注入上下文
ensureFreshMemory(st)
if err := RunExecuteNode(
ctx,
ExecuteNodeInput{
if err := RunExecuteNode(ctx, ExecuteNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
UserInput: st.Request.UserInput,
@@ -232,8 +175,8 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
OriginalScheduleState: st.OriginalScheduleState,
AlwaysExecute: st.Request.AlwaysExecute,
ThinkingEnabled: st.Deps.ThinkingExecute,
},
); err != nil {
PersistVisibleMessage: st.Deps.PersistVisibleMessage,
}); err != nil {
return nil, err
}
@@ -241,12 +184,7 @@ func (n *AgentNodes) Execute(ctx context.Context, st *newagentmodel.AgentGraphSt
return st, nil
}
// OrderGuard 是顺序守卫阶段的正式节点方法
//
// 职责边界:
// 1. 只负责调用 RunOrderGuardNode 做 suggested 相对顺序校验;
// 2. 不负责交付文案生成,校验结果统一交给 Deliver 节点收口;
// 3. 节点执行后保存状态,保证异常中断后仍可复盘守卫结果。
// OrderGuard 负责把 graph 的 order_guard 节点请求转给 RunOrderGuardNode
func (n *AgentNodes) OrderGuard(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("order_guard node: state is nil")
@@ -260,38 +198,32 @@ func (n *AgentNodes) OrderGuard(ctx context.Context, st *newagentmodel.AgentGrap
return st, nil
}
// Deliver 是交付阶段的正式节点方法
//
// 职责边界:
// 1. 这里只做 graph -> node 的参数转接;
// 2. 真正的交付逻辑仍由 RunDeliverNode 负责;
// 3. 调 LLM 生成任务总结,失败时降级到机械格式化。
// 4. 任务完成后保存最终状态到 Redis2h TTL支持断线恢复和 MySQL outbox 异步持久化。
// Deliver 负责把 graph 的 deliver 节点请求转给 RunDeliverNode
func (n *AgentNodes) Deliver(ctx context.Context, st *newagentmodel.AgentGraphState) (*newagentmodel.AgentGraphState, error) {
if st == nil {
return nil, errors.New("deliver node: state is nil")
}
if err := RunDeliverNode(
ctx,
DeliverNodeInput{
// 1. Deliver 只做最终收口总结,不需要工具目录,避免无关能力信息污染总结。
st.EnsureConversationContext().SetToolSchemas(nil)
if err := RunDeliverNode(ctx, DeliverNodeInput{
RuntimeState: st.EnsureRuntimeState(),
ConversationContext: st.EnsureConversationContext(),
Client: st.Deps.ResolveDeliverClient(),
ChunkEmitter: st.EnsureChunkEmitter(),
ThinkingEnabled: st.Deps.ThinkingDeliver,
},
); err != nil {
CompactionStore: st.Deps.CompactionStore,
PersistVisibleMessage: st.Deps.PersistVisibleMessage,
}); err != nil {
return nil, err
}
// 任务完成后写排程预览缓存:只有走到 Deliver 才代表排程结果已稳定,
// 中断confirm/ask_user路径不写避免把中间态暴露给前端。
// 只有真正完成时才写入排程预览,避免中间态污染前端展示。
if st.Deps.WriteSchedulePreview != nil && st.ScheduleState != nil {
flowState := st.EnsureFlowState()
if flowState != nil && flowState.IsCompleted() {
if err := st.Deps.WriteSchedulePreview(ctx, st.ScheduleState, flowState.UserID, flowState.ConversationID, flowState.TaskClassIDs); err != nil {
// 写缓存失败不阻断主流程,降级为仅 log。
log.Printf("[WARN] deliver: 写入排程预览缓存失败 chat=%s: %v", flowState.ConversationID, err)
}
} else if flowState != nil {
@@ -303,19 +235,16 @@ func (n *AgentNodes) Deliver(ctx context.Context, st *newagentmodel.AgentGraphSt
return st, nil
}
// --- 记忆预取消费辅助 ---
// ensureFreshMemory 等待后台记忆检索完成,将最新结果注入 ConversationContext。
// ensureFreshMemory 等待后端记忆检索完成,并把最新结果写入 ConversationContext。
//
// 设计说明:
// 1. 只在首次调用时等待 channel最多 500ms后续调用直接跳过
// 2. 覆盖 ConversationContext 中已有的缓存记忆UpsertPinnedBlock 按 key 覆盖);
// 3. timeout 后保留缓存记忆不替换,保证 Execute ReAct 循环不会因超时丢失记忆。
// 1. 只在首次调用时等待 channel后续调用直接跳过。
// 2. 超时后保留原有上下文,不额外覆盖。
// 3. 记忆为空时也不做额外写入,避免污染 prompt。
func ensureFreshMemory(st *newagentmodel.AgentGraphState) {
if st == nil || st.Deps.MemoryConsumed || st.Deps.MemoryFuture == nil {
return
}
st.Deps.MemoryConsumed = true // 标记已消费,后续调用直接跳过
st.Deps.MemoryConsumed = true
select {
case content := <-st.Deps.MemoryFuture:
@@ -327,20 +256,11 @@ func ensureFreshMemory(st *newagentmodel.AgentGraphState) {
})
}
case <-time.After(newagentmodel.MemoryFreshTimeout):
// timeout保留 ConversationContext 中已有的缓存记忆,不做额外操作
// 超时后保留原有上下文即可。
}
}
// --- 持久化辅助 ---
// saveAgentState 在节点执行成功后,将当前运行态快照保存到 Redis。
//
// 设计原则:
// 1. Save 失败只记日志,不中断 Graph 流程;
// 2. StateStore 为空时静默跳过(骨架期 / 测试环境);
// 3. conversationID 为空时也静默跳过,避免写入无效 key。
//
// TODO: 接入项目统一的日志框架后,把 _ = err 改成结构化日志。
// saveAgentState 在节点成功执行后保存运行快照。
func saveAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
if st == nil {
return
@@ -370,14 +290,7 @@ func saveAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
_ = store.Save(ctx, flowState.ConversationID, snapshot)
}
// deleteAgentState 在任务完成后删除 Redis 中的运行快照。
//
// 设计原则:
// 1. Delete 失败只记日志,不中断 Graph 流程;
// 2. 删除是幂等的key 不存在也视为成功;
// 3. StateStore 为空时静默跳过。
//
// TODO: 接入项目统一的日志框架后,把 _ = err 改成结构化日志。
// deleteAgentState 在任务完成后删除运行快照。
func deleteAgentState(ctx context.Context, st *newagentmodel.AgentGraphState) {
if st == nil {
return

View File

@@ -51,6 +51,8 @@ type ChatNodeInput struct {
ConfirmAction string
Client *infrallm.Client
ChunkEmitter *newagentstream.ChunkEmitter
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
// RunChatNode 执行一轮聊天节点逻辑。
@@ -94,6 +96,15 @@ func RunChatNode(ctx context.Context, input ChatNodeInput) error {
}
nonce := uuid.NewString()
messages := newagentprompt.BuildChatRoutingMessages(conversationContext, input.UserInput, flowState, nonce)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: chatStageName,
StatusBlockID: chatStatusBlockID,
})
logNodeLLMContext(chatStageName, "routing", flowState, messages)
reader, err := input.Client.Stream(ctx, messages, infrallm.GenerateOptions{
Temperature: 0.7,
@@ -281,7 +292,7 @@ func handleDirectReplyStream(
if effectiveThinking {
return handleThinkingReplyStream(ctx, reader, input, emitter, conversationContext, flowState)
}
return handleDirectReplyContinueStream(ctx, reader, emitter, conversationContext, flowState, firstVisible)
return handleDirectReplyContinueStream(ctx, reader, input, emitter, conversationContext, flowState, firstVisible)
}
// handleThinkingReplyStream 处理需要思考的回复:关闭路由流 → 第二次 thinking 流式调用。
@@ -295,7 +306,16 @@ func handleThinkingReplyStream(
) error {
_ = reader.Close()
deepMessages := newagentprompt.BuildDeepAnswerMessages(conversationContext, input.UserInput)
deepMessages := newagentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages = compactUnifiedMessagesIfNeeded(ctx, deepMessages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: chatStageName,
StatusBlockID: chatStatusBlockID,
})
logNodeLLMContext(chatStageName, "direct_reply_thinking", flowState, deepMessages)
deepReader, err := input.Client.Stream(ctx, deepMessages, infrallm.GenerateOptions{
Temperature: 0.5,
MaxTokens: 2000,
@@ -322,6 +342,7 @@ func handleThinkingReplyStream(
deepText = strings.TrimSpace(deepText)
if deepText != "" {
conversationContext.AppendHistory(schema.AssistantMessage(deepText, nil))
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, schema.AssistantMessage(deepText, nil))
}
flowState.Phase = newagentmodel.PhaseChatting
@@ -332,6 +353,7 @@ func handleThinkingReplyStream(
func handleDirectReplyContinueStream(
ctx context.Context,
reader infrallm.StreamReader,
input ChatNodeInput,
emitter *newagentstream.ChunkEmitter,
conversationContext *newagentmodel.ConversationContext,
flowState *newagentmodel.CommonState,
@@ -370,7 +392,9 @@ func handleDirectReplyContinueStream(
text := fullText.String()
if strings.TrimSpace(text) != "" {
conversationContext.AppendHistory(schema.AssistantMessage(text, nil))
msg := schema.AssistantMessage(text, nil)
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
flowState.Phase = newagentmodel.PhaseChatting
@@ -568,7 +592,16 @@ func handleDeepAnswerStream(
if effectiveThinking {
thinkingOpt = infrallm.ThinkingModeEnabled
}
deepMessages := newagentprompt.BuildDeepAnswerMessages(conversationContext, input.UserInput)
deepMessages := newagentprompt.BuildDeepAnswerMessages(flowState, conversationContext, input.UserInput)
deepMessages = compactUnifiedMessagesIfNeeded(ctx, deepMessages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: chatStageName,
StatusBlockID: chatStatusBlockID,
})
logNodeLLMContext(chatStageName, "deep_answer", flowState, deepMessages)
deepReader, err := input.Client.Stream(ctx, deepMessages, infrallm.GenerateOptions{
Temperature: 0.5,
MaxTokens: 2000,
@@ -601,7 +634,9 @@ func handleDeepAnswerStream(
}
// 4. 完整回复写入 history。
conversationContext.AppendHistory(schema.AssistantMessage(deepText, nil))
msg := schema.AssistantMessage(deepText, nil)
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
flowState.Phase = newagentmodel.PhaseChatting
return nil

View File

@@ -33,6 +33,8 @@ type DeliverNodeInput struct {
Client *infrallm.Client
ChunkEmitter *newagentstream.ChunkEmitter
ThinkingEnabled bool // 是否开启 thinking由 config.yaml 的 agent.thinking.deliver 注入
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
// RunDeliverNode 执行一轮交付节点逻辑。
@@ -65,10 +67,11 @@ func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
}
// 2. 调 LLM 生成交付总结。
summary := generateDeliverSummary(ctx, input.Client, flowState, conversationContext, input.ThinkingEnabled)
summary := generateDeliverSummary(ctx, input.Client, flowState, conversationContext, input.ThinkingEnabled, input.CompactionStore, emitter)
// 3. 伪流式推送总结。
if strings.TrimSpace(summary) != "" {
msg := schema.AssistantMessage(summary, nil)
if err := emitter.EmitPseudoAssistantText(
ctx,
deliverSpeakBlockID,
@@ -78,7 +81,8 @@ func RunDeliverNode(ctx context.Context, input DeliverNodeInput) error {
); err != nil {
return fmt.Errorf("交付总结推送失败: %w", err)
}
conversationContext.AppendHistory(schema.AssistantMessage(summary, nil))
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
// 4. 推送最终完成状态。
@@ -100,6 +104,8 @@ func generateDeliverSummary(
flowState *newagentmodel.CommonState,
conversationContext *newagentmodel.ConversationContext,
thinkingEnabled bool,
compactionStore newagentmodel.CompactionStore,
emitter *newagentstream.ChunkEmitter,
) string {
if flowState != nil {
switch {
@@ -115,6 +121,15 @@ func generateDeliverSummary(
}
messages := newagentprompt.BuildDeliverMessages(flowState, conversationContext)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: client,
CompactionStore: compactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: deliverStageName,
StatusBlockID: deliverStatusBlockID,
})
logNodeLLMContext(deliverStageName, "summarizing", flowState, messages)
result, err := client.GenerateText(
ctx,
messages,

View File

@@ -60,6 +60,7 @@ type ExecuteNodeInput struct {
OriginalScheduleState *schedule.ScheduleState
AlwaysExecute bool // true 时写工具跳过确认闸门直接执行
ThinkingEnabled bool // 是否开启 thinking由 config.yaml 的 agent.thinking.execute 注入
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
// ExecuteRoundObservation 记录执行阶段每轮的关键观察。
@@ -184,19 +185,16 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
messages := newagentprompt.BuildExecuteMessages(flowState, conversationContext)
// 5.1 Token 预算检查 & 上下文压缩。
messages = compactExecuteMessagesIfNeeded(
ctx, messages, input, flowState, emitter,
)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: executeStageName,
StatusBlockID: executeStatusBlockID,
})
log.Printf(
"[DEBUG] execute LLM context begin chat=%s round=%d message_count=%d\n%s\n[DEBUG] execute LLM context end chat=%s round=%d",
flowState.ConversationID,
flowState.RoundUsed,
len(messages),
formatExecuteLLMMessagesForDebug(messages),
flowState.ConversationID,
flowState.RoundUsed,
)
logNodeLLMContext(executeStageName, "decision", flowState, messages)
decision, rawResult, err := infrallm.GenerateJSON[newagentmodel.ExecuteDecision](
ctx,
input.Client,
@@ -337,6 +335,7 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
if !isConfirmWithCard && !isAskUser && !isAbort {
// 推流给前端
msg := schema.AssistantMessage(speakText, nil)
if err := emitter.EmitPseudoAssistantText(
ctx,
executeSpeakBlockID,
@@ -346,6 +345,7 @@ func RunExecuteNode(ctx context.Context, input ExecuteNodeInput) error {
); err != nil {
return fmt.Errorf("执行文案推送失败: %w", err)
}
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
// 1. confirm / ask_user 的 speak 仍要写入历史,避免下一轮 LLM 丢失自己的执行上下文。
// 2. abort 不在这里写历史,避免先输出中间 speak再在 deliver 收到第二份终止文案。
@@ -1674,79 +1674,3 @@ func flattenForLog(text string) string {
text = strings.ReplaceAll(text, "\r", " ")
return strings.TrimSpace(text)
}
// formatExecuteLLMMessagesForDebug expands the full message context sent to
// the LLM for this round into a readable multi-line log dump.
//
// Notes:
// 1. Messages are printed one per index so they can be matched item by item
//    against the upstream context-assembly steps;
// 2. content / reasoning_content / tool_calls / extra are emitted in full,
//    with no truncation;
// 3. Debug-only instrumentation; it takes no part in business decisions.
func formatExecuteLLMMessagesForDebug(messages []*schema.Message) string {
if len(messages) == 0 {
return "(empty messages)"
}
var sb strings.Builder
for i, msg := range messages {
// Per-message header carrying the message index.
sb.WriteString(fmt.Sprintf("----- message[%d] -----\n", i))
if msg == nil {
sb.WriteString("role: <nil>\n\n")
continue
}
sb.WriteString(fmt.Sprintf("role: %s\n", msg.Role))
// Optional tool-routing fields: printed only when non-blank.
if strings.TrimSpace(msg.ToolCallID) != "" {
sb.WriteString(fmt.Sprintf("tool_call_id: %s\n", msg.ToolCallID))
}
if strings.TrimSpace(msg.ToolName) != "" {
sb.WriteString(fmt.Sprintf("tool_name: %s\n", msg.ToolName))
}
if len(msg.ToolCalls) > 0 {
sb.WriteString("tool_calls:\n")
for j, call := range msg.ToolCalls {
sb.WriteString(fmt.Sprintf(" - [%d] id=%s type=%s function=%s\n", j, call.ID, call.Type, call.Function.Name))
sb.WriteString(" arguments:\n")
sb.WriteString(indentMultilineForDebug(call.Function.Arguments, " "))
sb.WriteString("\n")
}
}
if strings.TrimSpace(msg.ReasoningContent) != "" {
sb.WriteString("reasoning_content:\n")
sb.WriteString(indentMultilineForDebug(msg.ReasoningContent, " "))
sb.WriteString("\n")
}
// content is always printed, even when empty (helper renders <empty>).
sb.WriteString("content:\n")
sb.WriteString(indentMultilineForDebug(msg.Content, " "))
sb.WriteString("\n")
if len(msg.Extra) > 0 {
sb.WriteString("extra:\n")
raw, err := json.MarshalIndent(msg.Extra, "", " ")
if err != nil {
// Keep the dump usable even if Extra cannot be marshalled.
sb.WriteString(indentMultilineForDebug("<marshal_error>", " "))
} else {
sb.WriteString(indentMultilineForDebug(string(raw), " "))
}
sb.WriteString("\n")
}
sb.WriteString("\n")
}
return sb.String()
}
// indentMultilineForDebug prepends prefix to every line of text so that
// multi-line values stay readable after log wrapping. Empty input renders
// as a prefixed "<empty>" placeholder.
func indentMultilineForDebug(text, prefix string) string {
    if len(text) == 0 {
        return prefix + "<empty>"
    }
    parts := strings.Split(text, "\n")
    indented := make([]string, 0, len(parts))
    for _, part := range parts {
        indented = append(indented, prefix+part)
    }
    return strings.Join(indented, "\n")
}

View File

@@ -1,197 +0,0 @@
package newagentnode
import (
"context"
"encoding/json"
"fmt"
"log"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/cloudwego/eino/schema"
)
// compactExecuteMessagesIfNeeded checks the Execute prompt's token budget and,
// when over budget, runs LLM compaction on msg1 (conversation history) and
// msg2 (ReAct loop records).
//
// Message layout contract (as returned by BuildExecuteMessages):
//
// [0] system — msg0: system rules
// [1] assistant — msg1: conversation-history context
// [2] assistant — msg2: this round's ReAct loop records
// [3] system — msg3: current state + user prompt
func compactExecuteMessagesIfNeeded(
ctx context.Context,
messages []*schema.Message,
input ExecuteNodeInput,
flowState *newagentmodel.CommonState,
emitter *newagentstream.ChunkEmitter,
) []*schema.Message {
// Anything other than the 4-message layout is passed through untouched.
if len(messages) != 4 {
return messages
}
// Extract the text content of the four messages.
msg0 := messages[0].Content
msg1 := messages[1].Content
msg2 := messages[2].Content
msg3 := messages[3].Content
// Token budget check.
breakdown, overBudget, needCompactMsg1, needCompactMsg2 := pkg.CheckExecuteTokenBudget(msg0, msg1, msg2, msg3)
log.Printf(
"[COMPACT] token budget check: total=%d budget=%d over=%v compactMsg1=%v compactMsg2=%v (msg0=%d msg1=%d msg2=%d msg3=%d)",
breakdown.Total, breakdown.Budget, overBudget, needCompactMsg1, needCompactMsg2,
breakdown.Msg0, breakdown.Msg1, breakdown.Msg2, breakdown.Msg3,
)
if !overBudget {
// Under budget: record the token distribution and return unchanged.
saveTokenStats(ctx, input, flowState, breakdown)
return messages
}
// ---- msg1 compaction ----
if needCompactMsg1 {
msg1 = compactMsg1IfNeeded(ctx, input, flowState, emitter, msg1)
messages[1].Content = msg1
// Recompute the budget after compacting msg1.
breakdown = pkg.EstimateExecuteMessagesTokens(msg0, msg1, msg2, msg3)
}
// ---- msg2 compaction ----
// Runs when flagged up front, or when the msg1 pass alone was not enough.
if needCompactMsg2 || breakdown.Total > pkg.ExecuteTokenBudget {
msg2 = compactMsg2IfNeeded(ctx, input, flowState, emitter, msg2)
messages[2].Content = msg2
breakdown = pkg.EstimateExecuteMessagesTokens(msg0, msg1, msg2, msg3)
}
// Record the final token distribution.
saveTokenStats(ctx, input, flowState, breakdown)
log.Printf(
"[COMPACT] after compaction: total=%d budget=%d (msg0=%d msg1=%d msg2=%d msg3=%d)",
breakdown.Total, breakdown.Budget,
breakdown.Msg0, breakdown.Msg1, breakdown.Msg2, breakdown.Msg3,
)
return messages
}
// compactMsg1IfNeeded runs LLM compaction on msg1 (conversation history).
// On any failure it degrades gracefully and returns the original msg1.
func compactMsg1IfNeeded(
ctx context.Context,
input ExecuteNodeInput,
flowState *newagentmodel.CommonState,
emitter *newagentstream.ChunkEmitter,
msg1 string,
) string {
compactionStore := input.CompactionStore
if compactionStore == nil {
log.Printf("[COMPACT] CompactionStore is nil, skip msg1 compaction")
return msg1
}
// Load any existing compaction summary; a load failure is non-fatal and
// compaction simply proceeds without the cached summary.
existingSummary, _, err := compactionStore.LoadCompaction(ctx, flowState.UserID, flowState.ConversationID)
if err != nil {
log.Printf("[COMPACT] load existing compaction failed: %v, proceed without cache", err)
}
// SSE: compaction started.
tokenBefore := pkg.EstimateTextTokens(msg1)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg1", "context_compact_start",
fmt.Sprintf("正在压缩对话历史(%d tokens...", tokenBefore),
false,
)
// Run LLM compaction; on failure fall back to the uncompacted text.
newSummary, err := newagentprompt.CompactMsg1(ctx, input.Client, msg1, existingSummary)
if err != nil {
log.Printf("[COMPACT] compact msg1 failed: %v", err)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg1", "context_compact_done",
"对话历史压缩失败,使用原始文本",
false,
)
return msg1
}
// SSE: compaction finished.
tokenAfter := pkg.EstimateTextTokens(newSummary)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg1", "context_compact_done",
fmt.Sprintf("对话历史已压缩:%d → %d tokens", tokenBefore, tokenAfter),
false,
)
// Persist the compaction result, keyed by the current round as watermark.
if err := compactionStore.SaveCompaction(ctx, flowState.UserID, flowState.ConversationID, newSummary, flowState.RoundUsed); err != nil {
log.Printf("[COMPACT] save compaction failed: %v", err)
}
return newSummary
}
// compactMsg2IfNeeded runs LLM compaction on msg2 (ReAct loop records).
// On failure it degrades gracefully and returns the original msg2.
// NOTE(review): flowState is currently unused here — presumably kept for
// signature symmetry with compactMsg1IfNeeded; confirm before removing.
func compactMsg2IfNeeded(
ctx context.Context,
input ExecuteNodeInput,
flowState *newagentmodel.CommonState,
emitter *newagentstream.ChunkEmitter,
msg2 string,
) string {
// SSE: compaction started.
tokenBefore := pkg.EstimateTextTokens(msg2)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg2", "context_compact_start",
fmt.Sprintf("正在压缩执行记录(%d tokens...", tokenBefore),
false,
)
// Run LLM compaction; on failure fall back to the uncompacted text.
compressed, err := newagentprompt.CompactMsg2(ctx, input.Client, msg2)
if err != nil {
log.Printf("[COMPACT] compact msg2 failed: %v", err)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg2", "context_compact_done",
"执行记录压缩失败,使用原始文本",
false,
)
return msg2
}
// SSE: compaction finished. Unlike msg1, the result is not persisted —
// the ReAct loop record is rebuilt each round.
tokenAfter := pkg.EstimateTextTokens(compressed)
_ = emitter.EmitStatus(
executeStatusBlockID, "compact_msg2", "context_compact_done",
fmt.Sprintf("执行记录已压缩:%d → %d tokens", tokenBefore, tokenAfter),
false,
)
return compressed
}
// saveTokenStats persists the current token breakdown to the DB through the
// configured CompactionStore. All failures are logged and never propagated,
// and a nil store silently disables the feature.
func saveTokenStats(
    ctx context.Context,
    input ExecuteNodeInput,
    flowState *newagentmodel.CommonState,
    breakdown pkg.ExecuteTokenBreakdown,
) {
    store := input.CompactionStore
    if store == nil {
        return
    }
    payload, marshalErr := json.Marshal(breakdown)
    if marshalErr != nil {
        log.Printf("[COMPACT] marshal token stats failed: %v", marshalErr)
        return
    }
    saveErr := store.SaveContextTokenStats(ctx, flowState.UserID, flowState.ConversationID, string(payload))
    if saveErr != nil {
        log.Printf("[COMPACT] save token stats failed: %v", saveErr)
    }
}

View File

@@ -27,6 +27,7 @@ type InterruptNodeInput struct {
RuntimeState *newagentmodel.AgentRuntimeState
ConversationContext *newagentmodel.ConversationContext
ChunkEmitter *newagentstream.ChunkEmitter
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
// RunInterruptNode 执行一轮中断节点逻辑。
@@ -55,7 +56,7 @@ func RunInterruptNode(ctx context.Context, input InterruptNodeInput) error {
switch pending.Type {
case newagentmodel.PendingInteractionTypeAskUser:
return handleInterruptAskUser(ctx, pending, conversationContext, emitter)
return handleInterruptAskUser(ctx, runtimeState, input.PersistVisibleMessage, pending, conversationContext, emitter)
case newagentmodel.PendingInteractionTypeConfirm:
return handleInterruptConfirm(pending, emitter)
default:
@@ -70,6 +71,8 @@ func RunInterruptNode(ctx context.Context, input InterruptNodeInput) error {
// 写入历史,然后结束。用户体验和正常对话一样 — 助手问了问题,停下来等回复。
func handleInterruptAskUser(
ctx context.Context,
runtimeState *newagentmodel.AgentRuntimeState,
persist newagentmodel.PersistVisibleMessageFunc,
pending *newagentmodel.PendingInteraction,
conversationContext *newagentmodel.ConversationContext,
emitter *newagentstream.ChunkEmitter,
@@ -89,7 +92,9 @@ func handleInterruptAskUser(
}
// 写入对话历史,下一轮 resume 时 LLM 能看到这个上下文。
conversationContext.AppendHistory(schema.AssistantMessage(text, nil))
msg := schema.AssistantMessage(text, nil)
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, persist, runtimeState.EnsureCommonState(), msg)
// 状态持久化已由 agent_nodes 层统一处理,此处不再需要自行存快照。

View File

@@ -0,0 +1,121 @@
package newagentnode
import (
"encoding/json"
"fmt"
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
"github.com/cloudwego/eino/schema"
)
// logNodeLLMContext prints the complete message context a node is about to
// send to the LLM, as one uniformly formatted debug log entry.
//
// 1. The begin/end markers carry stage / phase / chat / round so the multiple
//    LLM calls inside a single request can be correlated during debugging.
// 2. Messages are expanded in full with no truncation, so the assembled
//    prompt can be inspected verbatim when reproducing an issue.
// 3. Debug-only: it never participates in business decisions and never
//    mutates the context.
func logNodeLLMContext(
    stage string,
    phase string,
    flowState *newagentmodel.CommonState,
    messages []*schema.Message,
) {
    var (
        chatID    string
        roundUsed int
    )
    if flowState != nil {
        chatID = flowState.ConversationID
        roundUsed = flowState.RoundUsed
    }
    // Hoist the trimmed phase: it appears in both the begin and end markers.
    trimmedPhase := strings.TrimSpace(phase)
    log.Printf(
        "[DEBUG] %s LLM context begin phase=%s chat=%s round=%d message_count=%d\n%s\n[DEBUG] %s LLM context end phase=%s chat=%s round=%d",
        stage,
        trimmedPhase,
        chatID,
        roundUsed,
        len(messages),
        formatLLMMessagesForDebug(messages),
        stage,
        trimmedPhase,
        chatID,
        roundUsed,
    )
}
// formatLLMMessagesForDebug renders the outgoing LLM message list as a
// readable multi-line dump.
//
// 1. Each message is emitted under an indexed header so it can be matched
//    against the upstream context-assembly steps one by one.
// 2. content / reasoning_content / tool_calls / extra are written out in
//    full, with no truncation.
// 3. Debug-only helper; it takes no part in business decisions.
func formatLLMMessagesForDebug(messages []*schema.Message) string {
    if len(messages) == 0 {
        return "(empty messages)"
    }
    var out strings.Builder
    for idx, m := range messages {
        out.WriteString(fmt.Sprintf("----- message[%d] -----\n", idx))
        if m == nil {
            out.WriteString("role: <nil>\n\n")
            continue
        }
        out.WriteString(fmt.Sprintf("role: %s\n", m.Role))
        // Optional tool-routing fields are only printed when non-blank.
        if strings.TrimSpace(m.ToolCallID) != "" {
            out.WriteString(fmt.Sprintf("tool_call_id: %s\n", m.ToolCallID))
        }
        if strings.TrimSpace(m.ToolName) != "" {
            out.WriteString(fmt.Sprintf("tool_name: %s\n", m.ToolName))
        }
        if len(m.ToolCalls) > 0 {
            out.WriteString("tool_calls:\n")
            for k, tc := range m.ToolCalls {
                out.WriteString(fmt.Sprintf(" - [%d] id=%s type=%s function=%s\n", k, tc.ID, tc.Type, tc.Function.Name))
                out.WriteString(" arguments:\n")
                out.WriteString(indentMultilineForDebug(tc.Function.Arguments, " "))
                out.WriteString("\n")
            }
        }
        if strings.TrimSpace(m.ReasoningContent) != "" {
            out.WriteString("reasoning_content:\n")
            out.WriteString(indentMultilineForDebug(m.ReasoningContent, " "))
            out.WriteString("\n")
        }
        // content is always present in the dump, even when empty.
        out.WriteString("content:\n")
        out.WriteString(indentMultilineForDebug(m.Content, " "))
        out.WriteString("\n")
        if len(m.Extra) > 0 {
            out.WriteString("extra:\n")
            encoded, encodeErr := json.MarshalIndent(m.Extra, "", " ")
            if encodeErr != nil {
                // Keep the dump usable even if Extra cannot be marshalled.
                out.WriteString(indentMultilineForDebug("<marshal_error>", " "))
            } else {
                out.WriteString(indentMultilineForDebug(string(encoded), " "))
            }
            out.WriteString("\n")
        }
        out.WriteString("\n")
    }
    return out.String()
}
// indentMultilineForDebug prefixes every line of text with the given indent
// so multi-line values remain readable after log wrapping. An empty input is
// rendered as a prefixed "<empty>" placeholder.
func indentMultilineForDebug(text, prefix string) string {
    if text == "" {
        return prefix + "<empty>"
    }
    var b strings.Builder
    for n, line := range strings.Split(text, "\n") {
        if n > 0 {
            b.WriteString("\n")
        }
        b.WriteString(prefix)
        b.WriteString(line)
    }
    return b.String()
}

View File

@@ -36,6 +36,8 @@ type PlanNodeInput struct {
ResumeNode string
AlwaysExecute bool // true 时计划生成后自动确认,不进入 confirm 节点
ThinkingEnabled bool // 是否开启 thinking由 config.yaml 的 agent.thinking.plan 注入
CompactionStore newagentmodel.CompactionStore // 上下文压缩持久化
PersistVisibleMessage newagentmodel.PersistVisibleMessageFunc
}
// RunPlanNode 执行一轮规划节点逻辑。
@@ -68,6 +70,15 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
// 2. 构造本轮规划输入。
messages := newagentprompt.BuildPlanMessages(flowState, conversationContext, input.UserInput)
messages = compactUnifiedMessagesIfNeeded(ctx, messages, UnifiedCompactInput{
Client: input.Client,
CompactionStore: input.CompactionStore,
FlowState: flowState,
Emitter: emitter,
StageName: planStageName,
StatusBlockID: planStatusBlockID,
})
logNodeLLMContext(planStageName, "planning", flowState, messages)
// 3. 单轮深度规划:由配置决定是否开启 thinking不做 token 上限约束。
decision, rawResult, err := infrallm.GenerateJSON[newagentmodel.PlanDecision](
@@ -95,6 +106,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
// 4. 若模型先对用户说了话,且不是 ask_userask_user 交给 interrupt 收口),则先以伪流式推送,再写回 history。
if strings.TrimSpace(decision.Speak) != "" && decision.Action != newagentmodel.PlanActionAskUser {
msg := schema.AssistantMessage(decision.Speak, nil)
if err := emitter.EmitPseudoAssistantText(
ctx,
planSpeakBlockID,
@@ -104,7 +116,8 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
); err != nil {
return fmt.Errorf("规划文案推送失败: %w", err)
}
conversationContext.AppendHistory(schema.AssistantMessage(decision.Speak, nil))
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
// 5. 按规划动作推进流程状态。
@@ -139,6 +152,7 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
// 3. 推流后同步写入历史,确保后续 Execute 阶段的上下文也能看到这份计划。
summary := strings.TrimSpace(buildPlanSummary(decision.PlanSteps))
if summary != "" {
msg := schema.AssistantMessage(summary, nil)
if err := emitter.EmitPseudoAssistantText(
ctx,
planSummaryBlockID,
@@ -148,7 +162,8 @@ func RunPlanNode(ctx context.Context, input PlanNodeInput) error {
); err != nil {
return fmt.Errorf("自动执行前计划摘要推送失败: %w", err)
}
conversationContext.AppendHistory(schema.AssistantMessage(summary, nil))
conversationContext.AppendHistory(msg)
persistVisibleAssistantMessage(ctx, input.PersistVisibleMessage, flowState, msg)
}
flowState.ConfirmPlan()

View File

@@ -0,0 +1,301 @@
package newagentnode
import (
"context"
"encoding/json"
"fmt"
"log"
infrallm "github.com/LoveLosita/smartflow/backend/infra/llm"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagentprompt "github.com/LoveLosita/smartflow/backend/newAgent/prompt"
newagentstream "github.com/LoveLosita/smartflow/backend/newAgent/stream"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/cloudwego/eino/schema"
)
// UnifiedCompactInput carries the parameters of the unified compaction entry.
//
// Design notes:
//  1. extracts the compaction-related fields formerly taken from
//     ExecuteNodeInput, removing the direct dependency on Execute;
//  2. each node (Plan/Chat/Deliver) builds this value from its own NodeInput;
//  3. StageName and StatusBlockID distinguish the log source and the SSE
//     status push target.
type UnifiedCompactInput struct {
// Client invokes the LLM that compacts msg1/msg2.
Client *infrallm.Client
// CompactionStore persists compaction summaries and token stats; when nil,
// persistence is skipped.
CompactionStore newagentmodel.CompactionStore
// FlowState provides locating info such as userID / chatID / roundUsed.
FlowState *newagentmodel.CommonState
// Emitter pushes compaction-progress SSE events.
// NOTE(review): unlike CompactionStore, no nil behavior is documented and
// the compaction helpers call Emitter.EmitStatus unguarded — confirm all
// callers always supply a non-nil emitter.
Emitter *newagentstream.ChunkEmitter
// StageName identifies the current stage (e.g. "execute"/"plan"/"chat"/
// "deliver"), used in logs and cache keys.
StageName string
// StatusBlockID is the block ID for SSE status pushes; each node uses its own.
StatusBlockID string
}
// compactUnifiedMessagesIfNeeded checks the token budget of the unified
// message structure and, when over budget, runs LLM compaction on msg1
// (conversation history) and msg2 (stage workspace).
//
// Expected layout (produced by buildUnifiedStageMessages):
//
//	[0] system    — msg0: system rules + tool digest
//	[1] assistant — msg1: conversation history context
//	[2] assistant — msg2: stage workspace (Execute=ReAct loop, others=placeholder)
//	[3] system    — msg3: stage state + memory + instructions
//
// Compaction strategy:
//  1. msg1 is compacted when it exceeds half of the available budget
//     (merging any existing summary with the new content);
//  2. if the total is still over budget after msg1 compaction, msg2 is
//     compacted as well;
//  3. results are persisted to CompactionStore so the next round can reuse
//     the summary instead of recomputing it.
func compactUnifiedMessagesIfNeeded(
	ctx context.Context,
	messages []*schema.Message,
	input UnifiedCompactInput,
) []*schema.Message {
	if input.FlowState == nil {
		log.Printf("[COMPACT:%s] FlowState is nil, skip token stats refresh", input.StageName)
		return messages
	}
	// Degrade to a per-role aggregate when the layout is not the strict
	// 4-message form, so context_token_stats keeps refreshing anyway.
	if len(messages) != 4 {
		stats := estimateFallbackStageTokenBreakdown(messages)
		log.Printf(
			"[COMPACT:%s] fallback token stats refresh: total=%d budget=%d count=%d (msg0=%d msg1=%d msg2=%d msg3=%d)",
			input.StageName, stats.Total, stats.Budget, len(messages),
			stats.Msg0, stats.Msg1, stats.Msg2, stats.Msg3,
		)
		saveUnifiedTokenStats(ctx, input, stats)
		return messages
	}
	// Pull the four message bodies for budget accounting.
	sysText := messages[0].Content
	historyText := messages[1].Content
	workspaceText := messages[2].Content
	directiveText := messages[3].Content
	stats, over, wantMsg1, wantMsg2 := pkg.CheckStageTokenBudget(sysText, historyText, workspaceText, directiveText)
	log.Printf(
		"[COMPACT:%s] token budget check: total=%d budget=%d over=%v compactMsg1=%v compactMsg2=%v (msg0=%d msg1=%d msg2=%d msg3=%d)",
		input.StageName, stats.Total, stats.Budget, over, wantMsg1, wantMsg2,
		stats.Msg0, stats.Msg1, stats.Msg2, stats.Msg3,
	)
	if !over {
		// Within budget: just record the token distribution.
		saveUnifiedTokenStats(ctx, input, stats)
		return messages
	}
	// msg1 compaction (conversation history → LLM summary), then re-measure.
	if wantMsg1 {
		historyText = compactUnifiedMsg1(ctx, input, historyText)
		messages[1].Content = historyText
		stats = pkg.EstimateStageMessagesTokens(sysText, historyText, workspaceText, directiveText)
	}
	// msg2 compaction (stage workspace → LLM summary) when still required.
	if wantMsg2 || stats.Total > pkg.StageTokenBudget {
		workspaceText = compactUnifiedMsg2(ctx, input, workspaceText)
		messages[2].Content = workspaceText
		stats = pkg.EstimateStageMessagesTokens(sysText, historyText, workspaceText, directiveText)
	}
	// Record the final token distribution.
	saveUnifiedTokenStats(ctx, input, stats)
	log.Printf(
		"[COMPACT:%s] after compaction: total=%d budget=%d (msg0=%d msg1=%d msg2=%d msg3=%d)",
		input.StageName, stats.Total, stats.Budget,
		stats.Msg0, stats.Msg1, stats.Msg2, stats.Msg3,
	)
	return messages
}
// estimateFallbackStageTokenBreakdown approximates the stage token breakdown
// by message role when the layout is not the unified 4-message form.
//
// Heuristics:
//  1. aggregate tokens per message kind so the total stays accurate;
//  2. treat the last user message as msg3, preserving the stage-directive slot;
//  3. fold everything else into msg1 / msg2 so the context stats never go
//     stale just because the structure is non-standard.
func estimateFallbackStageTokenBreakdown(messages []*schema.Message) pkg.StageTokenBreakdown {
	result := pkg.StageTokenBreakdown{Budget: pkg.StageTokenBudget}
	if len(messages) == 0 {
		return result
	}
	// Locate the last non-nil user message; it plays the msg3 role.
	lastUser := -1
	for idx := len(messages) - 1; idx >= 0; idx-- {
		if m := messages[idx]; m != nil && m.Role == schema.User {
			lastUser = idx
			break
		}
	}
	for idx, m := range messages {
		if m == nil {
			continue
		}
		cost := pkg.EstimateMessageTokens(m)
		result.Total += cost
		switch {
		case m.Role == schema.System:
			result.Msg0 += cost
		case m.Role == schema.User && idx == lastUser:
			result.Msg3 += cost
		case m.Role == schema.Tool:
			result.Msg2 += cost
		case m.Role == schema.Assistant && len(m.ToolCalls) > 0:
			// Assistant messages that carry tool calls belong to the
			// workspace view, not the conversation view.
			result.Msg2 += cost
		default:
			// Earlier user turns, plain assistant replies and any other
			// role all count toward the conversation history.
			result.Msg1 += cost
		}
	}
	return result
}
// compactUnifiedMsg1 runs LLM compaction on msg1 (conversation history).
//
// Steps:
//  1. skip when CompactionStore is nil (test environments / scaffolding);
//  2. load the stage's existing summary and merge it with the current msg1
//     through the LLM;
//  3. on failure fall back to the original text without breaking the flow;
//  4. on success persist the new summary for reuse in the next round.
func compactUnifiedMsg1(
	ctx context.Context,
	input UnifiedCompactInput,
	msg1 string,
) string {
	// Without a store we can neither load nor save summaries; skip entirely.
	if input.CompactionStore == nil {
		log.Printf("[COMPACT:%s] CompactionStore is nil, skip msg1 compaction", input.StageName)
		return msg1
	}
	// Load the stage's prior summary (may be empty); a load failure only
	// means we proceed without the cached summary.
	prior, _, loadErr := input.CompactionStore.LoadStageCompaction(ctx, input.FlowState.UserID, input.FlowState.ConversationID, input.StageName)
	if loadErr != nil {
		log.Printf("[COMPACT:%s] load existing compaction failed: %v, proceed without cache", input.StageName, loadErr)
	}
	// SSE: announce compaction start.
	before := pkg.EstimateTextTokens(msg1)
	_ = input.Emitter.EmitStatus(
		input.StatusBlockID, input.StageName, "context_compact_start",
		fmt.Sprintf("正在压缩对话历史(%d tokens...", before),
		false,
	)
	// Ask the LLM to fold the full msg1 plus the prior summary into one
	// compact summary.
	summary, err := newagentprompt.CompactMsg1(ctx, input.Client, msg1, prior)
	if err != nil {
		log.Printf("[COMPACT:%s] compact msg1 failed: %v", input.StageName, err)
		_ = input.Emitter.EmitStatus(
			input.StatusBlockID, input.StageName, "context_compact_done",
			"对话历史压缩失败,使用原始文本",
			false,
		)
		return msg1
	}
	// SSE: announce compaction result.
	after := pkg.EstimateTextTokens(summary)
	_ = input.Emitter.EmitStatus(
		input.StatusBlockID, input.StageName, "context_compact_done",
		fmt.Sprintf("对话历史已压缩:%d → %d tokens", before, after),
		false,
	)
	// Persist so the next round reuses the summary instead of recomputing it.
	if err := input.CompactionStore.SaveStageCompaction(ctx, input.FlowState.UserID, input.FlowState.ConversationID, input.StageName, summary, input.FlowState.RoundUsed); err != nil {
		log.Printf("[COMPACT:%s] save compaction failed: %v", input.StageName, err)
	}
	return summary
}
// compactUnifiedMsg2 runs LLM compaction on msg2 (stage workspace).
//
// Notes:
//  1. outside Execute, msg2 is usually a small placeholder, so compaction is
//     pointless but harmless;
//  2. for Execute, msg2 carries the ReAct loop records and compaction can
//     save a significant amount of tokens;
//  3. on failure fall back to the original text without breaking the flow.
func compactUnifiedMsg2(
	ctx context.Context,
	input UnifiedCompactInput,
	msg2 string,
) string {
	before := pkg.EstimateTextTokens(msg2)
	// SSE: announce compaction start.
	_ = input.Emitter.EmitStatus(
		input.StatusBlockID, input.StageName, "context_compact_start",
		fmt.Sprintf("正在压缩执行记录(%d tokens...", before),
		false,
	)
	// Ask the LLM to compress the workspace text.
	condensed, err := newagentprompt.CompactMsg2(ctx, input.Client, msg2)
	if err != nil {
		log.Printf("[COMPACT:%s] compact msg2 failed: %v", input.StageName, err)
		_ = input.Emitter.EmitStatus(
			input.StatusBlockID, input.StageName, "context_compact_done",
			"执行记录压缩失败,使用原始文本",
			false,
		)
		return msg2
	}
	// SSE: announce compaction result.
	after := pkg.EstimateTextTokens(condensed)
	_ = input.Emitter.EmitStatus(
		input.StatusBlockID, input.StageName, "context_compact_done",
		fmt.Sprintf("执行记录已压缩:%d → %d tokens", before, after),
		false,
	)
	return condensed
}
// saveUnifiedTokenStats persists the current token distribution to the DB.
//
// Failure handling:
//  1. skipped when CompactionStore or FlowState is nil (test environments /
//     scaffolding phase);
//  2. marshal errors are logged only and never abort the main flow;
//  3. write errors are logged only and never abort the main flow.
func saveUnifiedTokenStats(
	ctx context.Context,
	input UnifiedCompactInput,
	breakdown pkg.StageTokenBreakdown,
) {
	if input.CompactionStore == nil || input.FlowState == nil {
		return
	}
	payload, err := json.Marshal(breakdown)
	if err != nil {
		log.Printf("[COMPACT:%s] marshal token stats failed: %v", input.StageName, err)
		return
	}
	if werr := input.CompactionStore.SaveContextTokenStats(ctx, input.FlowState.UserID, input.FlowState.ConversationID, string(payload)); werr != nil {
		log.Printf("[COMPACT:%s] save token stats failed: %v", input.StageName, werr)
	}
}

View File

@@ -0,0 +1,37 @@
package newagentnode
import (
"context"
"log"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
"github.com/cloudwego/eino/schema"
)
// persistVisibleAssistantMessage hands assistant text that is actually shown
// to the user over to the service layer for persistence.
//
// Responsibility boundaries:
//  1. only visible assistant messages are handled — internal correction
//     prompts, tool-call results and pure status text are ignored;
//  2. persistence failures are logged only and never abort the node's main
//     flow, so an already-delivered reply is not broken by a failed write;
//  3. the concrete Redis / MySQL / optimistic-cache writes are performed by
//     the service callback.
func persistVisibleAssistantMessage(
	ctx context.Context,
	persist newagentmodel.PersistVisibleMessageFunc,
	state *newagentmodel.CommonState,
	msg *schema.Message,
) {
	if persist == nil || state == nil || msg == nil {
		return
	}
	// Only non-empty assistant messages are user-visible.
	if strings.TrimSpace(string(msg.Role)) != string(schema.Assistant) {
		return
	}
	if strings.TrimSpace(msg.Content) == "" {
		return
	}
	if err := persist(ctx, state, msg); err != nil {
		log.Printf("[WARN] persist visible assistant message failed chat=%s phase=%s err=%v", state.ConversationID, state.Phase, err)
	}
}

View File

@@ -25,7 +25,7 @@ const chatRoutingSystemPrompt = `
- route=direct_reply 时,控制码后的可见内容应直接回应用户问题,而不是先讲能力边界。
- route=deep_answer 时,只输出控制码即可,不要补“让我想想”“这是个好问题”之类的占位话术。
粗排判断:当用户意图包含"批量安排/排课/把任务类排进日程",且上下文中有任务类 ID 时,设置 rough_build=true。
粗排判断:当用户意图包含"批量安排/排课/把任务类排进日程"等批量调度需求时,设置 rough_build=true;后端会结合真实请求范围决定是否真正进入粗排
二次粗排约束(强约束):
- 若上下文已出现 rough_build_done且用户未明确要求"重新粗排/从头重排",必须设置 rough_build=false。
- "移动/微调/优化/均匀化/调顺序"等请求默认视为 refine不得再次触发 rough build。
@@ -83,40 +83,25 @@ func BuildChatRoutingSystemPrompt() string {
// BuildChatRoutingMessages 组装路由阶段的 messages。
func BuildChatRoutingMessages(ctx *newagentmodel.ConversationContext, userInput string, state *newagentmodel.CommonState, nonce string) []*schema.Message {
return buildStageMessages(
BuildChatRoutingSystemPrompt(),
return buildUnifiedStageMessages(
ctx,
BuildChatRoutingUserPrompt(ctx, userInput, state, nonce),
StageMessagesConfig{
SystemPrompt: BuildChatRoutingSystemPrompt(),
Msg1Content: buildChatConversationMessage(ctx),
Msg2Content: buildChatRoutingWorkspace(ctx),
Msg3Suffix: BuildChatRoutingUserPrompt(userInput, nonce),
Msg3Role: schema.User,
},
)
}
// BuildChatRoutingUserPrompt 构造路由阶段的用户提示词。
func BuildChatRoutingUserPrompt(ctx *newagentmodel.ConversationContext, userInput string, state *newagentmodel.CommonState, nonce string) string {
func BuildChatRoutingUserPrompt(userInput string, nonce string) string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("nonce=%s\n", nonce))
sb.WriteString(fmt.Sprintf("当前时间=%s\n", time.Now().In(time.Local).Format("2006-01-02 15:04")))
sb.WriteString("\n请判断用户本轮意图的复杂度,选择最合适的路由,并输出控制码和对应内容。\n")
// 注入任务类上下文(供粗排判断参考)。
if state != nil && len(state.TaskClassIDs) > 0 {
parts := make([]string, len(state.TaskClassIDs))
for i, id := range state.TaskClassIDs {
parts[i] = fmt.Sprintf("%d", id)
}
sb.WriteString(fmt.Sprintf("\n本次请求涉及的任务类 ID[%s]\n", strings.Join(parts, ", ")))
}
if state != nil && len(state.TaskClasses) > 0 {
sb.WriteString("任务类约束:\n")
for _, tc := range state.TaskClasses {
line := fmt.Sprintf("- [ID=%d] %s策略=%s总时段预算=%d", tc.ID, tc.Name, tc.Strategy, tc.TotalSlots)
if tc.StartDate != "" || tc.EndDate != "" {
line += fmt.Sprintf(",日期范围=%s ~ %s", tc.StartDate, tc.EndDate)
}
sb.WriteString(line + "\n")
}
}
sb.WriteString("\n请基于最近真实对话和本轮输入选择最合适的路由,并严格按系统约定输出控制码。\n")
trimmedInput := strings.TrimSpace(userInput)
if trimmedInput != "" {
@@ -146,10 +131,23 @@ func BuildDeepAnswerSystemPrompt() string {
}
// BuildDeepAnswerMessages 组装深度回答阶段的 messages。
func BuildDeepAnswerMessages(ctx *newagentmodel.ConversationContext, userInput string) []*schema.Message {
return buildStageMessages(
BuildDeepAnswerSystemPrompt(),
func BuildDeepAnswerMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext, userInput string) []*schema.Message {
return buildUnifiedStageMessages(
ctx,
userInput,
StageMessagesConfig{
SystemPrompt: BuildDeepAnswerSystemPrompt(),
Msg1Content: buildChatConversationMessage(ctx),
Msg2Content: buildDeepAnswerWorkspace(),
Msg3Suffix: buildDeepAnswerUserPrompt(userInput),
Msg3Role: schema.User,
},
)
}
// buildDeepAnswerUserPrompt returns the user's trimmed input, or a default
// "answer the question directly" instruction when the input is blank.
func buildDeepAnswerUserPrompt(userInput string) string {
	if trimmed := strings.TrimSpace(userInput); trimmed != "" {
		return trimmed
	}
	return "请直接回答用户刚才的问题。"
}

View File

@@ -0,0 +1,33 @@
package newagentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
)
// buildChatConversationMessage renders the real-conversation view shared by
// the chat and deep_answer nodes, delegating to the common history renderer
// with the chat-specific title.
func buildChatConversationMessage(ctx *newagentmodel.ConversationContext) string {
return buildConversationHistoryMessage(ctx, "真实对话记录")
}
// buildChatRoutingWorkspace renders the lightweight supplement section for
// the chat routing node.
//
// Design notes:
//  1. chat keeps only the minimal flow markers that directly affect the
//     routing decision;
//  2. rough_build_done must stay explicitly visible — otherwise the routing
//     layer loses the crucial "do not rough-build again" signal;
//  3. round counters, stage anchors and ReAct digests are execute-specific
//     and are no longer shown here.
func buildChatRoutingWorkspace(ctx *newagentmodel.ConversationContext) string {
	var b strings.Builder
	b.WriteString("路由补充:")
	if hasExecuteRoughBuildDone(ctx) {
		b.WriteString("\n- 已存在 rough_build_done除非用户明确要求重新粗排否则不要再次触发 rough_build。")
	} else {
		b.WriteString("\n- 暂无额外流程标记。")
	}
	return b.String()
}
// buildDeepAnswerWorkspace renders the lightweight workspace (msg2) for the
// deep_answer node. It is a fixed instruction: continue the recent dialogue
// and focus on answering the user's current question.
func buildDeepAnswerWorkspace() string {
return "回答补充:请直接延续最近对话,聚焦回答用户本轮问题。"
}

View File

@@ -0,0 +1,37 @@
package newagentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
)
// buildConversationHistoryMessage renders the "real conversation stream" as
// a msg1 body that any node can reuse directly.
//
// Responsibility boundaries:
//  1. only organizes user + assistant speak turns into stable text;
//  2. never includes tool_call / tool observation entries — those are not
//     part of the real conversation;
//  3. never trims for length; the unified compaction layer owns the budget.
func buildConversationHistoryMessage(ctx *newagentmodel.ConversationContext, title string) string {
	header := strings.TrimSpace(title)
	if header == "" {
		header = "真实对话记录"
	}
	// NOTE(review): the original appended an empty string to the title,
	// which suggests a delimiter (e.g. a full-width colon) may have been
	// lost in transit — confirm against the intended prompt format.
	var b strings.Builder
	b.WriteString(header)
	if ctx == nil {
		b.WriteString("\n暂无。")
		return b.String()
	}
	turns := CollectConversationTurns(ctx.HistorySnapshot())
	if len(turns) == 0 {
		b.WriteString("\n暂无。")
		return b.String()
	}
	for _, turn := range turns {
		b.WriteString("\n" + turn.Role + ": \"" + turn.Content + "\"")
	}
	return b.String()
}

View File

@@ -14,16 +14,16 @@ const deliverSystemPrompt = `
请遵守以下规则:
1. 只基于已有历史和计划状态生成总结,不要编造未执行的操作。
2. 如果所有步骤都已完成,简要总结每一步的成果。
3. 如果因轮次耗尽提前结束,如实告知用户当前进度未完成部分。
4. 使用自然、友好的语气,不要机械罗列步骤
5. 如果用户后续可能需要继续操作,给出简短建议。
6. 只输出总结文本,不要输出 JSON不要输出 markdown 标题。
2. 如果所有步骤都已完成,请自然概括每一步的主要成果。
3. 如果流程因轮次耗尽或主动终止而提前结束,如实说明当前进度未完成部分。
4. 使用自然、友好的语气,不要机械罗列工具过程
5. 如果用户后续需要继续操作,可以给出一句简短建议。
6. 只输出总结文本,不要输出 JSON不要输出 markdown 标题。
你会看到:
- 原始计划步骤及完成判定
- 当前执行进度
- 执行阶段的对话历史
- 原始计划步骤及完成进度
- 最近真实对话
- 当前流程的收口状态
`
// BuildDeliverSystemPrompt 返回交付阶段系统提示词。
@@ -31,37 +31,52 @@ func BuildDeliverSystemPrompt() string {
return strings.TrimSpace(deliverSystemPrompt)
}
// BuildDeliverMessages 组装交付阶段 messages。
// BuildDeliverMessages 组装交付阶段 messages。
func BuildDeliverMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) []*schema.Message {
return buildStageMessages(
BuildDeliverSystemPrompt(),
roughBuildPrefix := buildDeliverRoughBuildPrefix(ctx, state)
return buildUnifiedStageMessages(
ctx,
BuildDeliverUserPrompt(state),
StageMessagesConfig{
SystemPrompt: BuildDeliverSystemPrompt(),
Msg1Content: buildDeliverConversationMessage(ctx),
Msg2Content: buildDeliverWorkspace(state),
Msg3Prefix: roughBuildPrefix,
Msg3Suffix: BuildDeliverUserPrompt(state, ctx),
Msg3Role: schema.User,
},
)
}
// BuildDeliverUserPrompt 构造交付阶段的用户提示词。
func BuildDeliverUserPrompt(state *newagentmodel.CommonState) string {
func BuildDeliverUserPrompt(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext) string {
var sb strings.Builder
sb.WriteString("请为当前任务生成完成总结。\n")
sb.WriteString(renderStateSummary(state))
sb.WriteString("\n")
sb.WriteString("请基于最近对话和交付工作区,生成一段自然、诚实的完成总结。\n")
if state == nil || !state.HasPlan() {
sb.WriteString("当前没有正式计划,请基于对话历史简要总结本次交互。\n")
if hasExecuteRoughBuildDone(ctx) {
sb.WriteString("当前没有正式计划,但本轮已经完成粗排,请结合粗排补充和任务类详情总结粗排结果,不要把它说成正式完结。\n")
} else {
sb.WriteString("当前没有正式计划,请只概括本次互动,不要编造成果。\n")
}
return strings.TrimSpace(sb.String())
}
current, total := state.PlanProgress()
exhausted := state.Exhausted()
completed := countCompletedPlanSteps(state)
total := len(state.PlanSteps)
if exhausted {
sb.WriteString(fmt.Sprintf("注意:任务因轮次耗尽提前结束,当前进度 %d/%d。\n", current, total))
sb.WriteString("请如实说明已完成未完成的部分,并建议用户如何继续。\n")
} else {
sb.WriteString("所有计划步骤已执行完毕,请总结整体成果。\n")
if state.IsExhaustedTerminal() {
sb.WriteString(fmt.Sprintf("注意:任务因轮次耗尽提前结束,当前已完成 %d/%d。\n", completed, total))
sb.WriteString("请如实说明已完成未完成的部分,并给出一句继续建议。\n")
return strings.TrimSpace(sb.String())
}
if state.IsAborted() {
sb.WriteString(fmt.Sprintf("注意:流程已被主动终止,当前已完成 %d/%d 步。\n", completed, total))
sb.WriteString("请如实说明停在何处,以及用户若想继续应如何衔接。\n")
return strings.TrimSpace(sb.String())
}
sb.WriteString("若计划已正常完成,请概括整体成果;若仍有未完成步骤,也必须如实说明。\n")
return strings.TrimSpace(sb.String())
}

View File

@@ -0,0 +1,137 @@
package newagentprompt
import (
"fmt"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
)
// buildDeliverConversationMessage renders the real-conversation view seen by
// the deliver node, delegating to the common history renderer with the
// deliver-specific title.
func buildDeliverConversationMessage(ctx *newagentmodel.ConversationContext) string {
return buildConversationHistoryMessage(ctx, "执行对话记录")
}
// buildDeliverRoughBuildPrefix builds deliver's dedicated msg3 prefix for
// the "rough build already done" scenario.
//
// Responsibility boundaries:
//  1. only supplements task-class information related to the rough build
//     into the msg3 prefix; it never rewrites the delivery summary itself;
//  2. injected only when rough_build_done is explicitly present in the
//     context, so normal delivery is not polluted with extra information;
//  3. fills in the task-class details for a first rough build without a
//     formal plan, so deliver sees task_class_ids and task-class constraints.
func buildDeliverRoughBuildPrefix(ctx *newagentmodel.ConversationContext, state *newagentmodel.CommonState) string {
	if !hasExecuteRoughBuildDone(ctx) {
		return ""
	}
	parts := []string{
		"粗排补充信息:",
		"- 本轮已经完成粗排,相关任务类已进入 suggested/existing不要把它们说成正式计划。",
	}
	if ids := renderPlanTaskClassIDs(state); ids != "" {
		parts = append(parts, "- "+ids)
	}
	if meta := renderPlanTaskClassMeta(state); meta != "" {
		parts = append(parts, "任务类详情:", meta)
	}
	if state == nil || !state.HasPlan() {
		parts = append(parts, "- 当前没有正式计划,请把这批任务类的粗排结果作为总结重点。")
	}
	return strings.Join(parts, "\n")
}
// buildDeliverWorkspace renders deliver's own result-oriented view (msg2).
//
// Design notes:
//  1. deliver only needs result-state facts: a plan outline, completion
//     progress and the terminal status;
//  2. tool catalogs, task-class constraints and ReAct digests are process
//     noise and are no longer injected;
//  3. without a formal plan it explicitly falls back to "summarize from the
//     conversation only".
func buildDeliverWorkspace(state *newagentmodel.CommonState) string {
	out := []string{"交付工作区:"}
	if state == nil {
		out = append(out, "- 当前缺少流程状态,请仅基于最近对话做诚实总结。")
		return strings.Join(out, "\n")
	}
	out = append(out, renderDeliverTerminalSummary(state))
	if !state.HasPlan() {
		out = append(out, "- 当前没有正式计划,请只概括本次互动。")
		return strings.Join(out, "\n")
	}
	done := countCompletedPlanSteps(state)
	out = append(out,
		fmt.Sprintf("- 计划进度:已完成 %d/%d 步。", done, len(state.PlanSteps)),
		"计划步骤:",
		renderDeliverStepOutline(state, done),
	)
	return strings.Join(out, "\n")
}
// renderDeliverTerminalSummary reports the terminal (wrap-up) status that
// the deliver node needs to know, as a single bullet line.
func renderDeliverTerminalSummary(state *newagentmodel.CommonState) string {
	if state == nil || !state.HasTerminalOutcome() || state.TerminalOutcome == nil {
		return "- 当前没有正式终止结果,请按最近对话和计划进度自然总结。"
	}
	var b strings.Builder
	b.WriteString(fmt.Sprintf("- 收口状态:%s", state.TerminalOutcome.Status))
	if stage := strings.TrimSpace(state.TerminalOutcome.Stage); stage != "" {
		b.WriteString(fmt.Sprintf(";阶段:%s", stage))
	}
	if userMsg := strings.TrimSpace(state.TerminalOutcome.UserMessage); userMsg != "" {
		b.WriteString(fmt.Sprintf(";用户提示:%s", userMsg))
	}
	return b.String()
}
// renderDeliverStepOutline produces the plan-step outline used by deliver.
// Steps with index < completed are marked done; the remainder are pending.
// Each line carries the step body plus its done-when criterion when present.
func renderDeliverStepOutline(state *newagentmodel.CommonState, completed int) string {
	if state == nil || len(state.PlanSteps) == 0 {
		return "- 暂无。"
	}
	var b strings.Builder
	for i, step := range state.PlanSteps {
		if i > 0 {
			b.WriteString("\n")
		}
		status := "未完成"
		if i < completed {
			status = "已完成"
		}
		body := strings.TrimSpace(step.Content)
		if body == "" {
			body = "(步骤正文为空)"
		}
		b.WriteString(fmt.Sprintf("%d. [%s] %s", i+1, status, body))
		if criterion := strings.TrimSpace(step.DoneWhen); criterion != "" {
			b.WriteString(fmt.Sprintf(" | 完成判定:%s", criterion))
		}
	}
	return b.String()
}
// countCompletedPlanSteps counts how many plan steps are currently finished.
//
// CurrentStep is treated as the number of completed steps, clamped to the
// range [0, len(PlanSteps)]; when it is zero or negative, a flow flagged as
// completed still counts every step as done.
// NOTE(review): a completed flow with 0 < CurrentStep < total reports only
// CurrentStep — confirm IsCompleted always coincides with a full CurrentStep.
func countCompletedPlanSteps(state *newagentmodel.CommonState) int {
	if state == nil {
		return 0
	}
	total := len(state.PlanSteps)
	switch {
	case total == 0:
		return 0
	case state.CurrentStep <= 0:
		if state.IsCompleted() {
			return total
		}
		return 0
	case state.CurrentStep >= total:
		return total
	default:
		return state.CurrentStep
	}
}

View File

@@ -12,20 +12,11 @@ import (
)
const (
// executeHistoryKindKey 用于在 history 中打运行态标记,供 prompt 分层识别。
// 说明loop_closed / step_advanced 等边界标记仍由节点层写入,但 prompt 层已不再消费它们——
// 因为 msg1/msg2 已经按"真实对话流 + 当前活跃 ReAct 记录"重构,不再做 msg2→msg1 的归档搬运。
executeHistoryKindKey = "newagent_history_kind"
executeHistoryKindCorrectionUser = "llm_correction_prompt"
executeHistoryKindLoopClosed = "execute_loop_closed"
executeHistoryKindStepAdvanced = "execute_step_advanced"
// executeLoopWindowLimit 控制当轮 ReAct Loop 窗口最多保留多少条记录。
executeLoopWindowLimit = 8
// executeTrimmedObservationText 是重复工具压缩后的 observation 占位文案。
executeTrimmedObservationText = "当前工具调用结果已经被使用过,当前无需使用,为节省上下文空间,已折叠"
// executeConversationTurnLimit 控制 msg1 注入的最大对话轮数user + assistant speak
// 超出时保留最近的条目,早期部分由 ReAct 摘要兜底。
executeConversationTurnLimit = 30
)
type executeToolSchemaDoc struct {
@@ -40,8 +31,6 @@ type executeLoopRecord struct {
Observation string
}
const executeMessage1MaxRunes = 1400
// buildExecuteStageMessages 组装 execute 阶段 4 条消息骨架。
//
// 消息结构(固定):
@@ -82,87 +71,24 @@ func buildExecuteMessage0(stageSystemPrompt string, ctx *newagentmodel.Conversat
return base + "\n\n" + toolCatalog
}
// splitExecuteLoopRecordsByBoundary 按已收口标记拆分归档/活跃 ReAct 记录
//
// 规则:
// 1. 标记之前的记录归档到 msg1
// 2. 标记之后的记录作为活跃 loop 进入 msg2
// 3. 若没有标记,则全部视为活跃记录(兼容旧会话快照)。
func splitExecuteLoopRecordsByBoundary(history []*schema.Message) (archived []executeLoopRecord, active []executeLoopRecord) {
if len(history) == 0 {
return nil, nil
}
boundary := findLatestExecuteBoundaryMarker(history)
if boundary < 0 {
return nil, collectExecuteLoopRecords(history)
}
if boundary > 0 {
archived = collectExecuteLoopRecords(history[:boundary])
}
if boundary+1 < len(history) {
active = collectExecuteLoopRecords(history[boundary+1:])
}
return archived, active
}
func findLatestExecuteBoundaryMarker(history []*schema.Message) int {
for i := len(history) - 1; i >= 0; i-- {
msg := history[i]
if msg == nil || msg.Extra == nil {
continue
}
kind, ok := msg.Extra[executeHistoryKindKey].(string)
if !ok {
continue
}
switch strings.TrimSpace(kind) {
case executeHistoryKindLoopClosed, executeHistoryKindStepAdvanced:
return i
}
}
return -1
}
func trimExecuteMessage1ByBudget(content string) string {
content = strings.TrimSpace(content)
if content == "" {
return ""
}
runes := []rune(content)
if len(runes) <= executeMessage1MaxRunes {
return content
}
if executeMessage1MaxRunes <= 3 {
return string(runes[:executeMessage1MaxRunes])
}
return string(runes[:executeMessage1MaxRunes-3]) + "..."
}
// buildExecuteMessage1V3 负责把真实对话流 + 上一轮 loop 归档并入 msg1并统一做长度裁剪。
// buildExecuteMessage1V3 只渲染"真实对话流 + 阶段锚点"
//
// 改造说明:
// 1. msg1 从人工提炼的摘要变为真实对话流,只注入 user + assistant speak
// 2. tool_call / observation 不在 msg1 中重复(已由 msg2 承载
// 3. 超出 executeConversationTurnLimit 的早期对话不注入,由 ReAct 摘要兜底。
// 1. msg1 只保留 user + assistant speak 组成的真实对话历史,全量注入
// 2. tool_call / observation 一律由 msg2 承载,这里不再重复
// 3. 不再从历史中"归档"上一轮 ReAct 结果到 msg1——归档搬运逻辑已随 splitExecuteLoopRecordsByBoundary 一并移除;
// 4. token 预算由统一压缩层兜底prompt 层不做提前裁剪。
func buildExecuteMessage1V3(ctx *newagentmodel.ConversationContext) string {
lines := []string{"历史上下文:"}
if ctx == nil {
lines = append(lines,
"- 对话历史:暂无。",
"- 阶段锚点:按当前工具事实推进执行。",
"- 历史归档 ReAct 摘要:暂无。",
"- 历史归档 ReAct 窗口:暂无。",
"- 当前循环早期摘要:暂无。",
)
return strings.Join(lines, "\n")
}
history := ctx.HistorySnapshot()
// 注入真实对话流user + assistant speak全量放入不再限制轮数和单条长度。
turns := collectExecuteConversationTurns(history)
turns := collectExecuteConversationTurns(ctx.HistorySnapshot())
if len(turns) == 0 {
lines = append(lines, "- 对话历史:暂无。")
} else {
@@ -180,16 +106,15 @@ func buildExecuteMessage1V3(ctx *newagentmodel.ConversationContext) string {
lines = append(lines, "- 阶段锚点:按当前工具事实推进,不做无依据操作。")
}
archivedLoops, activeLoops := splitExecuteLoopRecordsByBoundary(history)
lines = append(lines, "- 历史归档 ReAct 摘要:"+buildEarlyExecuteReactSummary(archivedLoops, executeLoopWindowLimit))
lines = append(lines, renderArchivedExecuteLoopWindowForMessage1V3(archivedLoops))
lines = append(lines, "- 当前循环早期摘要:"+buildEarlyExecuteReactSummary(activeLoops, executeLoopWindowLimit))
return strings.Join(lines, "\n")
}
// buildExecuteMessage2V3 承载当前活跃 loop 的全部记录。
// 若是新一轮刚开始(活跃 loop 为空),明确返回已清空状态。
// 不再限制窗口大小token 预算由 execute 层统一管理。
// buildExecuteMessage2V3 承载当前会话中全部 ReAct Loop 记录。
//
// 改造说明:
// 1. 不再按 execute_loop_closed / execute_step_advanced 边界切分"归档/活跃"两段;
// 2. 直接从 history 提取全部 assistant tool_call + 对应 observation 作为当前 Loop 视图;
// 3. 新一轮刚开始(尚未产生 tool_call时返回明确占位方便模型识别"干净起点"。
func buildExecuteMessage2V3(ctx *newagentmodel.ConversationContext) string {
lines := []string{"当轮 ReAct Loop 记录:"}
if ctx == nil {
@@ -197,31 +122,13 @@ func buildExecuteMessage2V3(ctx *newagentmodel.ConversationContext) string {
return strings.Join(lines, "\n")
}
_, activeLoops := splitExecuteLoopRecordsByBoundary(ctx.HistorySnapshot())
if len(activeLoops) == 0 {
loops := collectExecuteLoopRecords(ctx.HistorySnapshot())
if len(loops) == 0 {
lines = append(lines, "- 已清空(新一轮 loop 准备中)。")
return strings.Join(lines, "\n")
}
// 全量放入,不再限制窗口大小
for i, loop := range activeLoops {
lines = append(lines, fmt.Sprintf("%d) thought/reason%s", i+1, loop.Thought))
lines = append(lines, fmt.Sprintf(" tool_call%s", renderExecuteToolCallText(loop.ToolName, loop.ToolArgs)))
lines = append(lines, fmt.Sprintf(" observation%s", loop.Observation))
}
return strings.Join(lines, "\n")
}
func renderArchivedExecuteLoopWindowForMessage1V3(records []executeLoopRecord) string {
if len(records) == 0 {
return "- 历史归档 ReAct 窗口:暂无。"
}
windowLoops := tailExecuteLoops(records, executeLoopWindowLimit)
windowLoops = compressExecuteLoopObservationsByTool(windowLoops)
lines := []string{"历史归档 ReAct 窗口(由上一轮 msg2 并入):"}
for i, loop := range windowLoops {
for i, loop := range loops {
lines = append(lines, fmt.Sprintf("%d) thought/reason%s", i+1, loop.Thought))
lines = append(lines, fmt.Sprintf(" tool_call%s", renderExecuteToolCallText(loop.ToolName, loop.ToolArgs)))
lines = append(lines, fmt.Sprintf(" observation%s", loop.Observation))
@@ -525,51 +432,6 @@ func findExecuteThoughtBefore(history []*schema.Message, index int) string {
return "(未记录)"
}
func tailExecuteLoops(records []executeLoopRecord, limit int) []executeLoopRecord {
if len(records) == 0 {
return nil
}
if limit <= 0 || len(records) <= limit {
result := make([]executeLoopRecord, len(records))
copy(result, records)
return result
}
result := make([]executeLoopRecord, limit)
copy(result, records[len(records)-limit:])
return result
}
// compressExecuteLoopObservationsByTool 对窗口内重复工具做 observation 压缩。
func compressExecuteLoopObservationsByTool(records []executeLoopRecord) []executeLoopRecord {
if len(records) == 0 {
return records
}
latestIndexByTool := make(map[string]int, len(records))
for i := len(records) - 1; i >= 0; i-- {
key := strings.ToLower(strings.TrimSpace(records[i].ToolName))
if key == "" {
key = "unknown_tool"
}
if _, exists := latestIndexByTool[key]; !exists {
latestIndexByTool[key] = i
}
}
result := make([]executeLoopRecord, len(records))
copy(result, records)
for i := range result {
key := strings.ToLower(strings.TrimSpace(result[i].ToolName))
if key == "" {
key = "unknown_tool"
}
if latestIndexByTool[key] != i {
result[i].Observation = executeTrimmedObservationText
}
}
return result
}
func renderExecuteToolCallText(toolName, toolArgs string) string {
toolName = strings.TrimSpace(toolName)
if toolName == "" {
@@ -582,38 +444,6 @@ func renderExecuteToolCallText(toolName, toolArgs string) string {
return toolName + "(" + toolArgs + ")"
}
func buildEarlyExecuteReactSummary(records []executeLoopRecord, windowLimit int) string {
if len(records) == 0 {
return "暂无。"
}
if len(records) <= windowLimit {
return "无(当前窗口已覆盖全部 ReAct 记录)。"
}
early := records[:len(records)-windowLimit]
toolCounts := make(map[string]int, len(early))
for _, record := range early {
key := strings.TrimSpace(record.ToolName)
if key == "" {
key = "unknown_tool"
}
toolCounts[key]++
}
names := make([]string, 0, len(toolCounts))
for name := range toolCounts {
names = append(names, name)
}
sort.Strings(names)
parts := make([]string, 0, len(names))
for _, name := range names {
parts = append(parts, fmt.Sprintf("%s×%d", name, toolCounts[name]))
}
return fmt.Sprintf("已折叠 %d 条旧记录,涉及:%s。", len(early), strings.Join(parts, "、"))
}
func hasExecuteRoughBuildDone(ctx *newagentmodel.ConversationContext) bool {
if ctx == nil {
return false
@@ -725,3 +555,12 @@ func renderExecuteTaskClassIDs(state *newagentmodel.CommonState) string {
}
return fmt.Sprintf("task_class_ids=[%s]", strings.Join(parts, ","))
}
// renderExecuteMemoryContext extracts the memory text that the execute stage
// injects into msg3.
//
// 1. Reads only the unified memory_context, so other pinned blocks are never
// stuffed into the prompt by mistake.
// 2. Returns the empty string when there is no memory, keeping msg3 clean.
// 3. Reuses the unified memory renderer so every stage shares one memory
// entry point.
func renderExecuteMemoryContext(ctx *newagentmodel.ConversationContext) string {
return renderUnifiedMemoryContext(ctx)
}

View File

@@ -1,31 +0,0 @@
package newagentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
)
const executeMemoryContextKey = "memory_context"
// renderExecuteMemoryContext 提取 Execute 阶段需要补充到 msg3 的记忆文本。
//
// 步骤化说明:
// 1. 只白名单消费 memory_context避免把 execution_context / current_step 等 Execute 自有块再次注入;
// 2. 若 block 不存在或正文为空,直接返回空串,不给 msg3 留空段;
// 3. 这里不重新渲染记忆,只消费 agentsvc 已经产出的最终文本,保证所有阶段口径一致。
func renderExecuteMemoryContext(ctx *newagentmodel.ConversationContext) string {
if ctx == nil {
return ""
}
block, ok := ctx.PinnedBlockByKey(executeMemoryContextKey)
if !ok {
return ""
}
content := strings.TrimSpace(block.Content)
if content == "" {
return ""
}
return content
}

View File

@@ -2,7 +2,6 @@ package newagentprompt
import (
"fmt"
"strconv"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
@@ -49,14 +48,19 @@ func BuildPlanSystemPrompt() string {
// BuildPlanMessages 组装规划阶段的 messages。
//
// 职责边界:
// 1. 负责把 state + context 收敛成规划阶段模型输入;
// 2. 负责把置顶上下文和工具摘要放在 history 前面,降低模型跑偏概率
// 3. 不负责解析模型输出,也不负责判断规划质量
// 1. 负责把 state + context 收敛成统一 4 段式规划阶段模型输入;
// 2. 负责解析模型输出,也不负责判断规划质量
// 3. msg3 中的状态文本由本函数显式传入,确保统一骨架下仍能看到完整计划与阶段信息
func BuildPlanMessages(state *newagentmodel.CommonState, ctx *newagentmodel.ConversationContext, userInput string) []*schema.Message {
return buildStageMessages(
BuildPlanSystemPrompt(),
return buildUnifiedStageMessages(
ctx,
BuildPlanUserPrompt(state, userInput),
StageMessagesConfig{
SystemPrompt: BuildPlanSystemPrompt(),
Msg1Content: buildPlanConversationMessage(ctx),
Msg2Content: buildPlanWorkspace(state),
Msg3Suffix: BuildPlanUserPrompt(state, userInput),
Msg3Role: schema.User,
},
)
}
@@ -64,21 +68,9 @@ func BuildPlanMessages(state *newagentmodel.CommonState, ctx *newagentmodel.Conv
func BuildPlanUserPrompt(state *newagentmodel.CommonState, userInput string) string {
var sb strings.Builder
sb.WriteString("请继续当前任务的规划阶段。\n")
sb.WriteString(renderStateSummary(state))
sb.WriteString("\n")
sb.WriteString("本轮目标:围绕当前任务继续规划,直到形成一份稳定、可执行的自然语言 plan或在信息不足时明确追问用户。\n\n")
sb.WriteString("请继续当前任务的规划阶段,严格输出 JSON。\n")
sb.WriteString("目标:围绕最近对话和规划工作区信息,产出一份稳定、可执行的自然语言计划;若关键信息不足,请明确 ask_user。\n\n")
sb.WriteString(BuildPlanDecisionContractText())
sb.WriteString("\n")
if state != nil && len(state.TaskClassIDs) > 0 {
parts := make([]string, len(state.TaskClassIDs))
for i, id := range state.TaskClassIDs {
parts[i] = strconv.Itoa(id)
}
sb.WriteString(fmt.Sprintf("\n本次排课请求涉及的任务类 ID前端传入[%s]\n", strings.Join(parts, ", ")))
sb.WriteString("规划时请结合上述任务类 ID 判断是否需要粗排needs_rough_build并在 plan_steps 中体现排课意图。\n")
}
trimmedInput := strings.TrimSpace(userInput)
if trimmedInput != "" {

View File

@@ -0,0 +1,133 @@
package newagentprompt
import (
"fmt"
"strconv"
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
)
// buildPlanConversationMessage renders the real conversation view the plan node
// sees, delegating to the shared history renderer with a plan-specific title.
func buildPlanConversationMessage(ctx *newagentmodel.ConversationContext) string {
	return buildConversationHistoryMessage(ctx, "规划参考对话")
}
// buildPlanWorkspace renders the plan node's own workspace (msg2).
//
// Design notes:
// 1. Keeps only what planning truly needs: the existing plan, the current step,
//    task_class_ids, and task-class constraints;
// 2. Deliberately avoids the generic "fat" state summary so execute/deliver
//    state never leaks into the plan prompt;
// 3. When no formal plan exists yet, it says so explicitly, so the model plans
//    from scratch instead of reusing stale context.
func buildPlanWorkspace(state *newagentmodel.CommonState) string {
	sections := make([]string, 0, 8)
	sections = append(sections, "规划工作区:")
	if state == nil {
		sections = append(sections, "- 当前缺少流程状态,请主要依据最近对话与本轮输入继续规划。")
		return strings.Join(sections, "\n")
	}
	if state.HasPlan() {
		sections = append(sections,
			fmt.Sprintf("- 已有计划:共 %d 步。", len(state.PlanSteps)),
			renderPlanCurrentStepSummary(state),
			"计划简表:",
			renderPlanStepOutline(state.PlanSteps),
		)
	} else {
		sections = append(sections, "- 当前还没有正式计划。")
	}
	if ids := renderPlanTaskClassIDs(state); ids != "" {
		sections = append(sections, "- "+ids)
	}
	if meta := renderPlanTaskClassMeta(state); meta != "" {
		sections = append(sections, "任务类约束:", meta)
	}
	return strings.Join(sections, "\n")
}
// renderPlanCurrentStepSummary reports the current plan-step progress the plan
// node needs, as a single workspace bullet line.
func renderPlanCurrentStepSummary(state *newagentmodel.CommonState) string {
	if state == nil || !state.HasPlan() {
		return "- 当前步骤:暂无。"
	}
	current, total := state.PlanProgress()
	step, ok := state.CurrentPlanStep()
	if !ok {
		return fmt.Sprintf("- 当前步骤:计划共 %d 步,当前没有可继续沿用的有效步骤。", total)
	}
	body := strings.TrimSpace(step.Content)
	if body == "" {
		body = "(当前步骤正文为空)"
	}
	var b strings.Builder
	fmt.Fprintf(&b, "- 当前步骤:第 %d/%d 步,%s", current, total, body)
	// Append the completion criterion only when the step actually defines one.
	if criteria := strings.TrimSpace(step.DoneWhen); criteria != "" {
		fmt.Fprintf(&b, ";完成判定:%s", criteria)
	}
	return b.String()
}
// renderPlanStepOutline compacts the full plan into a numbered table the plan
// node can read at a glance.
func renderPlanStepOutline(steps []newagentmodel.PlanStep) string {
	if len(steps) == 0 {
		return "- 暂无。"
	}
	rows := make([]string, len(steps))
	for i := range steps {
		body := strings.TrimSpace(steps[i].Content)
		if body == "" {
			body = "(步骤正文为空)"
		}
		row := fmt.Sprintf("%d. %s", i+1, body)
		// Only steps with a non-blank completion criterion carry the suffix.
		if criteria := strings.TrimSpace(steps[i].DoneWhen); criteria != "" {
			row += fmt.Sprintf(" | 完成判定:%s", criteria)
		}
		rows[i] = row
	}
	return strings.Join(rows, "\n")
}
// renderPlanTaskClassIDs returns the task_class_ids line used in batch
// scheduling scenarios, or "" when the state carries none.
func renderPlanTaskClassIDs(state *newagentmodel.CommonState) string {
	if state == nil || len(state.TaskClassIDs) == 0 {
		return ""
	}
	var b strings.Builder
	b.WriteString("task_class_ids=[")
	for i, id := range state.TaskClassIDs {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(strconv.Itoa(id))
	}
	b.WriteString("]")
	return b.String()
}
// renderPlanTaskClassMeta returns the task-class boundaries the plan node
// really needs to see.
//
// Notes:
// 1. Only name / strategy / total slots / date range are kept — the fields
//    relevant to planning;
// 2. Fields are not dumped verbatim, keeping the workspace lean;
// 3. Empty fields are simply omitted rather than rendered as noise.
func renderPlanTaskClassMeta(state *newagentmodel.CommonState) string {
	if state == nil || len(state.TaskClasses) == 0 {
		return ""
	}
	rows := make([]string, 0, len(state.TaskClasses))
	for _, class := range state.TaskClasses {
		var b strings.Builder
		fmt.Fprintf(&b, "- [ID=%d] %s", class.ID, strings.TrimSpace(class.Name))
		if policy := strings.TrimSpace(class.Strategy); policy != "" {
			fmt.Fprintf(&b, ";策略:%s", policy)
		}
		if class.TotalSlots > 0 {
			fmt.Fprintf(&b, ";总时段预算:%d", class.TotalSlots)
		}
		if class.StartDate != "" || class.EndDate != "" {
			fmt.Fprintf(&b, ";日期范围:%s ~ %s", class.StartDate, class.EndDate)
		}
		rows = append(rows, b.String())
	}
	return strings.Join(rows, "\n")
}

View File

@@ -0,0 +1,212 @@
package newagentprompt
import (
"strings"
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
"github.com/cloudwego/eino/schema"
)
// ConversationTurn represents one natural-language exchange in the conversation history.
//
// Responsibility boundaries:
// 1. Carries only user messages and assistant speak replies — never tool_calls or tool observations;
// 2. Shared by the chat / plan / deliver nodes so each does not maintain its own extraction logic;
// 3. Does no length trimming; the length budget is handled by the unified compaction layer.
type ConversationTurn struct {
	// Role is "user" or "assistant" (as produced by CollectConversationTurns).
	Role string
	// Content is the trimmed message text of this turn.
	Content string
}
// StageMessagesConfig describes the content blocks each node supplies under the
// unified four-part message skeleton.
//
// Design goals:
// 1. The unified layer only decides HOW the four messages are assembled, never
//    WHAT each node puts into them;
// 2. Msg1 / Msg2 / Msg3Prefix / Msg3Suffix are all rendered by the node itself,
//    so chat / plan / deliver stop reusing execute's content templates;
// 3. memory_context is still injected into msg3 by the unified layer through a
//    single entry point, preventing duplicate injection.
type StageMessagesConfig struct {
	// SystemPrompt is the node's own system prompt.
	SystemPrompt string
	// Msg1Content is the 2nd (assistant) message, usually the node's view of history.
	Msg1Content string
	// Msg2Content is the 3rd (assistant) message, usually the node's own workspace / extra constraints.
	Msg2Content string
	// Msg3Prefix is the part of the 4th message placed BEFORE memory_context.
	// Typical content: stage state, planning workspace summary, delivery constraints.
	Msg3Prefix string
	// Msg3Suffix is the part of the 4th message placed AFTER memory_context.
	// For user-role nodes this usually carries the final user instruction so the
	// message ends with user input.
	Msg3Suffix string
	// Msg3Role selects the role of the 4th message.
	// Execute keeps using system; the other nodes generally use user.
	Msg3Role schema.RoleType
}
// buildUnifiedStageMessages assembles the unified 4-part message skeleton.
//
// Fixed layout:
// 1. msg0 (system): system rules + stage rules + compact tool catalog;
// 2. msg1 (assistant): the node's custom history view;
// 3. msg2 (assistant): the node's custom workspace;
// 4. msg3 (user/system): node-provided prefix/suffix + unified memory_context.
func buildUnifiedStageMessages(
	ctx *newagentmodel.ConversationContext,
	config StageMessagesConfig,
) []*schema.Message {
	messages := make([]*schema.Message, 0, 4)
	messages = append(messages, schema.SystemMessage(buildUnifiedMsg0(config.SystemPrompt, ctx)))
	messages = append(messages, &schema.Message{Role: schema.Assistant, Content: buildUnifiedMsg1(config.Msg1Content)})
	messages = append(messages, &schema.Message{Role: schema.Assistant, Content: buildUnifiedMsg2(config.Msg2Content)})
	messages = append(messages, buildUnifiedMsg3Message(buildUnifiedMsg3(ctx, config), config.Msg3Role))
	return messages
}
// buildUnifiedMsg3Message picks the role of the 4th message: user when the
// config asks for it, system otherwise.
func buildUnifiedMsg3Message(content string, role schema.RoleType) *schema.Message {
	if role != schema.User {
		return schema.SystemMessage(content)
	}
	return schema.UserMessage(content)
}
// buildUnifiedMsg0 merges the system prompts with the compact tool catalog to
// produce msg0.
//
// Step-by-step:
// 1. Merge the base system prompt with the stage prompt so the model identity stays stable;
// 2. If the current node injected tool schemas, append the compact tool catalog;
// 3. If both parts are empty, fall back to a minimal prompt so msg0 is never blank.
func buildUnifiedMsg0(stageSystemPrompt string, ctx *newagentmodel.ConversationContext) string {
	merged := strings.TrimSpace(mergeSystemPrompts(ctx, stageSystemPrompt))
	if merged == "" {
		merged = "你是 SmartMate 助手,请继续当前阶段。"
	}
	if catalog := renderExecuteToolCatalogCompact(ctx); catalog != "" {
		merged += "\n\n" + catalog
	}
	return merged
}
// buildUnifiedMsg1 returns the node-provided history view.
//
// Notes:
// 1. The unified layer no longer bakes in an execute-style ReAct summary;
// 2. When the node supplies nothing, a minimal placeholder keeps the four-part
//    structure stable;
// 3. The compaction layer still measures and compresses this message.
func buildUnifiedMsg1(content string) string {
	if trimmed := strings.TrimSpace(content); trimmed != "" {
		return trimmed
	}
	return "历史上下文:暂无。"
}
// buildUnifiedMsg2 returns the node-provided workspace.
//
// Notes:
// 1. Non-execute nodes may have their own msg2 — the unified layer no longer
//    forces a "nothing here" semantic on them;
// 2. When the node has no extra workspace yet, a minimal placeholder keeps the
//    structure stable.
func buildUnifiedMsg2(content string) string {
	if trimmed := strings.TrimSpace(content); trimmed != "" {
		return trimmed
	}
	return "阶段工作区:暂无。"
}
// buildUnifiedMsg3 assembles msg3: prefix + memory_context + suffix.
//
// Step-by-step:
// 1. The prefix is node-defined, suited to light state or stage constraints;
// 2. memory_context is injected exactly once here, preventing pinned blocks
//    from reappearing through multiple entry points;
// 3. The suffix is node-defined; user-role nodes usually place the final user
//    instruction there so the message still ends with user input.
func buildUnifiedMsg3(ctx *newagentmodel.ConversationContext, config StageMessagesConfig) string {
	sections := make([]string, 0, 3)
	if head := strings.TrimSpace(config.Msg3Prefix); head != "" {
		sections = append(sections, head)
	}
	if memory := renderUnifiedMemoryContext(ctx); memory != "" {
		sections = append(sections, "相关记忆(仅在确有帮助时参考,不要机械复述):\n"+memory)
	}
	if tail := strings.TrimSpace(config.Msg3Suffix); tail != "" {
		sections = append(sections, tail)
	}
	if len(sections) == 0 {
		return "请继续当前阶段。"
	}
	return strings.Join(sections, "\n\n")
}
// renderUnifiedMemoryContext extracts the memory text to append to msg3.
//
// Step-by-step:
// 1. Consumes only memory_context, so stage-private blocks such as
//    execution_context / current_step never flow back into the prompt;
// 2. Returns "" when the block is absent or its body is blank;
// 3. Reads only the final text already produced by agentsvc — no memory is
//    re-assembled here.
func renderUnifiedMemoryContext(ctx *newagentmodel.ConversationContext) string {
	if ctx == nil {
		return ""
	}
	if block, ok := ctx.PinnedBlockByKey("memory_context"); ok {
		// A whitespace-only body trims to "", which callers treat as absent.
		return strings.TrimSpace(block.Content)
	}
	return ""
}
// CollectConversationTurns extracts the user + assistant-speak flow from the
// message history.
//
// Extraction rules:
// 1. Keeps user messages (excluding correction prompts) and assistant
//    plain-text messages only;
// 2. Assistant tool_call messages and tool observations are not part of the
//    "real conversation";
// 3. Returned order matches the original history.
func CollectConversationTurns(history []*schema.Message) []ConversationTurn {
	if len(history) == 0 {
		return nil
	}
	collected := make([]ConversationTurn, 0, len(history))
	for _, msg := range history {
		if msg == nil {
			continue
		}
		content := strings.TrimSpace(msg.Content)
		if content == "" {
			continue
		}
		switch msg.Role {
		case schema.User:
			// Skip backend-injected correction prompts so correction copy is
			// never mistaken for genuine user intent.
			if !isExecuteCorrectionPrompt(msg) {
				collected = append(collected, ConversationTurn{Role: "user", Content: content})
			}
		case schema.Assistant:
			// Skip tool-call messages; keep only replies addressed to the user.
			if len(msg.ToolCalls) == 0 {
				collected = append(collected, ConversationTurn{Role: "assistant", Content: content})
			}
		}
	}
	return collected
}

View File

@@ -27,6 +27,8 @@ const (
ExecuteTokenBudget = 80000
// msg0 + msg3 固定开销 + 安全余量
ExecuteReserveTokens = 8000
StageTokenBudget = ExecuteTokenBudget
StageReserveTokens = ExecuteReserveTokens
)
// MaxContextTokensByModel 返回指定模型的最大上下文 token。
@@ -151,8 +153,8 @@ func isCJK(r rune) bool {
return unicode.Is(unicode.Han, r) || unicode.Is(unicode.Hiragana, r) || unicode.Is(unicode.Katakana, r) || unicode.Is(unicode.Hangul, r)
}
// ExecuteTokenBreakdown 是 Execute 阶段四条消息的 token 分布。
type ExecuteTokenBreakdown struct {
// StageTokenBreakdown 记录四条阶段消息的 token 分布。
type StageTokenBreakdown struct {
Msg0 int `json:"msg0"`
Msg1 int `json:"msg1"`
Msg2 int `json:"msg2"`
@@ -161,31 +163,47 @@ type ExecuteTokenBreakdown struct {
Budget int `json:"budget"`
}
// EstimateExecuteMessagesTokens 估算 Execute 四条消息的 token 分布
func EstimateExecuteMessagesTokens(msg0, msg1, msg2, msg3 string) ExecuteTokenBreakdown {
b := ExecuteTokenBreakdown{
// ExecuteTokenBreakdown 保留为历史兼容别名,避免旧调用点改动
type ExecuteTokenBreakdown = StageTokenBreakdown
// EstimateStageMessagesTokens 估算四条阶段消息的 token 分布。
func EstimateStageMessagesTokens(msg0, msg1, msg2, msg3 string) StageTokenBreakdown {
b := StageTokenBreakdown{
Msg0: EstimateTextTokens(msg0),
Msg1: EstimateTextTokens(msg1),
Msg2: EstimateTextTokens(msg2),
Msg3: EstimateTextTokens(msg3),
Budget: ExecuteTokenBudget,
Budget: StageTokenBudget,
}
b.Total = b.Msg0 + b.Msg1 + b.Msg2 + b.Msg3
return b
}
// CheckExecuteTokenBudget 检查是否超出 token 预算
// 返回 breakdown、是否超限、是否需要压缩 msg1、是否需要压缩 msg2。
func CheckExecuteTokenBudget(msg0, msg1, msg2, msg3 string) (breakdown ExecuteTokenBreakdown, overBudget bool, needCompactMsg1 bool, needCompactMsg2 bool) {
breakdown = EstimateExecuteMessagesTokens(msg0, msg1, msg2, msg3)
overBudget = breakdown.Total > ExecuteTokenBudget
// CheckStageTokenBudget 检查是否超出阶段预算,并给出需要压缩的消息标记
//
// 1. 先计算四条消息的 token 分布,便于后续日志和统计。
// 2. 如果总量没有超预算,直接返回。
// 3. 如果超预算,则按 msg1 / msg2 的相对占比判断是否需要分别压缩。
func CheckStageTokenBudget(msg0, msg1, msg2, msg3 string) (breakdown StageTokenBreakdown, overBudget bool, needCompactMsg1 bool, needCompactMsg2 bool) {
breakdown = EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
overBudget = breakdown.Total > StageTokenBudget
if !overBudget {
return
}
// msg1 超过可用预算的一半时需要压缩
available := ExecuteTokenBudget - ExecuteReserveTokens
// msg1 过大时,优先压缩历史对话。
available := StageTokenBudget - StageReserveTokens
needCompactMsg1 = breakdown.Msg1 > available/2
// 压缩 msg1 后仍超限,压缩 msg2
needCompactMsg2 = (breakdown.Total - breakdown.Msg1 + available/4) > ExecuteTokenBudget
// 压缩 msg1 后仍超限,压缩执行记录区。
needCompactMsg2 = (breakdown.Total - breakdown.Msg1 + available/4) > StageTokenBudget
return
}
// EstimateExecuteMessagesTokens keeps the old name for existing call sites and
// delegates to the stage-budget implementation.
func EstimateExecuteMessagesTokens(msg0, msg1, msg2, msg3 string) StageTokenBreakdown {
	return EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
}
// CheckExecuteTokenBudget keeps the old name for existing call sites and
// delegates to the stage-budget implementation.
func CheckExecuteTokenBudget(msg0, msg1, msg2, msg3 string) (breakdown StageTokenBreakdown, overBudget bool, needCompactMsg1 bool, needCompactMsg2 bool) {
	return CheckStageTokenBudget(msg0, msg1, msg2, msg3)
}

View File

@@ -3,14 +3,12 @@ package agentsvc
import (
"context"
"encoding/json"
"errors"
"log"
"strconv"
"strings"
"time"
agentchat "github.com/LoveLosita/smartflow/backend/agent/chat"
agentrouter "github.com/LoveLosita/smartflow/backend/agent/router"
"github.com/LoveLosita/smartflow/backend/conv"
"github.com/LoveLosita/smartflow/backend/dao"
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
@@ -21,7 +19,6 @@ import (
newagentmodel "github.com/LoveLosita/smartflow/backend/newAgent/model"
newagenttools "github.com/LoveLosita/smartflow/backend/newAgent/tools"
"github.com/LoveLosita/smartflow/backend/pkg"
"github.com/LoveLosita/smartflow/backend/respond"
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
"github.com/cloudwego/eino-ext/components/model/ark"
"github.com/cloudwego/eino/schema"
@@ -129,10 +126,6 @@ func (s *AgentService) PersistChatHistory(ctx context.Context, payload model.Cha
payload.Message,
payload.ReasoningContent,
payload.ReasoningDurationSeconds,
payload.RetryGroupID,
payload.RetryIndex,
payload.RetryFromUserMessageID,
payload.RetryFromAssistantMessageID,
payload.TokensConsumed,
)
}
@@ -159,112 +152,6 @@ func mergeAgentReasoningText(parts ...string) string {
return strings.Join(merged, "\n\n")
}
// chatRetryMeta carries the retry lineage of one chat round: the retry group it
// belongs to, its index inside the group, and the user/assistant messages the
// retry was issued against. All accessors tolerate a nil receiver.
type chatRetryMeta struct {
	GroupID                string
	Index                  int
	FromUserMessageID      int
	FromAssistantMessageID int
}

// GroupIDPtr returns the trimmed group id, or nil when the meta is nil or the
// id is blank.
func (m *chatRetryMeta) GroupIDPtr() *string {
	if m == nil {
		return nil
	}
	if trimmed := strings.TrimSpace(m.GroupID); trimmed != "" {
		return &trimmed
	}
	return nil
}

// IndexPtr returns the retry index when positive, otherwise nil.
func (m *chatRetryMeta) IndexPtr() *int {
	if m != nil && m.Index > 0 {
		v := m.Index
		return &v
	}
	return nil
}

// FromUserMessageIDPtr returns the source user-message id when positive, otherwise nil.
func (m *chatRetryMeta) FromUserMessageIDPtr() *int {
	if m != nil && m.FromUserMessageID > 0 {
		v := m.FromUserMessageID
		return &v
	}
	return nil
}

// FromAssistantMessageIDPtr returns the source assistant-message id when positive, otherwise nil.
func (m *chatRetryMeta) FromAssistantMessageIDPtr() *int {
	if m != nil && m.FromAssistantMessageID > 0 {
		v := m.FromAssistantMessageID
		return &v
	}
	return nil
}

// CacheExtra renders the retry fields as message-extra entries for the cache
// layer, or nil when there is no complete group-id + index pair.
// Note: it stores the raw (untrimmed) GroupID, matching the original write path.
func (m *chatRetryMeta) CacheExtra() map[string]any {
	if m == nil {
		return nil
	}
	if strings.TrimSpace(m.GroupID) == "" || m.Index <= 0 {
		return nil
	}
	fields := map[string]any{
		"retry_group_id": m.GroupID,
		"retry_index":    m.Index,
	}
	if m.FromUserMessageID > 0 {
		fields["retry_from_user_message_id"] = m.FromUserMessageID
	}
	if m.FromAssistantMessageID > 0 {
		fields["retry_from_assistant_message_id"] = m.FromAssistantMessageID
	}
	return fields
}
// buildChatRetryMeta derives the retry metadata for a chat request from the
// request's extra map. It returns (nil, nil) for non-retry requests, and an
// error when a retry request references invalid parent messages.
//
// Side-effect order matters: validate parents -> seed the retry group in the
// repo -> best-effort update the cache -> fetch the next index.
func (s *AgentService) buildChatRetryMeta(ctx context.Context, userID int, chatID string, extra map[string]any) (*chatRetryMeta, error) {
	if len(extra) == 0 {
		return nil, nil
	}
	requestMode := strings.ToLower(strings.TrimSpace(readAgentExtraString(extra, "request_mode")))
	if requestMode != "retry" {
		return nil, nil
	}
	// Reuse the caller-supplied group id when present; otherwise open a new group.
	groupID := strings.TrimSpace(readAgentExtraString(extra, "retry_group_id"))
	if groupID == "" {
		groupID = uuid.NewString()
	}
	sourceUserMessageID := readAgentExtraInt(extra, "retry_from_user_message_id")
	sourceAssistantMessageID := readAgentExtraInt(extra, "retry_from_assistant_message_id")
	// 1. A retry request must point at the exact user+assistant round being retried.
	// 2. Without valid parent ids, writing on would only create an orphaned index=1 retry pair.
	// 3. So reject the request outright and let the frontend refresh history — safer than silently writing dirty data.
	if sourceUserMessageID <= 0 || sourceAssistantMessageID <= 0 {
		return nil, errors.New("重试请求缺少有效的父消息ID请刷新会话后重试")
	}
	// 4. Further verify the parents belong to this user and conversation with the right role semantics.
	// 5. Even if the frontend sends a placeholder or cross-wired id, the backend will not persist it wrongly.
	if err := s.repo.ValidateRetrySourceMessages(ctx, userID, chatID, sourceUserMessageID, sourceAssistantMessageID); err != nil {
		return nil, errors.New("重试引用的父消息无效,请刷新会话后重试")
	}
	if err := s.repo.EnsureRetryGroupSeed(ctx, userID, chatID, groupID, sourceUserMessageID, sourceAssistantMessageID); err != nil {
		return nil, err
	}
	// Cache update is best-effort: a failure is logged, not propagated.
	if s.agentCache != nil && (sourceUserMessageID > 0 || sourceAssistantMessageID > 0) {
		if cacheErr := s.agentCache.ApplyRetrySeed(ctx, chatID, groupID, sourceUserMessageID, sourceAssistantMessageID); cacheErr != nil {
			log.Printf("更新重试分组缓存失败 chat=%s group=%s err=%v", chatID, groupID, cacheErr)
		}
	}
	nextIndex, err := s.repo.GetRetryGroupNextIndex(ctx, userID, chatID, groupID)
	if err != nil {
		return nil, err
	}
	return &chatRetryMeta{
		GroupID:                groupID,
		Index:                  nextIndex,
		FromUserMessageID:      sourceUserMessageID,
		FromAssistantMessageID: sourceAssistantMessageID,
	}, nil
}
func readAgentExtraString(extra map[string]any, key string) string {
if len(extra) == 0 {
return ""
@@ -400,9 +287,9 @@ func (s *AgentService) runNormalChatFlow(
selectedModel *ark.ChatModel,
resolvedModelName string,
userMessage string,
userPersisted bool,
assistantReasoningPrefix string,
assistantReasoningStartedAt *time.Time,
retryMeta *chatRetryMeta,
ifThinking bool,
userID int,
chatID string,
@@ -481,10 +368,8 @@ func (s *AgentService) runNormalChatFlow(
// 7. 后置持久化(用户消息):
// 7.1 先写 Redis保证“最新会话上下文”可立即用于下一轮推理
// 7.2 再走可靠持久化入口outbox 或同步 DB
if !userPersisted {
userMsg := &schema.Message{Role: schema.User, Content: userMessage}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
userMsg.Extra = retryExtra
}
if err = s.agentCache.PushMessage(ctx, chatID, userMsg); err != nil {
log.Printf("写入用户消息到 Redis 失败: %v", err)
}
@@ -496,11 +381,7 @@ func (s *AgentService) runNormalChatFlow(
Message: userMessage,
ReasoningContent: "",
ReasoningDurationSeconds: 0,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
// 口径B用户消息固定记 0本轮总 token 统一记在助手消息。
// 口径 B用户消息固定记 0本轮总 token 统一记在助手消息。
TokensConsumed: 0,
}); err != nil {
pushErrNonBlocking(errChan, err)
@@ -515,10 +396,10 @@ func (s *AgentService) runNormalChatFlow(
userMessage,
"",
0,
retryMeta,
requestStart,
),
)
}
// 普通聊天链路也需要把助手回复写入 Redis
// 否则会出现“数据库有助手消息,但 Redis 最新会话只有用户消息”的口径不一致。
@@ -529,14 +410,6 @@ func (s *AgentService) runNormalChatFlow(
if reasoningDurationSeconds > 0 {
assistantMsg.Extra = map[string]any{"reasoning_duration_seconds": reasoningDurationSeconds}
}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
if assistantMsg.Extra == nil {
assistantMsg.Extra = make(map[string]any, len(retryExtra))
}
for key, value := range retryExtra {
assistantMsg.Extra[key] = value
}
}
if err = s.agentCache.PushMessage(context.Background(), chatID, assistantMsg); err != nil {
log.Printf("写入助手消息到 Redis 失败: %v", err)
}
@@ -548,10 +421,6 @@ func (s *AgentService) runNormalChatFlow(
Message: fullText,
ReasoningContent: assistantReasoning,
ReasoningDurationSeconds: reasoningDurationSeconds,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
// 口径B助手消息记录“本轮请求总 token”。
TokensConsumed: requestTotalTokens,
}); saveErr != nil {
@@ -566,7 +435,6 @@ func (s *AgentService) runNormalChatFlow(
fullText,
assistantReasoning,
reasoningDurationSeconds,
retryMeta,
time.Now(),
),
)
@@ -591,196 +459,3 @@ func (s *AgentService) AgentChat(ctx context.Context, userMessage string, thinki
return outChan, errChan
}
// agentChatOld is a backup of the old routing logic, kept temporarily for rollback.
// It routes one user message to the right flow (chat / quick note / task query /
// schedule plan / refine), streams the reply over outChan, and reports failures
// over errChan.
// TODO: delete once the new graph is stable.
func (s *AgentService) agentChatOld(ctx context.Context, userMessage string, thinkingMode string, modelName string, userID int, chatID string, extra map[string]any) (<-chan string, <-chan error) {
	ifThinking := thinkingModeToBool(thinkingMode)
	requestStart := time.Now()
	traceID := uuid.NewString()
	outChan := make(chan string, 256)
	errChan := make(chan error, 1)
	// 0. Initialize the request-level token meter that aggregates all model spend of this request.
	requestCtx, _ := withRequestTokenMeter(ctx)
	// 1) Normalize the conversation ID and pick the model.
	chatID = normalizeConversationID(chatID)
	selectedModel, resolvedModelName := s.pickChatModel(modelName)
	// 2) Ensure the conversation exists (cache first, fall back to DB and create if needed).
	// 2.1 Check the Redis conversation flag first; a hit skips the DB existence check.
	result, err := s.agentCache.GetConversationStatus(requestCtx, chatID)
	if err != nil {
		errChan <- err
		close(outChan)
		close(errChan)
		return outChan, errChan
	}
	if !result {
		// 2.2 Cache miss: fall back to the DB to confirm the conversation exists.
		innerResult, ifErr := s.repo.IfChatExists(requestCtx, userID, chatID)
		if ifErr != nil {
			errChan <- ifErr
			close(outChan)
			close(errChan)
			return outChan, errChan
		}
		if !innerResult {
			// 2.3 Not in the DB either: create a new conversation.
			if _, err = s.repo.CreateNewChat(userID, chatID); err != nil {
				errChan <- err
				close(outChan)
				close(errChan)
				return outChan, errChan
			}
		}
		// 2.4 Backfill the Redis conversation flag to speed up the next access.
		if err = s.agentCache.SetConversationStatus(requestCtx, chatID); err != nil {
			log.Printf("设置会话状态缓存失败 chat=%s: %v", chatID, err)
		}
	}
	retryMeta, err := s.buildChatRetryMeta(requestCtx, userID, chatID, extra)
	if err != nil {
		errChan <- err
		close(outChan)
		close(errChan)
		return outChan, errChan
	}
	// 3) Unified async dispatch:
	// 3.1 Run the generic control-code router first to decide the action (chat / quick_note_create / task_query);
	// 3.2 quick_note_create enters the quick-note graph;
	// 3.3 task_query enters the task-query tool-calling flow;
	// 3.4 chat streams a normal chat reply directly.
	go func() {
		defer close(outChan)
		// 3.1 Run the lightweight router to obtain the unified action.
		routing := s.decideActionRouting(requestCtx, selectedModel, userMessage)
		if routing.RouteFailed {
			// 3.1.1 A routing failure no longer falls back to chat.
			// 3.1.2 Return an internal error directly to avoid entering the wrong branch and emitting bogus content (e.g. scheduling JSON).
			pushErrNonBlocking(errChan, respond.RouteControlInternalError)
			return
		}
		// 3.2 chat: go straight down the normal chat path.
		if routing.Action == agentrouter.ActionChat {
			s.runNormalChatFlow(requestCtx, selectedModel, resolvedModelName, userMessage, "", nil, retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
			return
		}
		// 3.3 Non-chat branches emit an "accepted" stage first to reduce the user's feeling of no feedback while waiting.
		progress := newQuickNoteProgressEmitter(outChan, resolvedModelName, true)
		progress.Emit("request.accepted", routing.Detail)
		// 3.4 quick_note_create: run the quick-note graph.
		if routing.Action == agentrouter.ActionQuickNoteCreate {
			quickHandled, quickState, quickErr := s.tryHandleQuickNoteWithGraph(
				requestCtx,
				selectedModel,
				userMessage,
				userID,
				chatID,
				traceID,
				routing.TrustRoute,
				progress.Emit,
			)
			if quickErr != nil {
				// A graph failure does not abort the request; fall back to normal chat — availability first.
				log.Printf("随口记 graph 执行失败,回退普通聊天 trace_id=%s chat_id=%s err=%v", traceID, chatID, quickErr)
			}
			if quickHandled {
				// 3.4.1 Quick note handled: compose the final reply and emit it in the OpenAI-compatible format.
				progress.Emit("quick_note.reply.polishing", "正在结合你的话题润色回复。")
				quickReply := buildQuickNoteFinalReply(requestCtx, selectedModel, userMessage, quickState)
				if emitErr := emitSingleAssistantCompletion(outChan, resolvedModelName, quickReply); emitErr != nil {
					pushErrNonBlocking(errChan, emitErr)
					return
				}
				// 3.4.2 Run the unified post-reply persistence (Redis + outbox/DB) for the quick-note reply.
				requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
				s.persistChatAfterReply(requestCtx, userID, chatID, userMessage, quickReply, progress.HistoryText(), progress.DurationSeconds(time.Now()), retryMeta, 0, requestTotalTokens, errChan)
				// 3.4.3 The quick-note path also generates the conversation title asynchronously (first write only).
				s.ensureConversationTitleAsync(userID, chatID)
				return
			}
			// 3.4.4 Router misfire or the graph decided it is not a quick note: fall back to normal chat so the user can still talk.
			progress.Emit("quick_note.fallback", "当前输入不是随口记请求,切换到普通对话。")
			s.runNormalChatFlow(requestCtx, selectedModel, resolvedModelName, userMessage, progress.HistoryText(), progress.StartedAt(), retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
			return
		}
		// 3.5 task_query: run the task-query tool-calling flow.
		if routing.Action == agentrouter.ActionTaskQuery {
			reply, queryErr := s.runTaskQueryFlow(requestCtx, selectedModel, userMessage, userID, progress.Emit)
			if queryErr != nil {
				// 3.5.1 On failure, fall back to normal chat instead of aborting the request.
				log.Printf("任务查询 tool-calling 执行失败,回退普通聊天 trace_id=%s chat_id=%s err=%v", traceID, chatID, queryErr)
				progress.Emit("task_query.fallback", "任务查询暂不可用,先切回普通对话。")
				s.runNormalChatFlow(requestCtx, selectedModel, resolvedModelName, userMessage, progress.HistoryText(), progress.StartedAt(), retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
				return
			}
			// 3.5.2 On success, emit in the OpenAI-compatible format and run the unified post-reply persistence.
			if emitErr := emitSingleAssistantCompletion(outChan, resolvedModelName, reply); emitErr != nil {
				pushErrNonBlocking(errChan, emitErr)
				return
			}
			requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
			s.persistChatAfterReply(requestCtx, userID, chatID, userMessage, reply, progress.HistoryText(), progress.DurationSeconds(time.Now()), retryMeta, 0, requestTotalTokens, errChan)
			s.ensureConversationTitleAsync(userID, chatID)
			return
		}
		// 3.6 schedule_plan: run the smart-scheduling graph.
		if routing.Action == agentrouter.ActionSchedulePlanCreate {
			reply, planErr := s.runSchedulePlanFlow(requestCtx, selectedModel, userMessage, userID, chatID, traceID, extra, progress.Emit, outChan, resolvedModelName)
			if planErr != nil {
				log.Printf("智能排程 graph 执行失败,回退普通聊天 trace_id=%s chat_id=%s err=%v", traceID, chatID, planErr)
				progress.Emit("schedule_plan.fallback", "智能排程暂不可用,先切回普通对话。")
				s.runNormalChatFlow(requestCtx, selectedModel, resolvedModelName, userMessage, progress.HistoryText(), progress.StartedAt(), retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
				return
			}
			if emitErr := emitSingleAssistantCompletion(outChan, resolvedModelName, reply); emitErr != nil {
				pushErrNonBlocking(errChan, emitErr)
				return
			}
			requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
			s.persistChatAfterReply(requestCtx, userID, chatID, userMessage, reply, progress.HistoryText(), progress.DurationSeconds(time.Now()), retryMeta, 0, requestTotalTokens, errChan)
			s.ensureConversationTitleAsync(userID, chatID)
			return
		}
		// 3.7 schedule_plan_refine: run the continuous-refinement scheduling graph.
		if routing.Action == agentrouter.ActionSchedulePlanRefine {
			reply, refineErr := s.runScheduleRefineFlow(requestCtx, selectedModel, userMessage, userID, chatID, traceID, progress.Emit, outChan, resolvedModelName)
			if refineErr != nil {
				// A refinement failure no longer falls back to normal chat; report the error directly.
				pushErrNonBlocking(errChan, refineErr)
				return
			}
			if emitErr := emitSingleAssistantCompletion(outChan, resolvedModelName, reply); emitErr != nil {
				pushErrNonBlocking(errChan, emitErr)
				return
			}
			requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
			s.persistChatAfterReply(requestCtx, userID, chatID, userMessage, reply, progress.HistoryText(), progress.DurationSeconds(time.Now()), retryMeta, 0, requestTotalTokens, errChan)
			s.ensureConversationTitleAsync(userID, chatID)
			return
		}
		// 3.8 Unknown action fallback: normal chat, availability first.
		s.runNormalChatFlow(requestCtx, selectedModel, resolvedModelName, userMessage, progress.HistoryText(), progress.StartedAt(), retryMeta, ifThinking, userID, chatID, traceID, requestStart, outChan, errChan)
	}()
	return outChan, errChan
}

View File

@@ -118,7 +118,6 @@ func (s *AgentService) appendConversationHistoryCacheOptimistically(
merged = appendConversationHistoryItemIfMissing(merged, item)
}
sortConversationHistoryItems(merged)
merged = attachConversationRetryTotals(merged)
if err = s.cacheDAO.SetConversationHistoryToCache(ctx, userID, normalizedChatID, merged); err != nil {
log.Printf("乐观追加会话历史视图缓存失败 chat_id=%s: %v", normalizedChatID, err)
@@ -151,11 +150,9 @@ func buildConversationHistoryItemsFromDB(histories []model.ChatHistory) []model.
CreatedAt: history.CreatedAt,
ReasoningContent: strings.TrimSpace(derefConversationHistoryText(history.ReasoningContent)),
ReasoningDurationSeconds: history.ReasoningDurationSeconds,
RetryGroupID: cloneConversationStringPointer(history.RetryGroupID),
RetryIndex: cloneConversationIntPointer(history.RetryIndex),
})
}
return attachConversationRetryTotals(items)
return items
}
func derefConversationHistoryText(text *string) string {
@@ -165,58 +162,6 @@ func derefConversationHistoryText(text *string) string {
return *text
}
// attachConversationRetryTotals fills RetryTotal on every item that carries a
// retry group: the total is the maximum RetryIndex seen within that group.
// Items without a group id are left untouched. The slice is mutated in place
// and returned for convenience.
func attachConversationRetryTotals(items []model.GetConversationHistoryItem) []model.GetConversationHistoryItem {
	if len(items) == 0 {
		return items
	}
	// Pass 1: compute the highest retry index per non-blank group id.
	maxIndexByGroup := make(map[string]int)
	for _, item := range items {
		if item.RetryGroupID == nil || item.RetryIndex == nil {
			continue
		}
		gid := strings.TrimSpace(*item.RetryGroupID)
		if gid == "" {
			continue
		}
		if idx := *item.RetryIndex; idx > maxIndexByGroup[gid] {
			maxIndexByGroup[gid] = idx
		}
	}
	// Pass 2: attach the total to every item whose group has a positive max.
	for i := range items {
		if items[i].RetryGroupID == nil {
			continue
		}
		total := maxIndexByGroup[strings.TrimSpace(*items[i].RetryGroupID)]
		if total <= 0 {
			continue
		}
		totalCopy := total
		items[i].RetryTotal = &totalCopy
	}
	return items
}
// cloneConversationStringPointer returns a pointer to a trimmed copy of *src,
// or nil when src is nil or trims to the empty string.
func cloneConversationStringPointer(src *string) *string {
	if src == nil {
		return nil
	}
	if trimmed := strings.TrimSpace(*src); trimmed != "" {
		return &trimmed
	}
	return nil
}
// cloneConversationIntPointer returns a pointer to a copy of *src when it is
// positive; nil src or non-positive values yield nil.
func cloneConversationIntPointer(src *int) *int {
	if src == nil {
		return nil
	}
	if v := *src; v > 0 {
		return &v
	}
	return nil
}
func normalizeConversationHistoryRole(role string) string {
switch strings.ToLower(strings.TrimSpace(role)) {
case "user":
@@ -245,7 +190,6 @@ func buildOptimisticConversationHistoryItem(
content string,
reasoningContent string,
reasoningDurationSeconds int,
retryMeta *chatRetryMeta,
createdAt time.Time,
) model.GetConversationHistoryItem {
item := model.GetConversationHistoryItem{
@@ -258,11 +202,6 @@ func buildOptimisticConversationHistoryItem(
t := createdAt
item.CreatedAt = &t
}
if retryMeta != nil {
item.RetryGroupID = retryMeta.GroupIDPtr()
item.RetryIndex = retryMeta.IndexPtr()
item.RetryTotal = retryMeta.IndexPtr()
}
return item
}
@@ -284,26 +223,16 @@ func conversationHistoryItemSignature(item model.GetConversationHistoryItem) str
return fmt.Sprintf("id:%d", item.ID)
}
groupID := ""
if item.RetryGroupID != nil {
groupID = strings.TrimSpace(*item.RetryGroupID)
}
retryIndex := 0
if item.RetryIndex != nil {
retryIndex = *item.RetryIndex
}
createdAt := ""
if item.CreatedAt != nil {
createdAt = item.CreatedAt.UTC().Format(time.RFC3339Nano)
}
return fmt.Sprintf(
"%s|%s|%s|%s|%d|%d|%s",
"%s|%s|%s|%d|%s",
strings.TrimSpace(item.Role),
strings.TrimSpace(item.Content),
strings.TrimSpace(item.ReasoningContent),
groupID,
retryIndex,
item.ReasoningDurationSeconds,
createdAt,
)

View File

@@ -84,12 +84,7 @@ func (s *AgentService) runNewAgentGraph(
}
}
// 3. 构建重试元数据。
retryMeta, err := s.buildChatRetryMeta(requestCtx, userID, chatID, extra)
if err != nil {
pushErrNonBlocking(errChan, err)
return
}
// 3. retry 机制已下线,不再构建重试元数据。
// 4. 从 StateStore 加载或创建 RuntimeState。
// 恢复场景confirm/ask_user同时拿到快照中保存的 ConversationContext
@@ -137,6 +132,31 @@ func (s *AgentService) runNewAgentGraph(
}
}
cs = runtimeState.EnsureCommonState()
// 5.7 先把本轮用户输入落库,确保后续可见 assistant 消息按真实时间线追加。
userMsg := schema.UserMessage(userMessage)
if err := s.persistNewAgentConversationMessage(requestCtx, userID, chatID, userMsg, 0); err != nil {
pushErrNonBlocking(errChan, err)
return
}
persistVisibleMessage := func(persistCtx context.Context, state *newagentmodel.CommonState, msg *schema.Message) error {
targetState := state
if targetState == nil {
targetState = runtimeState.EnsureCommonState()
}
if targetState != nil {
if targetState.UserID <= 0 {
targetState.UserID = userID
}
if strings.TrimSpace(targetState.ConversationID) == "" {
targetState.ConversationID = chatID
}
}
return s.persistNewAgentConversationMessage(persistCtx, userID, chatID, msg, 0)
}
// 6. 构造 AgentGraphRequest。
var confirmAction string
if len(extra) > 0 {
@@ -179,6 +199,7 @@ func (s *AgentService) runNewAgentGraph(
ThinkingPlan: viper.GetBool("agent.thinking.plan"),
ThinkingExecute: viper.GetBool("agent.thinking.execute"),
ThinkingDeliver: viper.GetBool("agent.thinking.deliver"),
PersistVisibleMessage: persistVisibleMessage,
}
// 10. 构造 AgentGraphRunInput 并运行 graph。
@@ -197,12 +218,13 @@ func (s *AgentService) runNewAgentGraph(
pushErrNonBlocking(errChan, fmt.Errorf("graph 执行失败: %w", graphErr))
// Graph 出错时回退普通聊天,保证可用性。回退使用 Pro 模型。
s.runNormalChatFlow(requestCtx, s.AIHub.Pro, resolvedModelName, userMessage, "", nil, retryMeta, thinkingModeToBool(thinkingMode), userID, chatID, traceID, requestStart, outChan, errChan)
s.runNormalChatFlow(requestCtx, s.AIHub.Pro, resolvedModelName, userMessage, true, "", nil, thinkingModeToBool(thinkingMode), userID, chatID, traceID, requestStart, outChan, errChan)
return
}
// 11. 持久化聊天历史(用户消息 + 助手回复)。
s.persistChatAfterGraph(requestCtx, userID, chatID, userMessage, finalState, retryMeta, requestStart, outChan, errChan)
requestTotalTokens := snapshotRequestTokenMeter(requestCtx).TotalTokens
s.adjustNewAgentRequestTokenUsage(requestCtx, userID, chatID, requestTotalTokens)
// 11.5. 将最终状态快照异步写入 MySQL通过 outbox
// Deliver 节点已将快照保存到 Redis2h TTL此处通过 outbox 异步写入 MySQL 做永久存储。
if finalState != nil {
@@ -369,135 +391,89 @@ func (s *AgentService) loadConversationContext(ctx context.Context, chatID, user
return conversationContext
}
// persistChatAfterGraph graph 执行完成后持久化聊天历史
func (s *AgentService) persistChatAfterGraph(
// persistNewAgentConversationMessage 负责把 newAgent 链路里"真正对用户可见"的消息统一落到 Redis + MySQL
//
// 职责边界:
// 1. 只做单条消息的持久化,不做 graph 流程控制;
// 2. TokensConsumed 由调用方显式传入newAgent 逐条可见消息默认写 0
// 3. Redis 失败只记日志DB 失败返回错误,便于调用方决定是否中止当前链路。
func (s *AgentService) persistNewAgentConversationMessage(
ctx context.Context,
userID int,
chatID string,
userMessage string,
finalState *newagentmodel.AgentGraphState,
retryMeta *chatRetryMeta,
requestStart time.Time,
outChan chan<- string,
errChan chan error,
) {
if finalState == nil {
return
msg *schema.Message,
tokensConsumed int,
) error {
if s == nil || msg == nil {
return nil
}
role := strings.TrimSpace(string(msg.Role))
content := strings.TrimSpace(msg.Content)
if role == "" || content == "" {
return nil
}
if userID <= 0 || strings.TrimSpace(chatID) == "" {
return fmt.Errorf("newAgent visible message persist: invalid conversation identity")
}
if ctx == nil {
ctx = context.Background()
}
// 1. 持久化用户消息:先写 LLM 上下文 Redis再落 DB最后更新 UI 历史缓存。
userMsg := &schema.Message{Role: schema.User, Content: userMessage}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
userMsg.Extra = retryExtra
persistMsg := &schema.Message{
Role: msg.Role,
Content: content,
ReasoningContent: strings.TrimSpace(msg.ReasoningContent),
}
if len(msg.Extra) > 0 {
persistMsg.Extra = make(map[string]any, len(msg.Extra))
for key, value := range msg.Extra {
persistMsg.Extra[key] = value
}
if err := s.agentCache.PushMessage(ctx, chatID, userMsg); err != nil {
log.Printf("写入用户消息到 LLM 上下文 Redis 失败 chat=%s: %v", chatID, err)
}
userPayload := model.ChatHistoryPersistPayload{
if err := s.agentCache.PushMessage(ctx, chatID, persistMsg); err != nil {
log.Printf("写入 newAgent 可见消息到 Redis 失败 chat=%s role=%s: %v", chatID, role, err)
}
reasoningDurationSeconds := 0
if persistMsg.Extra != nil {
switch v := persistMsg.Extra["reasoning_duration_seconds"].(type) {
case int:
reasoningDurationSeconds = v
case int64:
reasoningDurationSeconds = int(v)
case float64:
reasoningDurationSeconds = int(v)
}
}
persistPayload := model.ChatHistoryPersistPayload{
UserID: userID,
ConversationID: chatID,
Role: "user",
Message: userMessage,
ReasoningContent: "",
ReasoningDurationSeconds: 0,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
TokensConsumed: 0,
}
if err := s.PersistChatHistory(ctx, userPayload); err != nil {
pushErrNonBlocking(errChan, err)
}
userCreatedAt := time.Now()
s.appendConversationHistoryCacheOptimistically(
context.Background(),
userID,
chatID,
buildOptimisticConversationHistoryItem("user", userMessage, "", 0, retryMeta, userCreatedAt),
)
// 2. 从 ConversationContext 提取助手回复(最后一条 assistant 消息)。
conversationContext := finalState.ConversationContext
if conversationContext == nil || len(conversationContext.History) == 0 {
return
}
var lastAssistantMsg *schema.Message
for i := len(conversationContext.History) - 1; i >= 0; i-- {
msg := conversationContext.History[i]
if msg.Role == schema.Assistant {
lastAssistantMsg = msg
break
}
}
if lastAssistantMsg == nil {
return
}
assistantReply := lastAssistantMsg.Content
reasoningContent := lastAssistantMsg.ReasoningContent
var reasoningDurationSeconds int
if lastAssistantMsg.Extra != nil {
if dur, ok := lastAssistantMsg.Extra["reasoning_duration_seconds"].(float64); ok {
reasoningDurationSeconds = int(dur)
}
}
// 3. 持久化助手消息:先写 LLM 上下文 Redis再落 DB最后更新 UI 历史缓存。
assistantMsg := &schema.Message{
Role: schema.Assistant,
Content: assistantReply,
ReasoningContent: reasoningContent,
}
if reasoningDurationSeconds > 0 {
assistantMsg.Extra = map[string]any{"reasoning_duration_seconds": reasoningDurationSeconds}
}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
if assistantMsg.Extra == nil {
assistantMsg.Extra = make(map[string]any)
}
for k, v := range retryExtra {
assistantMsg.Extra[k] = v
}
}
if err := s.agentCache.PushMessage(context.Background(), chatID, assistantMsg); err != nil {
log.Printf("写入助手消息到 LLM 上下文 Redis 失败 chat=%s: %v", chatID, err)
}
requestTotalTokens := snapshotRequestTokenMeter(ctx).TotalTokens
assistantPayload := model.ChatHistoryPersistPayload{
UserID: userID,
ConversationID: chatID,
Role: "assistant",
Message: assistantReply,
ReasoningContent: reasoningContent,
Role: role,
Message: content,
ReasoningContent: strings.TrimSpace(persistMsg.ReasoningContent),
ReasoningDurationSeconds: reasoningDurationSeconds,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
TokensConsumed: requestTotalTokens,
TokensConsumed: tokensConsumed,
}
if err := s.PersistChatHistory(ctx, assistantPayload); err != nil {
pushErrNonBlocking(errChan, err)
} else {
if err := s.PersistChatHistory(ctx, persistPayload); err != nil {
return err
}
now := time.Now()
s.appendConversationHistoryCacheOptimistically(
context.Background(),
ctx,
userID,
chatID,
buildOptimisticConversationHistoryItem(
"assistant",
assistantReply,
reasoningContent,
role,
content,
persistPayload.ReasoningContent,
reasoningDurationSeconds,
retryMeta,
time.Now(),
now,
),
)
}
return nil
}
// makeRoughBuildFunc 把 AgentService 上的 HybridScheduleWithPlanMultiFunc 封装成
@@ -509,6 +485,38 @@ func (s *AgentService) persistChatAfterGraph(
// placement普通时段放置的任务全部被丢弃。
// 正确做法:使用第一个返回值 []HybridScheduleEntry过滤 Status="suggested" 且 TaskItemID>0 的条目,
// 这样嵌入和非嵌入的粗排结果都能正确写入 ScheduleState。
// adjustNewAgentRequestTokenUsage 负责把本轮 graph 的请求级 token 一次性回写到账本。
//
// 说明:
// 1. newAgent 逐条可见消息都按 0 token 落库,最终统一在这里补记整轮消耗;
// 2. 如果启用了 outbox就沿用异步 token 调整事件,保持写账口径一致;
// 3. 该步骤属于请求收尾,不应反过来打断用户已看到的回复。
func (s *AgentService) adjustNewAgentRequestTokenUsage(ctx context.Context, userID int, chatID string, deltaTokens int) {
if s == nil || userID <= 0 || strings.TrimSpace(chatID) == "" || deltaTokens <= 0 {
return
}
if ctx == nil {
ctx = context.Background()
}
if s.eventPublisher != nil {
if err := eventsvc.PublishChatTokenUsageAdjustRequested(ctx, s.eventPublisher, model.ChatTokenUsageAdjustPayload{
UserID: userID,
ConversationID: chatID,
TokensDelta: deltaTokens,
Reason: "new_agent_request",
TriggeredAt: time.Now(),
}); err != nil {
log.Printf("写入 newAgent 请求级 token 调整事件失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
}
return
}
if err := s.repo.AdjustTokenUsage(ctx, userID, chatID, deltaTokens); err != nil {
log.Printf("同步写入 newAgent 请求级 token 调整失败 chat=%s tokens=%d err=%v", chatID, deltaTokens, err)
}
}
func (s *AgentService) makeRoughBuildFunc() newagentmodel.RoughBuildFunc {
if s.HybridScheduleWithPlanMultiFunc == nil {
return nil

View File

@@ -15,7 +15,6 @@ import (
agentstream "github.com/LoveLosita/smartflow/backend/agent/stream"
"github.com/LoveLosita/smartflow/backend/model"
"github.com/cloudwego/eino-ext/components/model/ark"
"github.com/cloudwego/eino/schema"
"github.com/google/uuid"
)
@@ -302,109 +301,3 @@ func (s *AgentService) decideQuickNoteRouting(ctx context.Context, selectedModel
_ = s
return agentrouter.DecideQuickNoteRouting(ctx, selectedModel, userMessage)
}
// persistChatAfterReply 在“随口记 graph”返回后复用当前项目的后置持久化策略
// 1) 用户消息写 Redis + outbox/DB
// 2) 助手消息写 Redis + outbox/DB。
func (s *AgentService) persistChatAfterReply(
ctx context.Context,
userID int,
chatID string,
userMessage string,
assistantReply string,
assistantReasoning string,
assistantReasoningDurationSeconds int,
retryMeta *chatRetryMeta,
userTokens int,
assistantTokens int,
errChan chan error,
) {
// 1. 先把用户消息写入 Redis保证会话上下文“马上可见”。
userMsg := &schema.Message{Role: schema.User, Content: userMessage}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
userMsg.Extra = retryExtra
}
if err := s.agentCache.PushMessage(ctx, chatID, userMsg); err != nil {
log.Printf("写入用户消息到 Redis 失败: %v", err)
}
// 2. 再把用户消息写入可靠持久化通道outbox 或同步 DB
if err := s.PersistChatHistory(ctx, model.ChatHistoryPersistPayload{
UserID: userID,
ConversationID: chatID,
Role: "user",
Message: userMessage,
ReasoningContent: "",
ReasoningDurationSeconds: 0,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
TokensConsumed: userTokens,
}); err != nil {
pushErrNonBlocking(errChan, err)
return
}
userCreatedAt := time.Now()
s.appendConversationHistoryCacheOptimistically(
context.Background(),
userID,
chatID,
buildOptimisticConversationHistoryItem(
"user",
userMessage,
"",
0,
retryMeta,
userCreatedAt,
),
)
// 3. 助手消息同样遵循“Redis 先行 + 可靠持久化补齐”策略。
assistantMsg := &schema.Message{Role: schema.Assistant, Content: assistantReply, ReasoningContent: assistantReasoning}
if assistantReasoningDurationSeconds > 0 {
assistantMsg.Extra = map[string]any{"reasoning_duration_seconds": assistantReasoningDurationSeconds}
}
if retryExtra := retryMeta.CacheExtra(); len(retryExtra) > 0 {
if assistantMsg.Extra == nil {
assistantMsg.Extra = make(map[string]any, len(retryExtra))
}
for key, value := range retryExtra {
assistantMsg.Extra[key] = value
}
}
if err := s.agentCache.PushMessage(context.Background(), chatID, assistantMsg); err != nil {
log.Printf("写入助手消息到 Redis 失败: %v", err)
}
// 4. 助手消息持久化失败不阻断主流程,通过 errChan 异步上报。
if err := s.PersistChatHistory(context.Background(), model.ChatHistoryPersistPayload{
UserID: userID,
ConversationID: chatID,
Role: "assistant",
Message: assistantReply,
ReasoningContent: assistantReasoning,
ReasoningDurationSeconds: assistantReasoningDurationSeconds,
RetryGroupID: retryMeta.GroupIDPtr(),
RetryIndex: retryMeta.IndexPtr(),
RetryFromUserMessageID: retryMeta.FromUserMessageIDPtr(),
RetryFromAssistantMessageID: retryMeta.FromAssistantMessageIDPtr(),
TokensConsumed: assistantTokens,
}); err != nil {
pushErrNonBlocking(errChan, err)
return
}
s.appendConversationHistoryCacheOptimistically(
context.Background(),
userID,
chatID,
buildOptimisticConversationHistoryItem(
"assistant",
assistantReply,
assistantReasoning,
assistantReasoningDurationSeconds,
retryMeta,
userCreatedAt.Add(time.Millisecond),
),
)
}

View File

@@ -71,10 +71,6 @@ func RegisterChatHistoryPersistHandler(
payload.Message,
payload.ReasoningContent,
payload.ReasoningDurationSeconds,
payload.RetryGroupID,
payload.RetryIndex,
payload.RetryFromUserMessageID,
payload.RetryFromAssistantMessageID,
payload.TokensConsumed,
); err != nil {
return err

View File

@@ -12,9 +12,6 @@ export interface ConversationHistoryMessage {
created_at?: string | null
reasoning_content?: string | null
reasoning_duration_seconds?: number | null
retry_group_id?: string | null
retry_index?: number | null
retry_total?: number | null
}
export interface ConversationListQuery {
@@ -111,9 +108,6 @@ function normalizeConversationHistoryMessage(raw: unknown): ConversationHistoryM
reasoning_content: normalizedReasoning,
reasoning_duration_seconds:
typeof candidate.reasoning_duration_seconds === 'number' ? candidate.reasoning_duration_seconds : null,
retry_group_id: typeof candidate.retry_group_id === 'string' ? candidate.retry_group_id : null,
retry_index: typeof candidate.retry_index === 'number' ? candidate.retry_index : null,
retry_total: typeof candidate.retry_total === 'number' ? candidate.retry_total : null,
}
}

View File

@@ -18,6 +18,23 @@ const props = withDefaults(
const safeStats = computed(() => props.stats ?? null)
function formatCompactCount(value: number) {
if (!Number.isFinite(value)) {
return '--'
}
// 1. 千位及以上用 k 单位压缩,避免按钮过宽。
// 2. 保留小数点后 1 位;如果刚好是整数千位,则去掉 .0,像 80k 这种展示会更干净。
const absoluteValue = Math.abs(value)
if (absoluteValue >= 1000) {
const compactValue = value / 1000
const compactText = compactValue.toFixed(1)
return `${compactText.endsWith('.0') ? compactText.slice(0, -2) : compactText}k`
}
return `${Math.round(value)}`
}
const usagePercent = computed(() => {
if (!safeStats.value || safeStats.value.budget <= 0) {
return 0
@@ -29,7 +46,9 @@ const barWidthPercent = computed(() => {
if (!safeStats.value || safeStats.value.budget <= 0) {
return 0
}
// 1. 按 total / budget 计算宽度,上限 100%(超预算时撑满进度条)。
// 1. 进度条只负责表达相对占用率。
// 2. 超过预算时只把宽度封顶到 100%,避免条形溢出容器。
return Math.min(100, (safeStats.value.total / safeStats.value.budget) * 100)
})
@@ -40,16 +59,28 @@ const isOverBudget = computed(() => {
return safeStats.value.total > safeStats.value.budget
})
const usageText = computed(() => {
const usagePercentText = computed(() => {
if (props.loading) {
return '--'
}
if (!safeStats.value) {
return props.disabled ? '--' : '0%'
}
return `${usagePercent.value}%`
})
const usageSummaryText = computed(() => {
if (props.loading) {
return '...'
}
if (!safeStats.value) {
return props.disabled ? '--' : ''
return props.disabled ? '--/--' : '0/0'
}
return `${usagePercent.value}%`
return `${formatCompactCount(safeStats.value.total)}/${formatCompactCount(safeStats.value.budget)}`
})
const tooltipText = computed(() => {
@@ -58,10 +89,12 @@ const tooltipText = computed(() => {
}
if (!safeStats.value) {
return props.disabled ? '新会话发送首条消息后展示上下文窗口统计' : '当前会话暂无上下文窗口统计'
return props.disabled
? '新会话发送首条消息后展示上下文窗口统计'
: '当前会话暂无上下文窗口统计'
}
return `总计 ${safeStats.value.total} / 预算 ${safeStats.value.budget}${usagePercent.value}%`
return `上下文使用 ${usagePercentText.value}${usageSummaryText.value}`
})
</script>
@@ -76,23 +109,29 @@ const tooltipText = computed(() => {
:title="tooltipText"
>
<span class="assistant-context-meter__label">窗口</span>
<div class="assistant-context-meter__core">
<span class="assistant-context-meter__percent">{{ usagePercentText }}</span>
<span class="assistant-context-meter__summary">{{ usageSummaryText }}</span>
<div class="assistant-context-meter__track" aria-hidden="true">
<div v-if="loading" class="assistant-context-meter__loading-bar" />
<div v-else-if="barWidthPercent > 0" class="assistant-context-meter__bar" :style="{ width: `${barWidthPercent}%` }" />
<div
v-else-if="barWidthPercent > 0"
class="assistant-context-meter__bar"
:style="{ width: `${barWidthPercent}%` }"
/>
</div>
</div>
<span class="assistant-context-meter__value">{{ usageText }}</span>
</div>
</template>
<style scoped>
.assistant-context-meter {
width: 144px;
min-width: 144px;
max-width: 144px;
width: 188px;
min-width: 188px;
max-width: 188px;
height: 32px;
padding: 0 9px 0 10px;
padding: 0 8px;
border: 1px solid rgba(15, 23, 42, 0.1);
border-radius: 999px;
background: #ffffff;
@@ -120,9 +159,10 @@ const tooltipText = computed(() => {
}
.assistant-context-meter__label,
.assistant-context-meter__value {
.assistant-context-meter__percent,
.assistant-context-meter__summary {
flex: 0 0 auto;
font-size: 12px;
font-size: 11px;
line-height: 1;
white-space: nowrap;
}
@@ -132,25 +172,42 @@ const tooltipText = computed(() => {
font-weight: 600;
}
.assistant-context-meter__value {
width: 28px;
min-width: 28px;
text-align: right;
color: #334155;
font-weight: 700;
.assistant-context-meter__core {
flex: 1 1 auto;
min-width: 0;
display: grid;
grid-template-columns: auto auto minmax(0, 1fr);
align-items: center;
column-gap: 2px;
}
.assistant-context-meter--disabled .assistant-context-meter__value {
.assistant-context-meter__percent {
min-width: 24px;
color: #334155;
font-weight: 700;
text-align: right;
}
.assistant-context-meter__summary {
min-width: 52px;
color: #667085;
font-weight: 600;
text-align: right;
}
.assistant-context-meter--disabled .assistant-context-meter__percent,
.assistant-context-meter--disabled .assistant-context-meter__summary {
color: #6b7280;
}
.assistant-context-meter--danger .assistant-context-meter__value {
.assistant-context-meter--danger .assistant-context-meter__percent,
.assistant-context-meter--danger .assistant-context-meter__summary {
color: #b42318;
}
.assistant-context-meter__track {
flex: 1 1 auto;
min-width: 0;
width: 100%;
height: 7px;
overflow: hidden;
border-radius: 999px;
@@ -189,6 +246,7 @@ const tooltipText = computed(() => {
0% {
background-position: 200% 0;
}
100% {
background-position: -200% 0;
}

View File

@@ -55,12 +55,21 @@ interface ConversationGroup {
items: ConversationListItem[]
}
interface RetryPageGroup {
groupId: string
total: number
latestIndex: number
visibleIndex: number
pages: Map<number, { user?: AssistantMessage; assistant?: AssistantMessage }>
// 展示用消息:合并连续 assistant 消息后的视图模型
interface DisplayMessage {
/** 第一条源消息的 id用作 Vue key */
id: string
role: 'user' | 'assistant'
/** 合并后的正文内容 */
content: string
/** 最后一条源消息的时间 */
createdAt: string
/** 合并后的推理内容 */
reasoning?: string
/** 原始消息引用列表 */
sources: AssistantMessage[]
/** 是否为多条合并 */
merged: boolean
}
const props = withDefaults(
@@ -92,7 +101,6 @@ const historyPanelWidth = ref(props.initialHistoryWidth)
const activeStreamingMessageId = ref('')
const editingUserMessageId = ref('')
const editingUserMessageDraft = ref('')
const retryVisiblePageMap = reactive<Record<string, number>>({})
const pendingPlanningTaskClassIds = ref<number[]>([])
const conversationPage = ref(1)
@@ -125,6 +133,7 @@ const DEFAULT_PLANNING_PROMPT = '请基于这些任务类帮我做一版智能
let messageScrollRaf = 0
let messageScrollReleaseRaf = 0
let reasoningTicker = 0
let historyResizeCleanup: (() => void) | null = null
const reasoningDisplayNow = ref(Date.now())
const shouldAutoFollowMessages = ref(true)
const messageBottomTolerancePx = 24
@@ -149,75 +158,48 @@ const rawSelectedMessages = computed(() => {
return conversationMessagesMap[selectedConversationId.value] ?? []
})
const retryPageGroups = computed<Map<string, RetryPageGroup>>(() => {
const grouped = new Map<string, RetryPageGroup>()
// retry 机制已整体下线selectedMessages 直接回退到原始消息流,不再做分组/翻页。
const selectedMessages = computed(() => rawSelectedMessages.value)
for (const message of rawSelectedMessages.value) {
if (!message.retryGroupId || !message.retryIndex || !message.retryTotal || message.retryTotal <= 1) {
// 1. 将连续 assistant 消息合并为一条展示消息。
// 2. ReAct 循环中 plan/execute/deliver 各节点都会产生 assistant speak
// 合并后用户看到的是一段连续的 AI 回复,而非多段割裂输出。
const displayMessages = computed<DisplayMessage[]>(() => {
const result: DisplayMessage[] = []
const src = selectedMessages.value
let i = 0
while (i < src.length) {
const msg = src[i]
if (msg.role !== 'assistant') {
result.push({
id: msg.id,
role: msg.role,
content: msg.content,
createdAt: msg.createdAt,
reasoning: msg.reasoning,
sources: [msg],
merged: false,
})
i++
continue
}
const existed = grouped.get(message.retryGroupId) ?? {
groupId: message.retryGroupId,
total: message.retryTotal,
latestIndex: message.retryIndex,
visibleIndex: retryVisiblePageMap[message.retryGroupId] ?? message.retryTotal,
pages: new Map<number, { user?: AssistantMessage; assistant?: AssistantMessage }>(),
// 收集连续 assistant 消息并合并
const group: AssistantMessage[] = []
while (i < src.length && src[i].role === 'assistant') {
group.push(src[i])
i++
}
existed.total = Math.max(existed.total, message.retryTotal)
existed.latestIndex = Math.max(existed.latestIndex, message.retryIndex)
existed.visibleIndex = retryVisiblePageMap[message.retryGroupId] ?? existed.latestIndex
const page = existed.pages.get(message.retryIndex) ?? {}
if (message.role === 'user') {
page.user = message
result.push({
id: group[0].id,
role: 'assistant',
content: group.map(m => m.content).filter(Boolean).join('\n\n'),
createdAt: group[group.length - 1].createdAt,
reasoning: group.map(m => m.reasoning).filter(Boolean).join('\n\n') || undefined,
sources: group,
merged: group.length > 1,
})
}
if (message.role === 'assistant') {
page.assistant = message
}
existed.pages.set(message.retryIndex, page)
grouped.set(message.retryGroupId, existed)
}
return grouped
})
const selectedMessages = computed(() => {
const visible: AssistantMessage[] = []
const insertedRetryGroups = new Set<string>()
for (const message of rawSelectedMessages.value) {
if (!message.retryGroupId) {
visible.push(message)
continue
}
const retryGroup = retryPageGroups.value.get(message.retryGroupId)
if (!retryGroup || retryGroup.total <= 1 || !message.retryIndex) {
visible.push(message)
continue
}
if (insertedRetryGroups.has(message.retryGroupId)) {
continue
}
insertedRetryGroups.add(message.retryGroupId)
const nextPage =
retryGroup.pages.get(retryGroup.visibleIndex) ??
retryGroup.pages.get(retryGroup.latestIndex) ??
retryGroup.pages.get(1)
if (nextPage?.user) {
visible.push(nextPage.user)
}
if (nextPage?.assistant) {
visible.push(nextPage.assistant)
}
}
return visible
return result
})
function resolveConversationGroupLabel(timeText?: string | null) {
@@ -475,9 +457,6 @@ function normalizeHistoryMessage(message: ConversationHistoryMessage, index: num
content: message.content,
createdAt: message.created_at ?? new Date().toISOString(),
reasoning: reasoningText || undefined,
retryGroupId: typeof message.retry_group_id === 'string' ? message.retry_group_id : undefined,
retryIndex: typeof message.retry_index === 'number' ? message.retry_index : undefined,
retryTotal: typeof message.retry_total === 'number' ? message.retry_total : undefined,
}
// 1. 历史消息优先使用后端持久化的思考时长,避免刷新后重新按“当前时间 - 创建时间”误算。
@@ -495,57 +474,76 @@ function normalizeHistoryMessage(message: ConversationHistoryMessage, index: num
return normalized
}
function resolveMessageTimestamp(message: AssistantMessage) {
const parsed = Date.parse(message.createdAt)
return Number.isFinite(parsed) ? parsed : 0
}
function isSameLogicalMessage(left: AssistantMessage, right: AssistantMessage) {
return (
left.role === right.role &&
left.content === right.content &&
(left.reasoning || '') === (right.reasoning || '') &&
(left.retryGroupId || '') === (right.retryGroupId || '') &&
(left.retryIndex || 0) === (right.retryIndex || 0)
(left.reasoning || '') === (right.reasoning || '')
)
}
// mergeServerHistoryWithLocalState 将服务端历史与本地乐观消息合并为最终消息流。
//
// 核心策略:保留本地消息的原始顺序,用服务端数据"就地替换"匹配到的本地消息。
//
// 为什么不按时间戳排序?
// 1. 聊天历史通过 Kafka 异步持久化,数据库 created_at 是消费者落库时刻,
// 而非消息产生时刻。Kafka 消费顺序不保证与发布顺序一致,
// 导致 assistant 消息可能比 user 消息先落库created_at 反而更早。
// 2. 本地消息按"用户发送 → 占位 → 流式填充"的顺序 append天然是正确时序
// 任何基于时间戳的排序都会被异步落库的时钟偏差破坏。
// 3. 因此:本地顺序权威,服务端数据用于刷新字段(如 reasoning_duration_seconds
// 新增的服务端消息(其他端产生)追加到尾部。
function mergeServerHistoryWithLocalState(
conversationId: string,
history: ConversationHistoryMessage[],
) {
const existingBucket = conversationMessagesMap[conversationId] ?? []
const normalizedHistory = history.map(normalizeHistoryMessage)
const existingById = new Map(existingBucket.map((message) => [message.id, message]))
const mergedHistory = normalizedHistory.map((serverMessage) => {
const localMessage = existingById.get(serverMessage.id)
if (!localMessage) {
return serverMessage
// 1. 构建服务端消息的快速查找索引:按 ID 和按角色+内容两种方式。
const serverById = new Map(normalizedHistory.map((m) => [m.id, m]))
const usedServerIds = new Set<string>()
// 2. 按本地消息的原始顺序逐一处理:
// - ID 精确命中 → 用服务端数据替换,保持当前位置;
// - 临时 ID 按语义匹配 → 同样替换,保持当前位置;
// - 无法匹配 → 保留为乐观消息,保持当前位置。
const result: AssistantMessage[] = []
for (const localMsg of existingBucket) {
// 2.1 先按 ID 精确匹配(非临时 ID 的消息,如历史加载过的服务端消息)。
const exactMatch = serverById.get(localMsg.id)
if (exactMatch && !usedServerIds.has(exactMatch.id)) {
result.push(exactMatch)
usedServerIds.add(exactMatch.id)
continue
}
return {
...serverMessage,
retryGroupId: serverMessage.retryGroupId ?? localMessage.retryGroupId,
retryIndex: serverMessage.retryIndex ?? localMessage.retryIndex,
retryTotal: serverMessage.retryTotal ?? localMessage.retryTotal,
// 2.2 临时 ID如 user-1700000000000-abc走语义匹配
// 同一角色 + 同一内容的消息视为同一条逻辑消息。
if (isLocalEphemeralMessageId(localMsg.id)) {
const logicalMatch = normalizedHistory.find(
(sm) => !usedServerIds.has(sm.id) && isSameLogicalMessage(sm, localMsg),
)
if (logicalMatch) {
result.push(logicalMatch)
usedServerIds.add(logicalMatch.id)
continue
}
})
const mergedIds = new Set(mergedHistory.map((message) => message.id))
const optimisticMessages = existingBucket.filter((message) => {
if (mergedIds.has(message.id)) {
return false
}
if (!isLocalEphemeralMessageId(message.id)) {
return true
// 2.3 无法匹配服务端消息时保留本地乐观消息(流式中的占位 / 网络延迟未落库)。
result.push(localMsg)
}
return !mergedHistory.some((serverMessage) => isSameLogicalMessage(serverMessage, message))
})
// 3. 本地不存在的服务端消息(如其他设备发送的)追加到尾部,按服务端返回顺序排列。
for (const serverMsg of normalizedHistory) {
if (!usedServerIds.has(serverMsg.id)) {
result.push(serverMsg)
}
}
return [...mergedHistory, ...optimisticMessages].sort((left, right) => resolveMessageTimestamp(left) - resolveMessageTimestamp(right))
return result
}
function renderMessageMarkdown(content: string) {
@@ -573,199 +571,10 @@ function isLatestAssistantMessage(messageId: string) {
return lastAssistant?.id === messageId
}
function resolveRetryPageGroup(message: AssistantMessage) {
if (!message.retryGroupId) {
return null
}
return retryPageGroups.value.get(message.retryGroupId) ?? null
}
function shouldShowRetryPager(message: AssistantMessage) {
if (message.role !== 'assistant') {
return false
}
const retryGroup = resolveRetryPageGroup(message)
return Boolean(retryGroup && retryGroup.total > 1)
}
function changeRetryPage(message: AssistantMessage, delta: number) {
const retryGroup = resolveRetryPageGroup(message)
if (!retryGroup) {
return
}
const nextPage = Math.min(Math.max(1, retryGroup.visibleIndex + delta), retryGroup.total)
if (nextPage === retryGroup.visibleIndex) {
return
}
retryVisiblePageMap[retryGroup.groupId] = nextPage
}
function resolveVisibleUserMessageBeforeAssistant(messageId: string) {
const index = findMessageIndex(messageId)
if (index <= 0) {
return null
}
for (let current = index - 1; current >= 0; current -= 1) {
const candidate = selectedMessages.value[current]
if (candidate?.role === 'user') {
return candidate
}
}
return null
}
function findMessageIndexInList(messages: AssistantMessage[], messageId: string) {
return messages.findIndex((message) => message.id === messageId)
}
function resolveUserMessageBeforeAssistantInBucket(conversationId: string, assistantMessageId: string) {
const bucket = conversationMessagesMap[conversationId] ?? []
const index = findMessageIndexInList(bucket, assistantMessageId)
if (index <= 0) {
return null
}
for (let current = index - 1; current >= 0; current -= 1) {
const candidate = bucket[current]
if (candidate?.role === 'user') {
return candidate
}
}
return null
}
function isLocalEphemeralMessageId(id: string) {
return /^(user|assistant|system)-\d{13}-[a-z0-9]+$/i.test(id)
}
function resolvePersistedMessageId(message: AssistantMessage | null) {
if (!message) {
return null
}
if (isLocalEphemeralMessageId(message.id)) {
return null
}
if (/^\d+$/.test(message.id)) {
return Number(message.id)
}
return message.id
}
function resolveBestMatchedMessageFromBucket(conversationId: string, targetMessage: AssistantMessage) {
const bucket = conversationMessagesMap[conversationId] ?? []
const directMatchedMessage = bucket.find((message) => message.id === targetMessage.id)
if (directMatchedMessage) {
return directMatchedMessage
}
const targetTimestamp = resolveMessageTimestamp(targetMessage)
const logicalMatchedMessages = bucket
.filter((message) => isSameLogicalMessage(message, targetMessage))
.sort((left, right) => {
// 1. 优先命中已经拿到后端稳定主键的消息,避免继续引用本地占位态。
// 2. 若候选状态一致,则优先选择时间更接近原消息的那条。
// 3. 时间也一致时再按较新的记录兜底,降低重复文案时误命中旧消息的概率。
const persistedScoreDiff =
Number(!isLocalEphemeralMessageId(right.id)) - Number(!isLocalEphemeralMessageId(left.id))
if (persistedScoreDiff !== 0) {
return persistedScoreDiff
}
const leftGap = Math.abs(resolveMessageTimestamp(left) - targetTimestamp)
const rightGap = Math.abs(resolveMessageTimestamp(right) - targetTimestamp)
if (leftGap !== rightGap) {
return leftGap - rightGap
}
return resolveMessageTimestamp(right) - resolveMessageTimestamp(left)
})
return logicalMatchedMessages[0] ?? null
}
async function resolveRetrySourceMessages(
conversationId: string,
sourceUserMessage: AssistantMessage,
sourceAssistantMessage: AssistantMessage,
) {
let resolvedUserMessage: AssistantMessage | null = sourceUserMessage
let resolvedAssistantMessage: AssistantMessage | null = sourceAssistantMessage
let persistedUserMessageId = resolvePersistedMessageId(resolvedUserMessage)
let persistedAssistantMessageId = resolvePersistedMessageId(resolvedAssistantMessage)
if (persistedUserMessageId && persistedAssistantMessageId) {
return {
sourceUserMessage: resolvedUserMessage,
sourceAssistantMessage: resolvedAssistantMessage,
persistedUserMessageId,
persistedAssistantMessageId,
}
}
// 1. 若当前点击时仍是本地占位消息,先静默拉一次权威历史,尽量把真实 ID 补回来。
// 2. 这里复用现有 history 接口即可,避免为了一次重试再新增额外查询接口。
// 3. 若静默刷新后依然拿不到稳定 ID则说明消息大概率仍处于异步持久化窗口期。
await loadConversationMessages(conversationId, true)
resolvedAssistantMessage =
resolveBestMatchedMessageFromBucket(conversationId, sourceAssistantMessage) ?? sourceAssistantMessage
resolvedUserMessage =
resolveUserMessageBeforeAssistantInBucket(conversationId, resolvedAssistantMessage.id) ??
resolveBestMatchedMessageFromBucket(conversationId, sourceUserMessage) ??
sourceUserMessage
persistedUserMessageId = resolvePersistedMessageId(resolvedUserMessage)
persistedAssistantMessageId = resolvePersistedMessageId(resolvedAssistantMessage)
return {
sourceUserMessage: resolvedUserMessage,
sourceAssistantMessage: resolvedAssistantMessage,
persistedUserMessageId,
persistedAssistantMessageId,
}
}
function createRetryGroupId() {
if (typeof crypto !== 'undefined' && typeof crypto.randomUUID === 'function') {
return `retry-${crypto.randomUUID()}`
}
return `retry-${Date.now()}-${Math.random().toString(16).slice(2)}`
}
function applyRetryGroupToExistingMessages(groupId: string, total: number, userMessageId: string, assistantMessageId: string) {
  // Stamp retry-group metadata onto the original user/assistant pair (and any
  // messages already tagged with this group), then point the visible page at
  // the newest index.
  const conversationId = selectedConversationId.value
  if (!conversationId) {
    return
  }
  const bucket = conversationMessagesMap[conversationId] ?? []
  for (const message of bucket) {
    const isSourcePair = message.id === userMessageId || message.id === assistantMessageId
    if (!isSourcePair && message.retryGroupId !== groupId) {
      continue
    }
    message.retryGroupId = groupId
    message.retryTotal = total
    // The source pair is always page 1; existing group members only get an
    // index backfilled when they lack one.
    const shouldBackfillIndex =
      !message.retryIndex && (message.role === 'user' || message.role === 'assistant')
    if (isSourcePair || shouldBackfillIndex) {
      message.retryIndex = 1
    }
  }
  retryVisiblePageMap[groupId] = total
}
function resolvePromptBeforeAssistantMessage(messageId: string) {
const index = findMessageIndex(messageId)
if (index <= 0) {
@@ -895,6 +704,47 @@ function shouldShowAnsweringIndicator(message: AssistantMessage) {
return isStreamingMessage(message) && !isThinkingMessage(message) && !message.content.trim()
}
// ---------- DisplayMessage 适配函数 ----------
// 合并后的 DisplayMessage 包含多条源消息,以下函数统一处理
// 流式状态、推理框、折叠等在合并场景下的语义。
function isDisplayStreaming(dm: DisplayMessage): boolean {
  // A merged display message counts as streaming when any of its source
  // messages is the one currently receiving stream chunks.
  const streamingId = activeStreamingMessageId.value
  for (const source of dm.sources) {
    if (source.id === streamingId) {
      return true
    }
  }
  return false
}
function shouldShowDisplayReasoningBox(dm: DisplayMessage): boolean {
  // Only assistant messages carry a reasoning box; show it when any source has
  // non-blank reasoning text, or is actively streaming in thinking mode.
  if (dm.role !== 'assistant') {
    return false
  }
  const streamingId = activeStreamingMessageId.value
  return dm.sources.some(source => {
    if (source.reasoning?.trim()) {
      return true
    }
    return source.id === streamingId && thinkingMessageMap[source.id] === true
  })
}
function shouldShowDisplayAnsweringIndicator(dm: DisplayMessage): boolean {
  // Show the plain "answering" typing indicator only while streaming with no
  // content yet and no source in thinking mode.
  if (dm.content) {
    return false
  }
  if (!isDisplayStreaming(dm)) {
    return false
  }
  return !dm.sources.some(source => thinkingMessageMap[source.id] === true)
}
function isDisplayReasoningCollapsed(dm: DisplayMessage): boolean {
  // Collapsed only when every source message has been explicitly collapsed.
  // (An empty sources list therefore also reads as collapsed.)
  return !dm.sources.some(source => reasoningCollapsedMap[source.id] !== true)
}
function toggleDisplayReasoningCollapse(dm: DisplayMessage): void {
  // Flip the collapse state for every source at once so the merged message
  // behaves as a single reasoning panel.
  const nextCollapsed = !isDisplayReasoningCollapsed(dm)
  for (const source of dm.sources) {
    reasoningCollapsedMap[source.id] = nextCollapsed
  }
}
function getDisplayReasoningStatusLabel(dm: DisplayMessage): string {
  // Sum recorded thinking durations across the merged sources; while nothing
  // has been recorded, fall back to a live "thinking" label when a source is
  // actively streaming in thinking mode.
  let totalSeconds = 0
  for (const source of dm.sources) {
    totalSeconds += reasoningDurationMap[source.id] ?? 0
  }
  if (totalSeconds > 0) {
    return `已思考(用时 ${totalSeconds} 秒)`
  }
  const streamingId = activeStreamingMessageId.value
  const isThinkingNow = dm.sources.some(
    source => source.id === streamingId && thinkingMessageMap[source.id] === true,
  )
  return isThinkingNow ? '思考中' : '已思考'
}
function isMessageViewportAtBottom(viewport: HTMLElement) {
  // Treat the viewport as "at bottom" when the remaining scroll distance is
  // within the configured pixel tolerance.
  const remaining = viewport.scrollHeight - viewport.scrollTop - viewport.clientHeight
  return remaining <= messageBottomTolerancePx
}
@@ -1103,6 +953,15 @@ function syncHistoryPanelWidthForViewport() {
)
}
function releaseHistoryResizeListeners() {
  // Run and clear the pending drag-cleanup callback, if any, so listeners
  // registered by a previous resize gesture never leak.
  const cleanup = historyResizeCleanup
  if (!cleanup) {
    return
  }
  cleanup()
  historyResizeCleanup = null
}
// startResizeHistoryPanel 负责处理会话列表与聊天主区之间的横向拖拽。
// 职责边界:
// 1. 只负责更新助手面板内部的历史区宽度,不修改外层 Dashboard 的左右二分布局。
@@ -1124,6 +983,9 @@ function startResizeHistoryPanel(event: PointerEvent) {
const startX = event.clientX
const startWidth = historyPanelWidth.value
const bounds = getHistoryPanelWidthBounds(rect.width)
// 1. 先清理上一次拖拽遗留的监听器,避免重复绑定导致的光标残留和状态错乱。
// 2. 再注册本次拖拽监听,并把清理函数保存起来,方便 pointerup / pointercancel / 卸载时统一回收。
releaseHistoryResizeListeners()
const handlePointerMove = (moveEvent: PointerEvent) => {
const deltaX = moveEvent.clientX - startX
@@ -1134,14 +996,22 @@ function startResizeHistoryPanel(event: PointerEvent) {
}
const stopResize = () => {
releaseHistoryResizeListeners()
}
historyResizeCleanup = () => {
window.removeEventListener('pointermove', handlePointerMove)
window.removeEventListener('pointerup', stopResize)
window.removeEventListener('pointercancel', stopResize)
window.removeEventListener('blur', stopResize)
document.body.classList.remove('dashboard-resizing')
}
document.body.classList.add('dashboard-resizing')
window.addEventListener('pointermove', handlePointerMove)
window.addEventListener('pointerup', stopResize)
window.addEventListener('pointercancel', stopResize)
window.addEventListener('blur', stopResize)
}
function toggleHistoryPanel() {
@@ -1225,32 +1095,14 @@ function startNewConversation() {
shouldAutoFollowMessages.value = true
}
// Extra payload attached to a chat request when regenerating an answer:
// the retry group identifier plus the persisted user/assistant message pair
// the retry originates from.
// NOTE(review): the changelog states the retry mechanism was removed on the
// backend — confirm this interface is still referenced before keeping it.
interface RetryRequestExtra {
retryGroupId: string
retryFromUserMessageId: string | number
retryFromAssistantMessageId: string | number
}
function isManualThinkingEnabled(mode: ThinkingModeType) {
  // Manual thinking is on only for the explicit 'true' setting;
  // 'auto' and 'false' both count as disabled.
  switch (mode) {
    case 'true':
      return true
    default:
      return false
  }
}
function buildChatRequestExtra(
planningTaskClassIds: number[] = [],
retryExtra?: RetryRequestExtra,
): ChatRequestExtra | undefined {
// 1. retry 与“新一轮智能编排”属于互斥语义:retry 必须严格指向既有历史消息,不应再混入新的任务类上下文。
// 2. 因此只有普通发送链路才透传 task_class_ids,避免 regenerate 时把当前输入区的临时选择误带进历史重试。
// 3. 若本轮没有任何附加上下文,则返回 undefined保持请求体尽量精简。
if (retryExtra) {
return {
request_mode: 'retry',
retry_group_id: retryExtra.retryGroupId,
retry_from_user_message_id: retryExtra.retryFromUserMessageId,
retry_from_assistant_message_id: retryExtra.retryFromAssistantMessageId,
}
}
// retry 机制已整体下线,这里只负责把智能编排所需的 task_class_ids 透传给后端
if (planningTaskClassIds.length <= 0) {
return undefined
}
@@ -1541,99 +1393,6 @@ async function sendMessage(preset?: string) {
}
}
// Regenerate the assistant reply for an existing message:
// 1. locate the visible user prompt preceding the clicked assistant message,
// 2. resolve both to persisted IDs (refreshing history once if needed),
// 3. extend/create the retry group on the existing pair,
// 4. append a fresh user+assistant pair and stream the new answer into it.
// NOTE(review): the changelog states retry was removed on the backend —
// confirm this entry point is still wired up before relying on it.
async function regenerateAssistantMessage(message: AssistantMessage) {
// Ignore clicks while another reply is already streaming.
if (chatLoading.value) {
return
}
const sourceUserMessage = resolveVisibleUserMessageBeforeAssistant(message.id)
const conversationId = selectedConversationId.value
if (!conversationId || !sourceUserMessage) {
ElMessage.warning('没有找到可用于重试的用户消息')
return
}
const retrySource = await resolveRetrySourceMessages(conversationId, sourceUserMessage, message)
// Prefer the re-matched user message's text; fall back to the original.
const text = retrySource.sourceUserMessage?.content.trim() || sourceUserMessage.content.trim()
if (!text) {
ElMessage.warning('没有找到可用于重试的用户消息')
return
}
// Without stable persisted IDs the backend cannot anchor the retry —
// the pair is presumably still being persisted asynchronously.
if (!retrySource.persistedUserMessageId || !retrySource.persistedAssistantMessageId) {
ElMessage.info('消息正在处理,请稍后再重试,或者直接复制消息重新发送')
return
}
chatLoading.value = true
cancelEditUserMessage()
// Reuse the existing retry group when the message already belongs to one;
// otherwise start a new group. The new attempt becomes the next page.
const retryGroup = resolveRetryPageGroup(retrySource.sourceAssistantMessage)
const retryGroupId = retryGroup?.groupId || createRetryGroupId()
const nextRetryIndex = (retryGroup?.total ?? 1) + 1
applyRetryGroupToExistingMessages(
retryGroupId,
nextRetryIndex,
retrySource.sourceUserMessage.id,
retrySource.sourceAssistantMessage.id,
)
const now = new Date().toISOString()
// Append the new user/assistant pair carrying the new retry index.
appendConversationMessage(conversationId, {
id: createMessageId('user'),
role: 'user',
content: text,
createdAt: now,
retryGroupId,
retryIndex: nextRetryIndex,
retryTotal: nextRetryIndex,
})
const retryAssistantMessage = appendConversationMessage(conversationId, {
id: createMessageId('assistant'),
role: 'assistant',
content: '',
createdAt: now,
reasoning: '',
retryGroupId,
retryIndex: nextRetryIndex,
retryTotal: nextRetryIndex,
})
// Show the newest attempt, refresh the conversation preview, and prepare
// the assistant placeholder for incoming stream chunks.
retryVisiblePageMap[retryGroupId] = nextRetryIndex
prependConversationPreview(conversationId, text, now)
prepareAssistantMessageForStreaming(retryAssistantMessage, now)
activeStreamingMessageId.value = retryAssistantMessage.id
scheduleScrollMessagesToBottom(false, true)
try {
const actualConversationId = await streamAssistantReply(
conversationId,
text,
retryAssistantMessage,
now,
true,
buildChatRequestExtra([], {
retryGroupId,
retryFromUserMessageId: retrySource.persistedUserMessageId,
retryFromAssistantMessageId: retrySource.persistedAssistantMessageId,
}),
)
// Re-sync authoritative history and context stats after the stream ends.
await Promise.allSettled([
loadConversationMessages(actualConversationId, true),
loadConversationContextStats(actualConversationId, true),
])
} catch (error) {
// Keep any partial content; only substitute a failure notice when empty.
if (!retryAssistantMessage.content.trim()) {
retryAssistantMessage.content = '重新生成失败,请稍后重试。'
}
reasoningCollapsedMap[retryAssistantMessage.id] = false
ElMessage.error(error instanceof Error ? error.message : '重新生成失败,请稍后重试')
} finally {
// Always clear streaming/loading state, success or failure.
activeStreamingMessageId.value = ''
chatLoading.value = false
}
}
watch(
() => selectedMessages.value.length,
() => {
@@ -1663,8 +1422,8 @@ onBeforeUnmount(() => {
window.clearInterval(reasoningTicker)
reasoningTicker = 0
}
releaseHistoryResizeListeners()
window.removeEventListener('resize', syncHistoryPanelWidthForViewport)
document.body.classList.remove('dashboard-resizing')
})
</script>
@@ -1791,14 +1550,14 @@ onBeforeUnmount(() => {
</div>
<article
v-for="message in selectedMessages"
:key="message.id"
v-for="dm in displayMessages"
:key="dm.id"
class="chat-message"
:class="`chat-message--${message.role}`"
:class="`chat-message--${dm.role}`"
>
<div v-if="message.role === 'user'" class="chat-message__user-row">
<div v-if="dm.role === 'user'" class="chat-message__user-row">
<div class="chat-message__user-bubble">
<template v-if="isEditingUserMessage(message.id)">
<template v-if="isEditingUserMessage(dm.id)">
<div class="chat-message__editor">
<textarea
v-model="editingUserMessageDraft"
@@ -1809,20 +1568,20 @@ onBeforeUnmount(() => {
<button type="button" class="chat-message__editor-button chat-message__editor-button--ghost" @click="cancelEditUserMessage()">
取消
</button>
<button type="button" class="chat-message__editor-button chat-message__editor-button--primary" @click="submitEditedUserMessage(message)">
<button type="button" class="chat-message__editor-button chat-message__editor-button--primary" @click="submitEditedUserMessage(dm.sources[0])">
发送
</button>
</div>
</div>
</template>
<div v-else class="chat-message__markdown" v-html="renderMessageMarkdown(message.content)" />
<div v-else class="chat-message__markdown" v-html="renderMessageMarkdown(dm.content)" />
</div>
<div v-if="!isEditingUserMessage(message.id)" class="chat-message__action-bar chat-message__action-bar--user">
<div v-if="!isEditingUserMessage(dm.id)" class="chat-message__action-bar chat-message__action-bar--user">
<button
type="button"
class="chat-message__icon-button"
aria-label="复制消息"
@click="copyText(message.content, '已复制用户消息')"
@click="copyText(dm.content, '已复制用户消息')"
>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.14923 4.02032C7.11191 4.02032 7.87977 4.02017 8.49591 4.07599C9.12122 4.1327 9.65786 4.25188 10.1414 4.53107C10.7201 4.8653 11.2008 5.34591 11.535 5.92462C11.8142 6.40818 11.9333 6.94482 11.9901 7.57013C12.0459 8.18625 12.0457 8.9542 12.0457 9.91681C12.0457 10.8795 12.0459 11.6474 11.9901 12.2635C11.9333 12.8888 11.8142 13.4254 11.535 13.909C11.2008 14.4877 10.7201 14.9683 10.1414 15.3026C9.65786 15.5817 9.12122 15.7009 8.49591 15.7576C7.87977 15.8134 7.1119 15.8133 6.14923 15.8133C5.18661 15.8133 4.41868 15.8134 3.80255 15.7576C3.17724 15.7009 2.6406 15.5817 2.15704 15.3026C1.57834 14.9684 1.09772 14.4877 0.763489 13.909C0.484305 13.4254 0.365123 12.8888 0.308411 12.2635C0.252587 11.6474 0.252747 10.8795 0.252747 9.91681C0.252747 8.95419 0.252603 8.18625 0.308411 7.57013C0.365123 6.94482 0.484305 6.40818 0.763489 5.92462C1.09771 5.3459 1.57833 4.86529 2.15704 4.53107C2.6406 4.25188 3.17724 4.1327 3.80255 4.07599C4.41868 4.02018 5.1866 4.02032 6.14923 4.02032Z" fill="currentColor" />
@@ -1833,7 +1592,7 @@ onBeforeUnmount(() => {
type="button"
class="chat-message__icon-button"
aria-label="修改消息"
@click="startEditUserMessage(message)"
@click="startEditUserMessage(dm.sources[0])"
>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M9.94073 1.34942C10.7047 0.902314 11.6503 0.902418 12.4143 1.34942C12.706 1.52016 12.9687 1.79118 13.3104 2.13284C13.652 2.47448 13.9231 2.73721 14.0938 3.02894C14.5408 3.79295 14.5409 4.73856 14.0938 5.50251C13.9231 5.79415 13.652 6.05704 13.3104 6.39861L6.65929 13.0497C6.28065 13.4284 6.00692 13.7108 5.6654 13.9097C5.32388 14.1085 4.94312 14.2074 4.42702 14.3498L3.24391 14.6761C2.77524 14.8054 2.34535 14.9262 2.00128 14.9684C1.65193 15.0112 1.17961 15.0013 0.810733 14.6325C0.44189 14.2637 0.432076 13.7913 0.474829 13.442C0.517004 13.0979 0.63787 12.668 0.767151 12.1993L1.09349 11.0162C1.23585 10.5001 1.33478 10.1194 1.53356 9.77785C1.73246 9.43633 2.01487 9.1626 2.39352 8.78395L9.04463 2.13284C9.38622 1.79126 9.64908 1.52017 9.94073 1.34942Z" fill="currentColor" />
@@ -1841,11 +1600,11 @@ onBeforeUnmount(() => {
</svg>
</button>
</div>
<span class="chat-message__time chat-message__time--user">{{ formatMessageTime(message.createdAt) }}</span>
<span class="chat-message__time chat-message__time--user">{{ formatMessageTime(dm.createdAt) }}</span>
</div>
<div v-else class="chat-message__assistant-flow">
<div v-if="shouldShowReasoningBox(message)" class="chat-message__reasoning">
<div v-if="shouldShowDisplayReasoningBox(dm)" class="chat-message__reasoning">
<div class="chat-message__reasoning-head">
<div class="chat-message__reasoning-title">
<span class="chat-message__reasoning-icon">
@@ -1868,18 +1627,18 @@ onBeforeUnmount(() => {
/>
</svg>
</span>
<span class="chat-message__reasoning-status">{{ getReasoningStatusLabel(message) }}</span>
<span class="chat-message__reasoning-status">{{ getDisplayReasoningStatusLabel(dm) }}</span>
</div>
<button
type="button"
class="chat-message__reasoning-toggle"
:aria-label="isReasoningCollapsed(message.id) ? '展开深度思考' : '折叠深度思考'"
@click="toggleReasoningCollapse(message.id)"
:aria-label="isDisplayReasoningCollapsed(dm) ? '展开深度思考' : '折叠深度思考'"
@click="toggleDisplayReasoningCollapse(dm)"
>
<span class="chat-message__reasoning-chevron">
<svg
class="chat-message__reasoning-chevron-icon"
:class="{ 'chat-message__reasoning-chevron-icon--expanded': !isReasoningCollapsed(message.id) }"
:class="{ 'chat-message__reasoning-chevron-icon--expanded': !isDisplayReasoningCollapsed(dm) }"
width="14"
height="14"
viewBox="0 0 14 14"
@@ -1896,11 +1655,11 @@ onBeforeUnmount(() => {
</button>
</div>
<div v-if="!isReasoningCollapsed(message.id)" class="chat-message__reasoning-body">
<div v-if="!isDisplayReasoningCollapsed(dm)" class="chat-message__reasoning-body">
<div
v-if="message.reasoning"
v-if="dm.reasoning"
class="chat-message__markdown chat-message__markdown--reasoning"
v-html="renderMessageMarkdown(message.reasoning)"
v-html="renderMessageMarkdown(dm.reasoning)"
/>
<div v-else class="chat-message__streaming chat-message__streaming--reasoning">
<div class="typing-indicator">
@@ -1912,10 +1671,10 @@ onBeforeUnmount(() => {
</div>
</div>
<div v-if="message.content" class="chat-message__assistant-content">
<div class="chat-message__markdown chat-message__markdown--assistant" v-html="renderMessageMarkdown(message.content)" />
<div v-if="dm.content" class="chat-message__assistant-content">
<div class="chat-message__markdown chat-message__markdown--assistant" v-html="renderMessageMarkdown(dm.content)" />
</div>
<div v-else-if="shouldShowAnsweringIndicator(message)" class="chat-message__streaming chat-message__streaming--plain">
<div v-else-if="shouldShowDisplayAnsweringIndicator(dm)" class="chat-message__streaming chat-message__streaming--plain">
<div class="typing-indicator">
<span />
<span />
@@ -1923,52 +1682,20 @@ onBeforeUnmount(() => {
</div>
</div>
<div v-if="message.content" class="chat-message__action-bar">
<div v-if="dm.content" class="chat-message__action-bar">
<button
type="button"
class="chat-message__icon-button"
aria-label="复制回复"
@click="copyText(message.content, '已复制回复内容')"
@click="copyText(dm.content, '已复制回复内容')"
>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.14923 4.02032C7.11191 4.02032 7.87977 4.02017 8.49591 4.07599C9.12122 4.1327 9.65786 4.25188 10.1414 4.53107C10.7201 4.8653 11.2008 5.34591 11.535 5.92462C11.8142 6.40818 11.9333 6.94482 11.9901 7.57013C12.0459 8.18625 12.0457 8.9542 12.0457 9.91681C12.0457 10.8795 12.0459 11.6474 11.9901 12.2635C11.9333 12.8888 11.8142 13.4254 11.535 13.909C11.2008 14.4877 10.7201 14.9683 10.1414 15.3026C9.65786 15.5817 9.12122 15.7009 8.49591 15.7576C7.87977 15.8134 7.1119 15.8133 6.14923 15.8133C5.18661 15.8133 4.41868 15.8134 3.80255 15.7576C3.17724 15.7009 2.6406 15.5817 2.15704 15.3026C1.57834 14.9684 1.09772 14.4877 0.763489 13.909C0.484305 13.4254 0.365123 12.8888 0.308411 12.2635C0.252587 11.6474 0.252747 10.8795 0.252747 9.91681C0.252747 8.95419 0.252603 8.18625 0.308411 7.57013C0.365123 6.94482 0.484305 6.40818 0.763489 5.92462C1.09771 5.3459 1.57833 4.86529 2.15704 4.53107C2.6406 4.25188 3.17724 4.1327 3.80255 4.07599C4.41868 4.02018 5.1866 4.02032 6.14923 4.02032Z" fill="currentColor" />
<path d="M9.80157 0.367981C10.7637 0.367981 11.5313 0.367886 12.1473 0.423645C12.7725 0.480313 13.3093 0.598765 13.7928 0.877747C14.3716 1.21192 14.852 1.69355 15.1863 2.27228C15.4655 2.75575 15.5857 3.29165 15.6424 3.91681C15.6982 4.53301 15.6971 5.30161 15.6971 6.26447V7.8299C15.6971 8.29265 15.6989 8.58994 15.6649 8.84845C15.4667 10.3525 14.4009 11.5738 12.9832 11.9988V10.5467C13.6973 10.1903 14.2104 9.49662 14.3192 8.67169C14.3387 8.52348 14.3406 8.3358 14.3406 7.8299V6.26447C14.3406 5.27707 14.3398 4.58149 14.2908 4.04083C14.2427 3.50969 14.1526 3.19373 14.0125 2.95099C13.7974 2.5785 13.4875 2.2687 13.1151 2.05353C12.8723 1.91347 12.5563 1.82237 12.0252 1.77423C11.4846 1.72528 10.7888 1.7254 9.80157 1.7254H7.71466C6.75614 1.72559 5.92659 2.27697 5.52325 3.07892H4.07013C4.54215 1.51132 5.99314 0.368192 7.71466 0.367981H9.80157Z" fill="currentColor" />
</svg>
</button>
<button
type="button"
class="chat-message__icon-button"
aria-label="重新生成"
:disabled="chatLoading"
@click="regenerateAssistantMessage(message)"
>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M7.92139 0.349152C10.3744 0.349234 12.5564 1.5052 13.9558 3.29894L15.1281 2.12759C15.3304 1.92546 15.6767 2.06943 15.6767 2.35538V5.53923C15.6766 5.71626 15.5329 5.85976 15.3559 5.86002H12.171C11.8855 5.8597 11.7426 5.51465 11.9443 5.31249L12.9641 4.29056C11.8237 2.74305 9.98911 1.74106 7.92139 1.74097C4.46439 1.74097 1.66236 4.543 1.66236 8C1.66236 11.457 4.46439 14.259 7.92139 14.259C11.3783 14.2589 14.1804 11.4569 14.1804 8H15.5722C15.5722 12.2251 12.1465 15.6507 7.92139 15.6508C3.69617 15.6508 0.270538 12.2252 0.270538 8C0.270538 3.77478 3.69617 0.349152 7.92139 0.349152Z" fill="currentColor" />
</svg>
</button>
<div v-if="shouldShowRetryPager(message)" class="chat-message__retry-pager">
<button
type="button"
class="chat-message__retry-pager-button"
:disabled="resolveRetryPageGroup(message)?.visibleIndex === 1"
@click="changeRetryPage(message, -1)"
>
&lt;
</button>
<span class="chat-message__retry-pager-label">
{{ resolveRetryPageGroup(message)?.visibleIndex }}/{{ resolveRetryPageGroup(message)?.total }}
</span>
<button
type="button"
class="chat-message__retry-pager-button"
:disabled="resolveRetryPageGroup(message)?.visibleIndex === resolveRetryPageGroup(message)?.total"
@click="changeRetryPage(message, 1)"
>
&gt;
</button>
</div>
</div>
<span class="chat-message__time">{{ formatMessageTime(message.createdAt) }}</span>
<span class="chat-message__time">{{ formatMessageTime(dm.createdAt) }}</span>
</div>
</article>
</div>
@@ -1979,6 +1706,7 @@ onBeforeUnmount(() => {
:key="action"
type="button"
class="assistant-actions__chip"
:disabled="chatLoading"
@click="sendMessage(action)"
>
{{ action }}
@@ -2255,6 +1983,7 @@ onBeforeUnmount(() => {
min-height: 0;
overflow-y: auto;
overflow-x: hidden;
overscroll-behavior: contain;
display: grid;
align-content: start;
gap: 12px;
@@ -2510,6 +2239,7 @@ onBeforeUnmount(() => {
min-height: 0;
overflow-y: auto;
padding: 24px 28px 18px;
overscroll-behavior: contain;
display: grid;
gap: 20px;
align-content: start;
@@ -2626,44 +2356,6 @@ onBeforeUnmount(() => {
cursor: not-allowed;
}
.chat-message__retry-pager {
display: inline-flex;
align-items: center;
gap: 6px;
margin-left: 2px;
}
.chat-message__retry-pager-button {
width: 24px;
height: 24px;
border: none;
border-radius: 999px;
background: transparent;
color: #7b8798;
font-size: 14px;
line-height: 1;
cursor: pointer;
transition: background-color 0.15s ease, color 0.15s ease;
}
.chat-message__retry-pager-button:hover {
background: rgba(79, 118, 234, 0.1);
color: #3f69d3;
}
.chat-message__retry-pager-button:disabled {
opacity: 0.3;
cursor: not-allowed;
}
.chat-message__retry-pager-label {
min-width: 34px;
text-align: center;
color: #6f7b8e;
font-size: 12px;
font-weight: 600;
}
.chat-message__editor {
width: min(100%, 640px);
border: 1px solid rgba(77, 107, 254, 0.22);
@@ -2970,6 +2662,23 @@ onBeforeUnmount(() => {
font-size: 12px;
}
.assistant-actions__chip:disabled {
opacity: 0.48;
cursor: not-allowed;
}
.assistant-history__toggle:focus-visible,
.assistant-history__new:focus-visible,
.assistant-history__item:focus-visible,
.assistant-actions__chip:focus-visible,
.chat-message__icon-button:focus-visible,
.chat-message__editor-button:focus-visible,
.chat-message__reasoning-toggle:focus-visible,
.ds-icon-button:focus-visible {
outline: 2px solid rgba(37, 99, 235, 0.36);
outline-offset: 2px;
}
.assistant-composer-ds {
--dsw-alias-brand-text: #3357c2;
--dsw-alias-label-primary: #1f2430;
@@ -3083,9 +2792,9 @@ onBeforeUnmount(() => {
}
.assistant-toolbar__context-meter {
width: 144px;
min-width: 144px;
flex: 0 0 144px;
width: 188px;
min-width: 188px;
flex: 0 0 188px;
margin-right: auto;
}
@@ -3253,9 +2962,9 @@ onBeforeUnmount(() => {
}
.assistant-toolbar__context-meter {
width: 144px;
min-width: 144px;
flex-basis: 144px;
width: 188px;
min-width: 188px;
flex-basis: 188px;
margin-right: 0;
order: 3;
}

View File

@@ -88,19 +88,12 @@ export interface AssistantMessage {
content: string
createdAt: string
reasoning?: string
retryGroupId?: string
retryIndex?: number
retryTotal?: number
}
export type ThinkingModeType = 'auto' | 'true' | 'false'
export interface ChatRequestExtra {
task_class_ids?: number[]
request_mode?: 'retry'
retry_group_id?: string
retry_from_user_message_id?: string | number
retry_from_assistant_message_id?: string | number
confirm_action?: string
always_execute?: boolean
resume?: Record<string, unknown>

View File

@@ -75,14 +75,20 @@ function handleSidebarNavigate(item: SidebarItem) {
<style scoped>
.assistant-view {
box-sizing: border-box;
height: 100vh;
height: 100dvh;
min-height: 100vh;
min-height: 100dvh;
padding: 10px;
overflow: hidden;
background: #f4f7fb;
background:
radial-gradient(circle at top left, rgba(22, 92, 168, 0.1), transparent 30%),
linear-gradient(180deg, #f8fbff 0%, #eef3f9 100%);
}
.assistant-view__layout {
height: calc(100vh - 20px);
height: 100%;
min-height: 0;
display: grid;
grid-template-columns: 78px minmax(0, 1fr);
@@ -182,6 +188,8 @@ function handleSidebarNavigate(item: SidebarItem) {
@media (max-width: 720px) {
.assistant-view {
height: auto;
min-height: 100vh;
min-height: 100svh;
padding: 8px;
overflow: visible;
}